text
stringlengths 37
1.41M
|
---|
# Swap the first two characters between two input strings and print both results.
a=input("enter first string:")
b=input("enter second string:")
first_chars = list(a)
second_chars = list(b)
for idx in range(2):
    first_chars[idx] = b[idx]
    second_chars[idx] = a[idx]
# single print instead of building intermediate joined names
print(''.join(first_chars) + ' ' + ''.join(second_chars))
|
# Read one key/value pair from the user and merge it into a small dictionary.
key=int(input("Enter the key (int) to be added:"))
value=int(input("Enter the value for the key to be added:"))
a={0:2,1:3}
a[key] = value  # equivalent to a.update({key: value})
print("Updated dictionary is:")
print(a)
|
# 2019/09/30
n=int(input())  # the integer classified as 'Prime'/'Not Prime' below
def is_prime(n):
    """Return True if n is a prime number.

    Trial division up to sqrt(n).  Fix: the original only rejected n == 1,
    so 0 and negative numbers fell through to an empty loop and returned True.
    """
    if n < 2:
        return False
    for i in range(2, int(pow(n, 0.5)) + 1):
        if n % i == 0:  # a divisor found -> composite
            return False
    return True
def is_poi(n):
    """True when n is not 1 and is divisible by none of 2, 3 and 5."""
    if n == 1:
        return False
    return all(n % d for d in (2, 3, 5))
if is_prime(n) or is_poi(n):
print('Prime')
else:
print('Not Prime')
|
# 2019/08/31
# Sort strings by their reversed form, then print them in original orientation.
n=int(input())
s=[]
for i in range(n):
    s.append(input()[::-1])
s.sort()
for e in s:
    print(e[::-1])
# Clean solution (translated: "きれいな解答")
n = int(input())
s = sorted([input()[::-1] for _ in range(n)])
print("\n".join(map(lambda x: x[::-1], s))) # <- this part is neat (translated comment)
|
# https://atcoder.jp/contests/abc043/tasks/abc043_b
# Simulate a backspace editor: 'B' deletes the last kept character,
# any other character is appended.
s=input()
res=[]
for e in s:
    if e=='B':
        if res:
            res.pop()
        else:continue  # nothing to delete
    else:
        res.append(e)
print(*res,sep='')
|
# 2019/08/14
# A round trip is possible only if N/S occur together and W/E occur together.
s=set(input())
if ('N' in s and 'S' not in s) or ('S' in s and 'N' not in s)\
or ('W' in s and 'E' not in s) or ('E' in s and 'W' not in s):
    print('No')
else:
    print('Yes')
# Cleaner way to write it (translated: "きれいな書き方") — XOR of memberships
s=set(input())
if ('N' in s)^('S' in s) or ('W' in s)^('E' in s):
    print('No')
else:
    print('Yes')
|
# 2019/07/29
# Union-Find
input=open(0).readline  # NOTE: rebinds builtin input() to raw stdin reads for speed
class unionfind():
    """Disjoint-set union (union by rank + path compression) over ids 0..n."""
    def __init__(self,n):
        # n+1 entries so both 0-based and 1-based node ids work
        self.par=list(range(n+1))
        self.rank=[0]*(n+1)
    def find(self,x):
        """Return the representative root of x, compressing the path."""
        if self.par[x]==x:
            return x
        self.par[x]=self.find(self.par[x])
        return self.par[x]
    def is_same(self,x,y):
        """True when x and y belong to the same set."""
        return self.find(x)==self.find(y)
    def unite(self,x,y):
        """Merge the sets containing x and y (union by rank)."""
        x=self.find(x)
        y=self.find(y)
        if x==y:
            # fix: the original merged a root with itself, needlessly
            # inflating its rank on repeated unions of the same set
            return
        if self.rank[x]<self.rank[y]:
            self.par[x]=y
        else:
            self.par[y]=x
            if self.rank[x]==self.rank[y]:
                self.rank[x]+=1
n,q=map(int,input().split())
uf=unionfind(n)
for i in range(q):
    # each query line: type x y ; type 0 = unite, nonzero = same-set check
    # NOTE: q is reused for the query type — safe because range(q) was
    # already evaluated, but confusing
    q,x,y=map(int,input().split())
    if q:
        print('Yes' if uf.is_same(x,y) else 'No')
    else:
        uf.unite(x,y)
|
# Prime number theorem: pi(x) is approximately x / ln(x)
import math
def get_num(x):
    """Approximate the count of primes below x via the prime number theorem."""
    return x / math.log(x)
# Example usage:
# print(get_num(10**216) - get_num(10**215))
# print(get_num(10**215))
|
from collections import Counter
# Print, in sorted order, every string that occurs the maximum number of times.
n=int(input())
s=[input() for _ in range(n)]
c=Counter(s)
mx=c.most_common()[0][1]  # highest frequency
ans=[]
for e in c.most_common():
    if mx==e[1]:
        ans.append(e[0])
ans.sort()
print(*ans,sep='\n')
|
# Validate a string: odd (1-based) positions must avoid 'L',
# even positions must avoid 'R'.
s=list(input())
odd=['R','U','D']   # characters allowed at odd positions
even=['L','U','D']  # characters allowed at even positions
for i in range(len(s)):
    if (i+1)%2:
        if s[i] not in odd:
            print('No')
            exit()
    else:
        if s[i] not in even:
            print('No')
            exit()
print('Yes')
|
import numpy as np
import math
import heapq
class Navigation:
    """A* route planner over a weighted graph read from "Lab7/input.txt".

    Nodes carry 2-D coordinates; astar() searches over (node, money-left)
    states, travelling by cycle (free) or bus (faster when congestion is
    low, but costs money).
    """
    def __init__(self):
        self.n = -1          # number of nodes
        self.edges = -1      # number of edges
        self.node = dict()   # node index -> (x, y) coordinates
        self.graph = []      # adjacency list: node -> [(neighbour, weight)]
        self.start = -1
        self.goal = -1
        self.takeInput()     # NOTE: constructor immediately reads the input file
    def takeInput(self):
        """Parse node count, coordinates, edges and start/goal (1-based in file)."""
        #Take input from file
        file = open ("Lab7/input.txt" , "r")
        line = file.readline()
        words = line.strip().split()
        self.n = int (words[0])
        #get coordinates of nodes
        for x in range(self.n):
            line = file.readline()
            words = line.strip().split()
            temp , coordinates = int(words[0]) , (int(words[1]) , int (words[2]))
            temp -= 1  # file uses 1-based ids; store 0-based
            self.node[temp] = coordinates
        words = file.readline().strip().split()
        self.edges = int(words[0])
        #get edges and edge weights
        self.graph = [[] for _ in range(self.n)]
        for e in range(self.edges):
            line = file.readline()
            words = line.strip().split()
            x , y, weight = int (words[0]) - 1 , int (words[1]) - 1 , float(words[2])
            self.graph[x].append((y , weight))
            self.graph[y].append((x , weight))
        #get start and goal state
        word = file.readline().strip().split()
        self.start = int(word[0]) - 1
        word = file.readline().strip().split()
        self.goal = int(word[0]) - 1
        file.close()
    def printGraph(self):
        """Dump nodes, adjacency list and start/goal for debugging."""
        print("number of nodes : " , self.n)
        print("Nodes with there coordinates : ")
        print(self.node)
        print("Graph : ")
        for i in range(self.n):
            print(i+1, end = " ")
            print(self.graph[i])
        print("Start state : " , self.start)
        print("Goal state : " , self.goal)
    #Euclidean Heuristic
    def heuristic(self, x, speed):
        # straight-line distance to the goal divided by speed = optimistic time
        return math.sqrt((self.node[self.goal][0] - x[0])**2 + (self.node[self.goal][1] - x[1])**2) / speed
    #solve using a star algo
    def astar(self, congestion, budget, cost):
        """Run A* from start to goal.

        congestion : percent slowdown applied to the bus speed.
        budget     : money available for bus rides.
        cost       : money charged per time-unit of bus travel.
        Returns True (and prints the path) when the goal is reached.
        """
        print("Congestion: " + str(congestion) + "%")
        cycle = 25.0
        bus = 10.0 + 40.0 * (100 - congestion)/100
        speed = max(cycle, bus)  # most optimistic speed keeps the heuristic admissible
        #store the state of each node as parent, g(n), h(n), mode
        #each node is now stored as a tuple of (node, budget) in the search tree
        state = dict()
        Q = []
        heapq.heapify(Q)
        explored = set()
        qcount = 0  # push counter: tie-breaker so the heap never compares payloads
        frontier = []
        state[(self.start, budget)] = ((-1, -1), 0, self.heuristic(self.node[self.start], speed), -1)
        heapq.heappush(Q, (state[(self.start, budget)][1] + state[(self.start, budget)][2], budget, qcount, self.start))
        frontier.append((self.start, budget))
        counter = 0
        while Q:
            counter += 1
            print("counter: " + str(counter))
            d, m, w, curr = heapq.heappop(Q)
            frontier.remove((curr, m))
            #do not expand a node where money left is less than 0
            if(m < 0):
                continue
            if(curr == self.goal):
                self.printPath(state, m, budget)
                return True
            explored.add(curr)
            # NOTE(review): explored stores bare node ids while s below is a
            # (node, money) tuple, so `s not in explored` never matches — confirm.
            for v in self.graph[curr]:
                vertex = v[0]
                weight = v[1]
                s = (vertex, m)
                #add cycle in frontier
                if(s not in explored and s not in frontier):
                    state[s] = ((curr, m), state[(curr, m)][1] + weight/cycle, self.heuristic(self.node[vertex], cycle), 1)
                    qcount += 1
                    heapq.heappush(Q, (state[s][1] + state[s][2], m, qcount, vertex))
                    frontier.append(s)
                elif s in frontier:
                    # relax: a better g(n) found for a state already queued
                    if(state[s][1] > state[(curr, m)][1] + weight/cycle):
                        state[s] = ((curr, m), state[(curr, m)][1] + weight/cycle, self.heuristic(self.node[vertex], cycle), 1)
                        qcount += 1
                        heapq.heappush(Q, (state[s][1] + state[s][2], m, qcount, vertex))
                        frontier.append(s)
                #add bus in frontier subject to condition
                b = int(m - (weight/bus)*cost)  # money left after taking the bus
                if(weight > 3 and b > 0):
                    s = (vertex, b)
                    if(s not in explored and s not in frontier):
                        state[s] = ((curr, m), state[(curr, m)][1] + weight/bus, self.heuristic(self.node[vertex], bus), 0)
                        qcount += 1
                        heapq.heappush(Q, (state[s][1] + state[s][2], b, qcount, vertex))
                        frontier.append(s)
                    elif s in frontier:
                        if(state[s][1] > state[(curr, m)][1] + weight/bus):
                            state[s] = ((curr, m), state[(curr, m)][1] + weight/bus, self.heuristic(self.node[vertex], bus), 0)
                            qcount += 1
                            heapq.heappush(Q, (state[s][1] + state[s][2], b, qcount, vertex))
                            frontier.append(s)
        print("Path not found")
        return False
    def printPath(self, state, m, budget):
        """Walk parent links back from the goal; print path, cost and money left."""
        current = (self.goal, m)
        path = []
        while(current[0] != self.start):
            path.insert(0, (current[0], state[current][3]))
            current = state[current][0]
        path.insert(0, (self.start, state[(self.start, budget)][3]))
        print("Path:", end = " ")
        for (i, j) in path:
            # mode codes stored in state: -1 start, 0 bus, 1 cycle
            if(j == -1):
                j = "Start"
            elif(j == 0):
                j = "Bus"
            else:
                j = "Cycle"
            print(j + " " + str(i + 1), end = " ")
        print()
        print("Total Cost: " + str(state[(self.goal, m)][1] * 60) + " minutes.")
        print("Money Left: " + str(m))
        print()
# Build three navigators over the same input file; only the 0%-congestion
# run is enabled, the others are left commented out.
Travel1 = Navigation()
Travel1.astar(0, 200, 1)
Travel2 = Navigation()
#Travel2.astar(50, 200, 1)
Travel3 = Navigation()
#Travel3.astar(100, 200, 1)
|
class Environment:
    """Grid world tracking a dirt cell (xd, yd) and the robot cell (xr, yr)."""

    def __init__(self, xd, yd, xs, ys):
        self.xd = xd
        self.yd = yd
        self.xr = xs
        self.yr = ys

    def updateState(self, direction):
        """Shift the tracked robot position one cell in the given direction;
        unknown directions leave the position untouched."""
        dx, dy = {"left": (-1, 0), "right": (1, 0),
                  "up": (0, 1), "down": (0, -1)}.get(direction, (0, 0))
        self.xr += dx
        self.yr += dy

    def providePerception(self):
        """True exactly when the robot stands on the dirt cell."""
        return self.xr == self.xd and self.yr == self.yd
class Agent :
    """Robot that walks an expanding square spiral: up, right, down, left, ...

    After each 'right' and 'left' arm the leg length grows by one, which
    produces the outward spiral search pattern.
    """
    def __init__(self) :
        self.x = 0            # believed x offset from the start cell
        self.y = 0            # believed y offset from the start cell
        self.direction = "up" # current heading
        self.steps = 1        # length of the current spiral arm
        self.s = 0            # steps already taken on the current arm
    def getPerception(self, env) :
        # Ask the environment whether the dirt has been reached.
        return env.providePerception()
    def takeAction(self, env) :
        """Advance one step along the spiral, or turn when the arm is done.

        A tick where self.s == self.steps only turns (no movement).
        """
        if(self.direction == "up") :
            if(self.s < self.steps) :
                self.y += 1
                self.s += 1
                env.updateState(self.direction)
            elif(self.s == self.steps) :
                self.direction = "right"
                self.s = 0
        elif(self.direction == "right") :
            if(self.s < self.steps) :
                self.x += 1
                self.s += 1
                env.updateState(self.direction)
            elif(self.s == self.steps) :
                self.direction = "down"
                self.s = 0
                self.steps += 1  # arm grows after finishing a 'right' leg
        elif(self.direction == "down") :
            if(self.s < self.steps) :
                self.y -= 1
                self.s += 1
                env.updateState(self.direction)
            elif(self.s == self.steps) :
                self.direction = "left"
                self.s = 0
        elif(self.direction == "left") :
            if(self.s < self.steps) :
                self.x -= 1
                self.s += 1
                env.updateState(self.direction)
            elif(self.s == self.steps) :
                self.direction = "up"
                self.s = 0
                self.steps += 1  # arm grows after finishing a 'left' leg
print("Give starting location (x y) ")
xs, ys = map(int, input().split())
print("Give dirt location (x y) ")
xd, yd = map(int, input().split())
Room = Environment(xd, yd, xs, ys)
Robot = Agent()
# Spiral outward until the robot's tracked position matches the dirt cell.
while(Robot.getPerception(Room) == False) :
    condition = "Current Location: (" + str(Room.xr) + ", " + str(Room.yr) + ")"
    condition += ", Robot Location: (" + str(Robot.x) + ", " + str(Robot.y) + ")"
    condition += ", Perception: No Dirt"
    Robot.takeAction(Room)
    condition += ", Action Taken: Move " + Robot.direction
    condition += ", Dirt Location: (" + str(Room.xd) + ", " + str(Room.yd) + ")"
    if(Robot.s > 0) :
        # s == 0 means the agent only turned this tick; turns are not logged
        print(condition)
condition = "Current Location: (" + str(Room.xr) + ", " + str(Room.yr) + ")"
condition += ", Robot Location: (" + str(Robot.x) + ", " + str(Robot.y) + ")"
condition += ", Perception: Dirt Found"
condition += ", Action Taken: Dirt Cleaned "
condition += ", Dirt Location: (" + str(Room.xd) + ", " + str(Room.yd) + ")"
print(condition)
|
# '=' is the assignment operator; demo of Python's arithmetic operators.
a = 1
b = 2
c = a + b   # addition
print(c)
d = a - b   # subtraction
print(d)
f = a * b   # multiplication
print(f)
g = a / b   # true division
print(g)
h = a // b  # floor division (quotient)
print(h)
k = a % b   # remainder
print(k)
l = a ** b  # exponentiation
print(l)
|
# Translation of the note below: "has the phone-number validation been
# factored out into its own function?"
'''
把手机号验证的功能是不是拆除来了
'''
def register(account,pwd):
    # Validate the account as a phone number before "registering".
    a = isPhone(account)
    if a:
        print("呵呵呵")
def login(account,pwd):
    # Same validation, reused from isPhone().
    result = isPhone(account)
    if result:
        print("哈哈哈")
def isPhone(account):  # is the account a well-formed phone number?
    """True for an 11-character account starting with "1"."""
    return len(account) == 11 and account.startswith("1")
# Translation of the note below: when several functions share the same
# duplicated logic, factor it into one function that they all call.
'''
当前有多个函数里面有相同重复的功能的时候,这个时候应该考虑独立封装成一个函数,方便多个函数去调用
'''
register("18612345678","122")
login("18612345678","122")
|
# Dictionary traversal demo: items() yields (key, value) tuples.
d = {"name":"xiaoyuan","age":28,"sex":"男"}
for pair in d.items():
    # print key then value on separate lines
    for element in pair:
        print(element)
# unpack the pair directly in the for-statement
for k,v in d.items():
    print(k,v)
|
import random
# Number guessing game: up to 10 tries, then rank the player by attempts.
number = random.randint(1,100)
count = 0
for i in range(10):
    user = int(input("请输入数字"))
    if user > number:
        print("猜大了")
    elif user < number:
        print("猜小了")
    else:
        print("猜对了")
        break
    count+=1  # number of wrong guesses so far
# NOTE(review): count only counts wrong guesses, so a first-try win leaves
# count == 0 and prints no rank; also `count >= 9 and count == 10` reduces
# to count == 10, which is unreachable after a win on attempt 10 — likely
# intended to be just `count >= 9`.
if count == 1:
    print("大神")
elif count > 1 and count < 5:
    print("半仙")
elif count >=5 and count < 9:
    print("修仙")
elif count >=9 and count == 10:
    print("23333")
|
# Business-card manager: interactive menu loop.
cards = []  # card records; renamed from `list`, which shadowed the builtin
print("名片管理系统".center(50,"*"))
while True:
    # menu
    print("1:添加名片".center(50," "))
    print("2:查找名片".center(50," "))
    print("3:修改名片".center(50," "))
    print("4:删除名片".center(50," "))
    print("5:退出".center(50," "))
    num = int(input("请选择功能"))
    if num == 1:
        # collect one card's fields and append the record
        card = {}
        name = input("请输入要添加的名字")
        job = input("请输入要添加的职位")
        phone = input("请输入手机号")
        card["name"] = name
        card["job"] = job
        card["phone"] = phone
        cards.append(card)
        print(cards)
    # NOTE(review): menu options 2-5 are not implemented; option 5 never exits.
|
age = int(input("请输入年龄"))
sex = input("请输入性别")
# A girl older than 15 may play games; otherwise boys / over-18s "go farm".
# Example input: 17-year-old male.
# NOTE(review): branches overlap — a girl over 18 matches the first branch
# before the second is ever considered.
if age > 15 and sex =="女":
    print("可以玩游戏")
elif age > 18 or sex == "男":
    print("回家种地")
else:
    print("哈哈哈哈哈哈哈哈哈")
|
account = input("请输入账号")
pwd = input("请输入密码")
print("欢迎光临您的个人银行\n祝你花钱愉快")
money = 100000
# Interest for 128 days at a 4% annual rate.
other_money = 0.04/365*128*money
print("你的个人资产:%d,昨天收益大概是%d"%(money,other_money))
need_money = int(input("请输入取款金额"))
# Balance left after the withdrawal.
# NOTE(review): account/pwd are read but never checked.
f_money = money+other_money-need_money
print("剩余金额:%d"%f_money)
|
# String-method demo (comments translated from Korean).
a = "hello world, my name is python"
print(a.upper())  # convert every lowercase letter to uppercase
b = "LOST ARK"
print(b.lower())  # convert every uppercase letter to lowercase
a1 = """
hello guys haha
"""
print("------")
print(a1)
print("------")
print(a1.strip())  # trim leading/trailing whitespace
print("안녕" in "안녕하세요")  # substring membership test
ex = "10 20 30 40 50 70 9 0".split()
print(ex)
|
# Augmented-assignment demo: += on ints and strings, **= on ints.
a = 0
b = 2
print(a)
a+=10
print(a)
a+=10
print(a)
print(b)
b**=2
print(b)
b**=2
print(b)
b**=2
print(b)
say = "Hello"
print(say)
say+="!"
print(say)
say+="!"
print(say)
say+="!"
print(say)
|
class Entry:
    def __init__(self, address, available, last_used):
        """
        Constructor for Entry data structure.
        self.address -> str
        self.available -> bool
        self.last_used -> datetime
        """
        self.address = address
        self.available = available
        self.last_used = last_used

    def __gt__(self, other):
        """Order entries lexicographically by address.

        Simplified from the verbose if/else that returned literal booleans.
        """
        return self.address > other.address
|
import streamlit as st
import pandas as pd
# Streamlit app: compound-interest calculator driven by sidebar widgets.
Year_List = [2, 3, 4, 5, 6, 7, 8, 9, 10]
st.write("""
# Compound Interest Calculator!
""")
st.sidebar.header('User Input Values')
def user_input_features():
    # Collect rate / principal / years from the sidebar into a one-row frame.
    Int_Rate = st.sidebar.slider('Interest Rate in %', 6.0, 42.0, 10.0)
    Principal = st.sidebar.text_input('Please input Principal Amount',10000)
    No_Of_Years = st.sidebar.selectbox('Select No of Years', Year_List, 2)
    data = {'Int_Rate': Int_Rate,
            'Principal': Principal,
            'No_Of_Years': No_Of_Years}
    features = pd.DataFrame(data, index=[0])
    return features
df = user_input_features()
st.subheader('User Entered parameters for Rate, Principal amount and ')
st.write(df)
#Compound Interest function
def compound_int(Principal, Int_Rate, No_Of_Years):
    # CI = P * ((1 + r/100)**years - 1): accumulate the growth factor yearly.
    # NOTE(review): arguments arrive as one-element pandas Series (df.<col>).
    comp=1.0
    for i in range(0, int(No_Of_Years)):
        comp = comp * (1+Int_Rate/100)
    comp = float(Principal)*(comp-1)
    comp_text=str("Compound Interest is" + str("%.3f" % comp))
    st.write(comp_text)
    data_1 = {'Computed_Compoud_Interest': comp_text}
    result = pd.DataFrame(data_1, index=[0])
    return result
st.subheader('The calculated compound interest is')
df_1 = compound_int(df.Principal, df.Int_Rate, df.No_Of_Years)
st.subheader('This is print of data frame')
st.write(df_1)
|
import math
from functools import total_ordering

@total_ordering
class Polygon:
    """Regular strictly convex polygon: every interior angle is below
    180 degrees and all sides share the same length.
    """

    def __init__(self, edges, circumradius):
        edges = int(edges)
        if edges < 3:
            raise ValueError('Number of edges should be >= 3.')
        if circumradius <= 0:
            raise ValueError('Circuradius should be > 0')
        self._edges = edges
        self._circumradius = circumradius

    def __repr__(self):
        return f"Polygon({self.edges},{self.circumradius})"

    @property
    def edges(self):
        """Number of edges."""
        return self._edges

    @property
    def vertices(self):
        """A regular polygon has as many vertices as edges."""
        return self._edges

    @property
    def circumradius(self):
        """Radius of the circumscribed circle."""
        return self._circumradius

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self.edges, self.circumradius) == (other.edges, other.circumradius)

    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.edges > other.edges

    @property
    def interior_angle(self):
        """Interior angle in degrees: (n - 2) * 180 / n."""
        return (self.edges - 2) * 180 / self.edges

    @property
    def edge_length(self):
        """Side length: 2 * R * sin(pi / n)."""
        return 2 * self.circumradius * math.sin(math.pi / self.edges)

    @property
    def apothem(self):
        """Centre-to-edge distance: R * cos(pi / n)."""
        return self.circumradius * math.cos(math.pi / self.edges)

    @property
    def perimeter(self):
        """Total boundary length."""
        return self.edges * self.edge_length

    @property
    def area(self):
        """Area = perimeter * apothem / 2."""
        return self.perimeter * self.apothem / 2
|
# A palindromic number reads the same both ways. The largest palindrome made from
# the product of two 2-digit numbers is 9009 = 91 x 99.
#
# Find the largest palindrome made from the product of two 3-digit numbers.
def is_palindromic(num):
    """True if num's decimal representation reads the same both ways."""
    num_str = str(num)
    return num_str == num_str[::-1]

# Fix: the helper above was commented out but a stray `return True` was left
# at module level, which is a SyntaxError; the helper is now real and used.
num_range = range(100, 1000)
largest_palindrom = max(n1 * n2
                        for n1 in num_range
                        for n2 in num_range
                        if is_palindromic(n1 * n2))
print(largest_palindrom)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import sqrt
from util import is_prime
'''
Problem 179
Find the number of integers 1 < n < 107, for which n and n + 1 have the same number of positive divisors. For example, 14 has the positive divisors 1, 2, 7, 14 while 15 has 1, 3, 5, 15.
'''
def factor_cnt(n):
    """Count the positive divisors of n."""
    divisors = set()
    i = 1
    while i * i <= n:
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)  # the paired divisor above sqrt(n)
        i += 1
    return len(divisors)
def p179(): # 60 min > run time (bad..)
    """Problem 179: count n in (1, 10**7) where n and n+1 have the same
    number of positive divisors (brute force over divisor counts)."""
    L = 10 ** 7
    cnt = 1 # 2, 3 — the only prime pair, seeded by hand since primes are skipped
    last_fc = 0
    for i in range(2, L):
        if is_prime(i):
            # reset so a prime never matches its neighbour's divisor count
            last_fc = 0
            continue
        fc = factor_cnt(i)
        if last_fc == fc:
            cnt += 1
            print([i], fc, last_fc)
        last_fc = fc
    print('[179]: ', cnt)
    return
p179()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 131
There are some prime values, p, for which there exists a positive integer, n, such that the expression n3 + n2p is a perfect cube.
For example, when p = 19, 83 + 82×19 = 123.
What is perhaps most surprising is that for each prime with this property the value of n is unique, and there are only four such primes below one-hundred.
How many primes below one million have this remarkable property?
'''
from util import prime_sieve, is_prime
from itertools import count
def is_perfect_cube(x):
    """True when x equals some integer cubed (intended for x >= 0)."""
    # x = abs(x)  — negative support was left disabled in the original
    root = int(round(x ** (1. / 3)))
    return root ** 3 == x
def p131_slow(): # Answer: 173, 68.54s Mac pro 2016
    """Brute force: for each prime p, search n = i**3 (so n**2 is a cube
    automatically) such that n + p is also a perfect cube."""
    cnt = 0
    primes = prime_sieve(1000000)
    for p in primes:
        for i in count(1):
            n = i ** 3
            if is_perfect_cube(n + p):
                if is_perfect_cube(n ** 2):
                    # print('[great] ', [p, n, i], n**2, n+p)
                    cnt += 1
                    break
            if i > 600:
                # empirical cutoff: no solution found beyond this for p < 10**6
                break
    print(cnt)
def p131():
    """Fast solution: if n**3 + n**2*p is a cube then it must be (n+1)**3,
    so p = 3n**2 + 3n + 1 — count such primes below one million."""
    # n**3 + p = (n+1)**3
    # p = 3n**2 + 3n + 1
    cnt = 0
    for n in count(1):
        p = 3 * (n ** 2) + 3 * n + 1
        if p >= 1000000:
            break
        if is_prime(p):
            cnt += 1
    print(cnt)
p131()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 303
For a positive integer n, define f(n) as the least positive multiple of n that, written in base 10, uses only digits ≤ 2.
Thus f(2)=2, f(3)=12, f(7)=21, f(42)=210, f(89)=1121222.
Also, . n = 1 ~ 100, f(n)/n = 11363107
Find . n = 1 ~ 10000, f(n)/n = ?
'''
from itertools import cycle, product
from functools import reduce
'''
mul = [ [1],
[1, 1, 8],
[1, 4, 1, 4],
[4, 3, 3],
[3, 2, 3, 2],
[2],
[2, 3, 2, 3],
[3, 3, 4],
[4, 1, 4, 1],
[8, 1, 1]]
def digit_012_check(n):
while n != 0:
d, m = divmod(n, 10)
if m > 2:
return False
n = d
return True
def fn(n):
if digit_012_check(n):
print([n], '-', 1, n)
return 1
#mul = [ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# [0, 1, 2],
# [0, 1, 5, 6],
# [0, 4, 7],
# [0, 3, 5, 8],
# [0, 2, 4, 6, 8],
# [0, 2, 5, 7,],
# [0, 3, 6],
# [0, 4, 5, 9],
# [0, 8, 9]]
j = 0
for i in cycle(mul[n % 10]):
j += i
m = n * j
if digit_012_check(m):
print([n], i, j, m)
return j
'''
def p303(): # Answer: 1111981904675169, pretty awful though
    """Problem 303: sum f(n)//n for n = 1..10000, where f(n) is the least
    positive multiple of n whose decimal digits are all <= 2.

    Enumerates digit-{0,1,2} numbers (up to 30 digits, ascending) and
    crosses off each n they are a multiple of; two stubborn values
    (9990, 9999) are precomputed by hand below.
    """
    L = 10000 + 1
    check = [x for x in range(3, L)]  # values still awaiting their f(n)
    result = [0] * L
    result[0] = 1
    result[1] = 1
    result[2] = 2
    # run and check only 9990
    # Found 111333555778 * 9990 = 1112222222222220
    result[9990] = 1112222222222220
    # by hand
    # 9990 answer -> 111333555778
    # attach [1] -> 1111333555778
    # attach [3] -> 11113333555778
    # attach [5] -> 111133335555778
    # attach [7] -> 1111333355557778
    # found -> 1111333355557778
    result[9999] = 11112222222222222222
    check.remove(9990)
    check.remove(9999)
    for i in product([0, 1, 2], repeat=30):
        # build the candidate number from its digit tuple
        n = int(reduce(lambda x, y: str(x) + str(y), i))
        temp = []
        for c in check:
            if n % c == 0:
                if n == 0:
                    break  # skip the all-zero tuple (divides everything)
                result[c] = n
                temp.append(c)
                # print([n], c, len(check), check)
        for t in temp:
            check.remove(t)
        if 0 not in result:
            break  # every n has found its f(n)
    total = 0
    for i in range(1, len(result)):
        # print([i], result[i])
        total += result[i] // i
    print(total)
p303()
|
# Translation of the exercise below: build, via range() and a comprehension,
# the list of even numbers from 100 to 1000 (inclusive) and compute the
# product of all its elements using reduce().
"""Реализовать формирование списка, используя функцию range() и возможности генератора.
В список должны войти четные числа от 100 до 1000 (включая границы).
Необходимо получить результат вычисления произведения всех элементов списка.
Подсказка: использовать функцию reduce()."""
from functools import reduce

def my_func(el_p, el):
    """Multiplication step for reduce: running product times next element."""
    return el_p * el

# Fix: build the list once instead of twice (same elements: evens 100..1000).
evens = [el for el in range(99, 1001) if el % 2 == 0]
print(f'numbers from 100 to 1000 are even only: {evens}')
print(f'even numbers multiplied together: {reduce(my_func, evens)}')
|
from tank import Tank
from pump import Pump
from valve import Valve
from stirrer import Stirrer
from heater import Heater
import time
# Coffee machine state machine.  `flag` selects the current step:
# 0-1 ask amounts, 2 check milk, 3-4 check/refill water, 5 heat,
# 6 add coffee, 7-8 pump water/milk into the main tank, 9 stir,
# 10 pour into the cup, 11 done.
tanks = [Tank(500, False, False, Valve(10, 2)), # coffee
         Tank(1000, False, Heater(300), False), # water
         Tank(1000, Stirrer(10), False, Valve(50, 4)), # main tank
         Tank(1000, False, False, False), # milk
         Tank(500, False, False, False)] # cup
pumps = [Pump(30, [1, 2]), # water -> main tank
         Pump(20, [3, 2])] # milk -> main tank
flag = 0
dTime = time.time()
dTimeWater = time.time()
while True: # main loop
    if time.time() - dTime > 0.1: # 10Hz
        if flag == 0: # Question about amount of coffee
            try:
                amount = int(input("Write how much coffee do you want(ml)"))
            except ValueError:
                print("Not an number!")
                continue
            else:
                if amount > 500:
                    print("This is too much, choose amount up to 500ml")
                else:
                    flag = 1
        if flag == 1: # Question about amount of milk in coffee
            try:
                milk = int(input("Write how much milk in coffee do you want(ml) - can't excede previous number"))
            except ValueError:
                print("Not an number!")
                continue
            else:
                if milk > amount:
                    print("This is too much, choose amount up to {}ml".format(amount))
                else:
                    flag = 2
        if flag == 2: # Checking if there is enough milk
            if milk == 0:
                flag = 3
            elif tanks[3].level > (milk + 100):
                flag = 3
            else:
                print("Don't enough milk. Add milk to milk tank.")
                input("You can do that by clicking enter")
                tanks[3].level = tanks[3].capacity
        if flag == 3: # Checking if there is enough water
            if tanks[1].level > (amount - milk + 100):
                flag = 5
            else:
                print("Not enough water, pouring from valve")
                dTimeWater = time.time()
                flag = 4
        if flag == 4: # Adding water to water tank
            if time.time() - dTimeWater > 1:
                flag = 3  # re-check the water level after one second of filling
            elif tanks[1].level + 10 > tanks[1].capacity:
                print("Water tank is full")
                flag = 3
            else:
                tanks[1].level += 10
                print("Pouring water", tanks[1].level)
        if flag == 5: # Activating heater
            if not tanks[1].heater.activated:
                tanks[1].heater.set_activated(True)
            else:
                print("Heating water", tanks[1].temperature)
                tanks[1].temperature += tanks[1].heater.heatingPower / tanks[1].level
                if tanks[1].temperature > 90:
                    tanks[1].heater.set_activated(False)
                    flag = 6
        if flag == 6: # Adding coffee
            if not tanks[0].valve.open:
                tanks[0].valve.set_open(True)
            else:
                print("Adding coffee")
                tanks[0].level -= tanks[1].level / 15
                flag = 7
        if flag == 7: # Pour water into main tank
            if tanks[2].level < (amount - milk) and not pumps[0].activated:
                print("Start pumping water into main tank")
                pumps[0].set_activated(True)
            else:
                print("Enough water")
                pumps[0].set_activated(False)
                flag = 8
            if pumps[0].activated:
                pumps[0].action(tanks)
        if flag == 8: # Pour milk into main tank
            if tanks[2].level < amount and not pumps[1].activated:
                print("Start pumping milk into main tank")
                pumps[1].set_activated(True)
            else:
                print("Enough milk")
                pumps[1].set_activated(False)
                flag = 9
            if pumps[1].activated:
                pumps[1].action(tanks)
        if flag == 9: # Stir coffee
            if not tanks[2].stirrer.activated:
                tanks[2].stirrer.set_activated(True)
            else:
                for x in range(int(100 / tanks[2].stirrer.mixingSpeed)):
                    print("Stirring coffee")
                    time.sleep(0.1)
                print("Coffee is stirred")
                tanks[2].stirrer.set_activated(False)
                flag = 10
        if flag == 10: # Pouring coffee into cup
            if not tanks[2].valve.open:
                tanks[2].valve.set_open(True)
            else:
                if tanks[2].level > tanks[2].valve.volumePS:
                    tanks[2].level -= tanks[2].valve.volumePS
                    tanks[4].level += tanks[2].valve.volumePS
                    print("Pouring coffee into cup")
                else:
                    # last partial pour empties the main tank
                    tanks[4].level += tanks[2].level
                    tanks[2].level = 0
                    flag = 11
        if flag == 11: # Coffee is done
            input("Coffee is done, you can take it by clicking enter")
            tanks[4].level = 0
            flag = 0
        dTime = time.time()  # restart the 10Hz tick timer
    time.sleep(0.01)
|
#!/usr/bin/python3
def fizzbuzz():
    """Print 1..100 space-separated, replacing multiples of 3 with Fizz,
    of 5 with Buzz and of both with FizzBuzz."""
    for n in range(1, 101):
        word = ""
        if n % 3 == 0:
            word += "Fizz"
        if n % 5 == 0:
            word += "Buzz"
        print(word or n, end=' ')
|
#!/usr/bin/python3
''' 1. Write to a file '''
def write_file(filename="", text=""):
    '''Write `text` to a UTF-8 text file and return the number of
    characters written.'''
    with open(filename, mode="w", encoding="utf-8") as handle:
        return handle.write(text)
|
# Carpool arithmetic demo: same figures, f-string output.
car = 100
space_in_a_car = 4.0
drivers = 30
passenger = 90
cars_not_drivers = car - drivers          # cars left without a driver
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
averge_passenger_per_car = passenger / cars_driven
print(f"There are {car} can available.")
print(f"There are only {drivers} drivers available")
print(f"There will be {cars_not_drivers} empty car today.")
print(f"we can transport {carpool_capacity} people today")
print(f"we have {passenger} to carpool today")
print(f"we need to put about {averge_passenger_per_car} in each car")
|
import sqlite3

# Bump the Honda quantity, then dump the whole cars table.
with sqlite3.connect("cars.db") as connection:
    c = connection.cursor()
    c.execute("UPDATE cars SET Quantity=6 WHERE MAKE='Honda'")
    c.execute("SELECT * FROM cars")
    rows = c.fetchall()
    for r in rows:
        print(r[0], r[1], r[2])  # fix: was a Python 2 print statement (SyntaxError on py3)
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Using Pyndamics to Simulate Dynamical Systems
#
# Pyndamics provides a way to describe a dynamical system in terms of the differential equations, or the stock-flow formalism. It is a wrapper around the Scipy odeint function, with further functionality for time plots, phase plots, and vector fields.
#
# Page for this package: [https://code.google.com/p/pyndamics/](https://code.google.com/p/pyndamics/)
# <codecell>
from pyndamics import Simulation
# <markdowncell>
# ## Population of Mice - Exponential Growth
#
# ### Specifying the Differential Equation
# <codecell>
sim=Simulation()     # get a simulation object
sim.add("mice'=b*mice - d*mice",    # the equations
    100,            # initial value
    plot=True)       # display a plot
sim.params(b=1.1,d=0.08)
sim.run(0,4)
# <markdowncell>
# ### Specifying the Inflows/Outflows
# <codecell>
sim=Simulation()     # get a simulation object
sim.stock("mice",100,plot=False)
sim.inflow("mice","b*mice")
sim.outflow("mice","d*mice")
sim.params(b=1.1,d=0.08)
sim.run(0,4)
# <markdowncell>
# ### Plotting Manually
# <codecell>
# NOTE(review): plot/xlabel/ylabel/linspace below assume a pylab-style star
# import in the notebook environment — confirm before running as a script.
x,y=sim.t,sim.mice
plot(x,y,'r--')
xlabel('Days')
ylabel('Number of Mice')
# <markdowncell>
# ## Predator-Prey Dynamics
# <codecell>
sim=Simulation()
sim.add("deer' = r*deer*(1-deer/K)-c*deer*wolf",
    initial_value=350,
    plot=True)
sim.add("wolf' = -Wd*wolf+D*deer*wolf",
    initial_value=50,
    plot=True)
sim.params(r=0.25,D=0.001,c=0.005,Wd=0.3,K=1e500)
print(sim.equations())  # fix: was a Python 2 print statement
sim.run(0,500)
# <codecell>
from pyndamics import phase_plot
phase_plot(sim,'deer','wolf')
# <markdowncell>
# ## Exponential vs Logistic
# <codecell>
sim=Simulation()
# logistic growth
sim.add("pop' = r*pop*(1-pop/K)",
    initial_value=350,
    plot=1)
# exponential growth
sim.add("pop2' = r*pop2",
    initial_value=350,
    plot=1)
sim.params(r=0.25,K=3000)
sim.run(0,5)
# <markdowncell>
# ## Damped Spring - Second-order Differential Equations
#
# When specifying the initial conditions for a 2nd-order equation, you need to specify the value of the variable (e.g. position) and its first derivative (e.g. velocity). The simulator automatically changes the equations into a set of 1st-order equations.
# <codecell>
sim=Simulation()
sim.add("x''=-k*x/m -b*x'",[10,0],plot=True)
sim.params(k=1.0,m=1.0,b=0.5)
print(sim.equations())  # fix: was a Python 2 print statement
sim.run(0,20)
# <markdowncell>
# ## Vector Field Example
# <codecell>
sim=Simulation()
sim.add("p'=p*(1-p)",0.1,plot=True)
sim.run(0,10)
# <markdowncell>
# Arrows scaled by the magnitude...
# <codecell>
from pyndamics import vector_field
vector_field(sim,p=linspace(-1,2,20))
# <markdowncell>
# Arrows rescaled to constant value...
# <codecell>
vector_field(sim,rescale=True,p=linspace(-1,2,20))
# <markdowncell>
# ## The Lorenz System
#
# [http://en.wikipedia.org/wiki/Lorenz_system](http://en.wikipedia.org/wiki/Lorenz_system)
# <codecell>
sim=Simulation()
sim.add("x'=sigma*(y-x)",14,plot=True)
sim.add("y'=x*(rho-z)-y",8.1,plot=True)
sim.add("z'=x*y-beta*z",45,plot=True)
sim.params(sigma=10,beta=8.0/3,rho=15)
sim.run(0,50,num_iterations=10000) # increase the resolution
# <codecell>
phase_plot(sim,'x','z')
# <codecell>
phase_plot(sim,'x','y','z')
# <codecell>
|
"""
(C) Ivan Chanke 2020
Contains classes:
Node - > Layer -> Network
The three constantly refer to one another and aren't supposed to work separately
For details on math model visit GitHub directory corresponding to the project
"""
import numpy as np
import pickle
bias_signal = 1
def load_model(file):
    """
    Load a pickled network model from `file`.

    Fix: uses a context manager so the handle is closed even when
    pickle.load raises (the original leaked the handle in that case).
    NOTE: pickle is unsafe on untrusted files — only load trusted models.
    """
    with open(file, 'rb') as f:
        return pickle.load(f)
def sigmoid(x):
    """Logistic activation: maps x into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def s_df(x):
    """Derivative of the sigmoid, expressed through sigmoid itself."""
    s = sigmoid(x)
    return s * (1 - s)
class Node:
    """
    One neuron.  Keeps its network coordinates, the weight vector of its
    outgoing synapses, the accumulated weight/bias deltas, its bias
    weight (bias signal is always 1), local gradient and induced field.
    """
    def __init__(self, position):
        """
        position is a (node index, layer index) tuple
        """
        self.position = position[0]
        self.layernum = position[1]
        self.weights = None
        self.bias_weight = None
        self.local_grad = None
        self.induced_field = None
        self.weights_delta = None
        self.bias_weight_delta = 0
    def connect_node(self, layer):
        """Randomly initialise outgoing weights toward `layer`, in [-1, 1)."""
        fan_out = layer.nnodes
        self.weights = np.random.rand(fan_out) * 2 - 1
        self.weights_delta = np.zeros(fan_out)
    def apply_deltas(self):
        """Add the accumulated weight deltas, then reset them to zero."""
        self.weights = self.weights + self.weights_delta
        self.weights_delta = np.zeros(self.weights_delta.shape)
    def modify_bias(self):
        """Add the accumulated bias delta, then reset it."""
        self.bias_weight = self.bias_weight + self.bias_weight_delta
        self.bias_weight_delta = 0
class Layer:
    """
    Handles backpropagation and feeding forward.
    Stores:
    Layer index
    A list of nodes
    Number of nodes in the list (nnodes)
    """
    def __init__(self, nnodes, number):
        self.number = number
        self.nnodes = nnodes
        self.nodes = [Node((i, number)) for i in range(nnodes)]
    def connect_layer(self, other):
        """
        Connects layer self with the PREVIOUS layer other
        """
        for node in other.nodes:
            node.connect_node(self)
    def start_forward_propagation(self, other, vector):
        """
        Begins feeding forward
        This method works similarly to "propagate_forward" defined below
        The only difference is that the input vector doesn't go through the activation function
        """
        bias_weights_vector = np.array([node.bias_weight for node in other.nodes])
        weights_stack = [node.weights for node in self.nodes]
        weights_stack.append(bias_weights_vector)
        memory_matrix = np.vstack(weights_stack)
        memory_matrix = np.transpose(memory_matrix)
        for i in range(len(self.nodes)):
            self.nodes[i].induced_field = vector[i]
        vector = vector  # no-op: the input layer skips the activation function
        vector = np.append(vector, bias_signal)
        return memory_matrix.dot(vector)
    def propagate_forward(self, other, vector):
        """
        Maps input vector of layer i to input vector of layer i+1
        """
        bias_weights_vector = np.array([node.bias_weight for node in other.nodes]) # Composes a vector of bias weigths for layer other
        weights_stack = [node.weights for node in self.nodes]
        weights_stack.append(bias_weights_vector)
        memory_matrix = np.vstack(weights_stack) # Stacks up weight vectors of each node in self and bias weight vector of other
        memory_matrix = np.transpose(memory_matrix) # Each column is a weight vector; ncols = nnodes + 1, nrows + 1 = nsynapses;
        for i in range(len(self.nodes)):
            self.nodes[i].induced_field = vector[i] # Each node stores its induced field which is later used to compute local_grad
        vector = sigmoid(vector) # Vector goes through the activation function
        vector = np.append(vector, bias_signal) # Bias signal is added to a vector
        return memory_matrix.dot(vector) # Returns input vector for the next layer
    def start_backpropagation(self, e, learning_rate): # For output layer only; computes local_grad for each node in self; e - error vector
        """
        Computes a local gradient for each neuron in the output vector
        """
        for i in range(len(self.nodes)):
            self.nodes[i].local_grad = s_df(self.nodes[i].induced_field) * e[i]
            self.nodes[i].bias_weight_delta += learning_rate * self.nodes[i].local_grad * bias_signal
    def propagate_backward(self, other, learning_rate): # For layers except output only; connection scheme: self-other
        """
        Computes local gradients for nodes in self
        """
        for i in range(len(self.nodes)): # Local_grad for each node in layer self is computed
            lgv = np.array([node.local_grad for node in other.nodes]) # Local gradient vector for other
            d = s_df(self.nodes[i].induced_field) * np.dot(lgv, np.transpose(self.nodes[i].weights))
            self.nodes[i].local_grad = d
        """
        Computes deltas using local gradients
        """
        for node in self.nodes:
            deltas = []
            for i in range(len(node.weights)):
                delta = learning_rate * sigmoid(node.induced_field) * other.nodes[i].local_grad
                deltas.append(delta)
            bias_delta = learning_rate * node.local_grad * bias_signal
            node.weights_delta += np.array(deltas)
            node.bias_weight_delta += bias_delta
class Network:
    """
    Feed-forward neural network assembled from Layer objects.

    Stores:
    Current signal
    A list of layers
    last mse
    Number of epoch trained
    Task it performs
    Initializing a network also initializes its layers and nodes in them
    """
    def __init__(self, structure): # Structure is a tuple of nnodes in each layer
        self.structure = structure
        self.signal = None        # output vector of the most recent forward pass
        self.layers = []
        self.mse = None           # mean squared error accumulated over the last epoch
        self.epochs = 0
        self.printing_option = None  # 1 => print "input : output" during training
        for i in range(len(structure)): # Constructing layers
            self.layers.append(Layer(structure[i], i))
        for i in range(1, len(self.layers)):
            self.layers[i].connect_layer(self.layers[i - 1])
        for i in range(1, len(self.layers)): # Initializing bias weights
            for j in range(len(self.layers[i].nodes)):
                # uniform in [-1, 1)
                self.layers[i].nodes[j].bias_weight = (np.random.rand()) * 2 - 1
    def feed_forward(self, vector):
        """
        Maps input-output: propagates vector through all layers and returns
        the raw (pre-activation) signal of the output layer.
        """
        self.signal = self.layers[0].start_forward_propagation(self.layers[1], vector)
        for i in range(1, len(self.layers) - 1): # Last layer does nothing, hence range(len - 1)
            self.signal = self.layers[i].propagate_forward(self.layers[i + 1], self.signal)
        for i in range(len(self.layers[-1].nodes)): # Stores last layer's nodes' induced fields
            self.layers[-1].nodes[i].induced_field = self.signal[i]
        return self.signal
    def feed_backward(self, error_vector, learning_rate):
        """
        Backpropagation
        Recursively computes deltas for weights; applies them
        """
        self.layers[-1].start_backpropagation(error_vector, learning_rate)
        for i in range(len(self.layers) - 1, 0, -1):
            self.layers[i - 1].propagate_backward(self.layers[i], learning_rate)
    def learning_iteration(self, batch_tuple, learning_rate):
        """
        One complete learning epoch
        batch_tuple is a tuple of tuples: ((in, desired_out), (...), ..., (...))
        """
        self.epochs += 1
        self.mse = 0
        for instance in batch_tuple:
            output = sigmoid(self.feed_forward(np.array(instance[0])))
            error_vector = instance[1] - output
            self.mse += sum(error_vector**2)
            self.feed_backward(error_vector, learning_rate)
            if self.printing_option == 1:
                print(instance[0], ':', output)
            else:
                print(output)
        # Apply the accumulated deltas once per epoch (batch learning):
        # input layer has weights only, hidden layers weights + bias,
        # the output layer bias only.
        for node in self.layers[0].nodes:
            node.apply_deltas()
        for i in range(1, len(self.layers) - 1):
            for node in self.layers[i].nodes:
                node.apply_deltas()
                node.modify_bias()
        for node in self.layers[-1].nodes:
            node.modify_bias()
        print('MSE:', self.mse)
        print('----------------')
    def set_printing_option(self, arg):
        # 1 enables echoing of the input alongside the output during training
        self.printing_option = arg
    def save(self, file):
        """
        Stores current model as a file
        """
        # NOTE(review): prefer `with open(file, 'wb') as f:` so the handle
        # closes even if pickle.dump raises.
        f = open(file, 'wb')
        pickle.dump(self, f)
        f.close()
    def ask(self, question):
        # One inference pass: forward-propagate and pretty-print the response.
        output = sigmoid(self.feed_forward(np.array(question)))
        print('----------------')
        if self.printing_option == 1:
            print('Input:', np.array(question))
        print('Response:', output)
print('Successfully imported ptron')  # import-time confirmation banner
|
# Exercise 30: http://www.ling.gu.se/~lager/python_exercises.html
def translate(words):
    '''A function that takes a list of English words and
    returns a list of Swedish words.'''
    dictionary = {"merry":"god", "christmas":"jul",
    "and":"och", "happy":"gott", "new":"nytt", "year":"år"}
    # Bug fix: map() returns a lazy iterator in Python 3; materialize it so
    # the function actually returns the list promised by the docstring.
    return [dictionary[w] for w in words]
|
# Exercise 10: http://www.ling.gu.se/~lager/python_exercises.html
def overlapping(list1, list2):
    '''A function that takes two lists and returns True if they
    have at least one member in common, False otherwise.'''
    # any() short-circuits on the first common member, replacing the
    # original nested loops with a manual break flag.
    return any(j == z for j in list1 for z in list2)
|
# Exercise 6: http://www.ling.gu.se/~lager/python_exercises.html
def sum(nums):
    '''Add together all values in nums and return the total.
    (Intentionally shadows the built-in sum — exercise requirement.)'''
    total = 0
    for value in nums:
        total = total + value
    return total
def multiply(nums):
    '''Multiply together all values in nums and return the product
    (1 for an empty list).'''
    product = 1
    for value in nums:
        product = product * value
    return product
|
#!/usr/bin/env python
"""
Description : Change colours on an RGB LED
Author : Russell
E-mail : [email protected]
Date : 2020/08/26
Circuit : https://crcit.net/c/8bc8330a72874a9d97dc4b0c0dcb0748
My RGB LED has a common anode
In this case 0 means on full and 1 is off
Other RGB LEDs have a common cathode
In that case 1 means on full and 0 is off
"""
from gpiozero import RGBLED
from time import sleep
from signal import pause  # NOTE(review): imported but never used below
print(__doc__)
led = RGBLED(red=10, green=9, blue=11)
# Colour tuples are (red, green, blue); 0 = fully on for this common-anode LED.
while True:
    led.color = (0,1,1)  # red only
    print ("red")
    sleep(5)
    led.color = (1,0,1) # full green
    print("green")
    sleep(5)
    led.color = (1,1,0)  # blue only
    print ("blue")
    sleep(5)
    led.color = (0, 1, 0) # magenta
    print("magenta")
    sleep(5)
    led.color = (0, 0, 1) # yellow
    print("yellow")
    sleep(5)
    led.color = (1, 0, 0) # cyan
    print("cyan")
    sleep(5)
    led.color = (0,0,0) # white
    print("white")
    sleep(5)
"""
# slowly decrease intensity of blue
for n in range(100):
    led.blue = n/100
    sleep(0.1)
"""
|
#!/usr/bin/env python
# Compare two files and print the first line at which they differ.
# Fixes: files were opened twice and never closed; the handles were even
# rebound by `f1 = f1.readlines()`. Now each file is read once under `with`.
from sys import argv,exit
import os
if len(argv) != 3:
    print('require 3 arguments')
    exit()
if not os.path.exists(argv[1]):
    print('no file of name '+argv[1])
    exit()
if not os.path.exists(argv[2]):
    print('no file of name '+argv[2])
    exit()
# begin the code
with open(argv[1]) as f1, open(argv[2]) as f2:
    lines1 = f1.readlines()
    lines2 = f2.readlines()
l1 = len(lines1)
l2 = len(lines2)
if l1 != l2:
    print('files differ in size '+str(l1)+' '+str(l2))
for i in range(min(l1, l2)):
    if lines1[i] != lines2[i]:
        print("diff in line "+str(i+1))
        print("line in file 1 :\n"+lines1[i])
        print("line in file 2 :\n"+lines2[i])
        exit()
print("files are identical")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from datetime import datetime, timezone, timedelta
def cur_time():
    """
    Return the current time as a timezone-aware datetime in UTC+8.

    :return: e.g. 2021-05-27 02:29:12.096762+08:00
    """
    # datetime.utcnow() produces a naive timestamp (and is deprecated since
    # Python 3.12); build an aware UTC timestamp directly instead.
    utc_now = datetime.now(timezone.utc)
    diff_8 = timezone(timedelta(hours = 8))
    return utc_now.astimezone(diff_8)
def diff(seconds):
    """
    Convert a number of seconds into a readable string such as
    "1小时 1分 40秒 "; the hour part is omitted when it is zero.

    :param seconds: non-negative number of seconds
    :return: formatted string
    """
    s = ""
    # Bug fix: the original used float division, so `hours > 0` was true for
    # any seconds > 0 and e.g. diff(30) produced a spurious "0小时 " prefix.
    hours, seconds = divmod(seconds, 3600)
    if hours > 0:
        s = "%d小时 " % hours
    minutes, seconds = divmod(seconds, 60)
    s = s + ("%d分 " % minutes)
    s = s + ("%d秒 " % seconds)
    return s
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 5 19:52:41 2021
@author: HP
"""
# For each of N test cases, read one line of space-separated integers and
# print the second element from the end after sorting.
# NOTE(review): with duplicate maxima this prints a value equal to the
# maximum, not the second *distinct* largest — confirm which is intended.
N=int(input())
for i in range(N):
    num=list(map(int,input().split(" ")))
    num.sort()
    print(num[-2])
|
#! /usr/bin/env python
# Emit a random test case: a random count (0..800) of random integers,
# one per line, terminated by a single 0 sentinel.
from random import *
num = randint(0, 800);
for i in range(num):
    # NOTE(review): randint is inclusive at both ends, so 2**31 itself may be
    # produced, which overflows a signed 32-bit int — confirm intended range.
    print(randint(-2**31, 2**31))
print(0)
|
def hs(a):
    """Heap sort the list a in place, using the sift-down helper heapiy."""
    size = len(a)
    # Phase 1: build a max-heap bottom-up, starting at the last internal node.
    for root in range(size // 2 - 1, -1, -1):
        heapiy(a, size, root)
    # Phase 2: repeatedly swap the max to the end and restore the heap.
    for end in range(size - 1, 0, -1):
        a[end], a[0] = a[0], a[end]
        heapiy(a, end, 0)
def heapiy(a, n, i):
    """Sift a[i] down so the subtree rooted at i (within a[:n]) is a max-heap."""
    left, right = 2 * i + 1, 2 * i + 2
    largest = i
    if left < n and a[left] > a[largest]:
        largest = left
    if right < n and a[right] > a[largest]:
        largest = right
    if largest != i:
        a[i], a[largest] = a[largest], a[i]
        heapiy(a, n, largest)
# Demo: heap-sort a sample array and print it, one element per line.
a = [27, 10, 20, 7, 9, 17]
hs(a)
n = len(a)
print("sorted array are: ")
for i in range(n):
    # Bug fix: the original used the Python 2 idiom `print(...),`, which in
    # Python 3 builds and discards a (None,) tuple instead of suppressing
    # the newline.
    print("%d" % a[i])
|
class Textil:
    """Fabric-consumption calculator for garments of a given width/height."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def get_square_coat(self):
        """Fabric area needed for a coat."""
        return self.width / 6.5 + 0.5

    def get_square_jacket(self):
        """Fabric area needed for a jacket."""
        return self.height * 2 + 0.3

    @property
    def get_square_full(self):
        """Total fabric area for both garments, as a display string."""
        total = self.get_square_coat() + self.get_square_jacket()
        return str(f'Площадь общая ткани \n {total}')
class Coat(Textil):
    """Coat: caches its own fabric area at construction time."""

    def __init__(self, width, height):
        super().__init__(width, height)
        # same formula as Textil.get_square_coat
        self.square_coat = self.get_square_coat()

    def __str__(self):
        return f'Площадь на пальто {self.square_coat}'
class Jacket(Textil):
    def __init__(self, width, height):
        super().__init__(width, height)
        # NOTE(review): uses height / 2 while Textil.get_square_jacket uses
        # height * 2 — confirm which formula is intended.
        self.square_jacket = self.height / 2 + 0.3
    def __str__(self):
        return f'Площадь на костюм {self.square_jacket}'
# Demo: per-garment areas and the combined total.
coat = Coat(8, 5)
jacket = Jacket(1, 2)
print(coat)
print(jacket)
print(coat.get_square_coat())
print(jacket.get_square_jacket())
# get_square_full is a property, hence no call parentheses
print(coat.get_square_full)
print(jacket.get_square_full)
|
class Worker:
    """Employee record; income details live in a protected dict read by subclasses."""

    def __init__(self, name, surname, position, wage, bonus):
        self.name = name
        self.surname = surname
        self.position = position
        # protected by convention: accessed via Position.get_total_income()
        self._income = dict(wage=wage, bonus=bonus)
class Position(Worker):
    # Thin subclass exposing the full name and total income of a Worker.
    def __init__(self, name, surname, position, wage, bonus):
        super().__init__(name, surname, position, wage, bonus)
    def get_full_name(self):
        # "Name Surname"
        return self.name + ' ' + self.surname
    def get_total_income(self):
        # wage + bonus, taken from the protected income dict
        return self._income.get('wage') + self._income.get('bonus')
manager = Position('Иван', 'Иванов', 'менеджер', 500, 100)
print(f'Полное имя - {manager.get_full_name()}')
print(f'Доход = {manager.get_total_income()}')
|
# Keep my_list sorted in descending order while inserting user-entered numbers.
# Bug fix: the original elif-chain lacked a break after the "between two
# elements" insertion, so one input could be inserted several times, and
# `my_list[elem + 1]` could be evaluated past the end of the list.
my_list = [7, 5, 3, 3, 2]
print(my_list)
digit = int(input("Введите число (или 000 для выхода): "))
while digit != 000:
    if my_list[0] < digit:
        my_list.insert(0, digit)       # new maximum goes to the front
    elif my_list[-1] > digit:
        my_list.append(digit)          # new minimum goes to the back
    else:
        for elem in range(len(my_list)):
            if my_list[elem] == digit:
                my_list.insert(elem + 1, digit)
                break
            if my_list[elem] > digit and my_list[elem + 1] < digit:
                my_list.insert(elem + 1, digit)
                break
    print(f"Текущий список - {my_list}")
    digit = int(input("Введите число (или 000 для выхода): "))
|
class ComplexNumber:
    """Numbers of the form a + b*i, where a, b are real and i is the imaginary unit."""
    def __init__(self, a, b, *args):
        self.a = a  # real part
        self.b = b  # imaginary part
        self.z = 'a + b * i'
    def __add__(self, other_numb):
        # (a1 + b1 i) + (a2 + b2 i) = (a1 + a2) + (b1 + b2) i
        print(f'Сумма z1 и z2 равна')
        return f'z = {self.a + other_numb.a} + {self.b + other_numb.b} * i'
    def __mul__(self, other_numb):
        # (a1 + b1 i)(a2 + b2 i) = (a1 a2 - b1 b2) + (a1 b2 + b1 a2) i
        # Bug fix: the imaginary part previously omitted the a1*b2 term.
        print(f'Произведение z1 и z2 равно')
        return f'z = {self.a * other_numb.a - (self.b * other_numb.b)} + {self.a * other_numb.b + self.b * other_numb.a} * i'
    def __str__(self):
        return f'z = {self.a} + {self.b} * i'
# Demo: z1 = 1 - 2i, z2 = 3 + 4i
num1 = ComplexNumber(1, -2)
num2 = ComplexNumber(3, 4)
print(num1)
print(num2)
print(num1 + num2)  # sum
print(num1 * num2)  # product
|
def reverse_number(number):
    """Return the decimal digits of a non-negative integer, reversed, as a string."""
    quotient, digit = divmod(number, 10)
    if quotient == 0:
        return str(digit)
    return str(digit) + reverse_number(quotient)
# Read an integer and print its digits in reverse order.
number = int(input("Введите число: "))
print(f'Перевернутое число = {reverse_number(number)}')
|
class Test():
    '''This is a test for making a class.'''
    def InitializeMethoc(self, one, two, three):
        '''Store the three given values as instance attributes.

        NOTE(review): this looks like it was meant to be __init__; the name
        is kept unchanged so existing callers keep working.
        '''
        # The self argument is passed AUTOMATICALLY.
        # The SELF-prefixed attributes become available in every method.
        self.one = one
        self.two = two
        self.three = three
    def AttributeMethodOne(self):
        '''Constructing the first attribute - IDK?

        Bug fix: an instance method needs an explicit self parameter;
        without it, calling the method on an instance raised TypeError.
        '''
|
# using a function to return a dictionary
def build_person(firstname, lastname, age=None):
    '''Return a dictionary of information about a person.

    The 'age' key is included only when an age is supplied. Bug fix: the
    original truthiness test (`if age:`) silently dropped age 0; testing
    against the None sentinel stores it correctly.
    '''
    person = {'first': firstname, 'last': lastname}
    if age is not None:
        person['age'] = age
    return person
musician = build_person('jimi', 'hendrix', age=27)
print(musician)  # {'first': 'jimi', 'last': 'hendrix', 'age': 27}
|
class Car():
    '''A simple attempt to represent a car.'''

    def __init__(self, make, model, year):
        '''Initialize attributes to describe a car.'''
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0  # every car starts with a fresh odometer

    def get_descriptive_name(self):
        '''Return a neatly formatted descriptive name.'''
        pieces = [str(self.year), self.make, self.model]
        return ' '.join(pieces).title()

    def read_odometer(self):
        '''Print a statement showing the car's mileage.'''
        print("This car has " + str(self.odometer_reading) + " miles on it.")

    def update_odometer(self, mileage):
        '''
        Set the odometer reading to the given value.
        Reject the change if it attempts to roll the odometer back.
        '''
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
            return
        self.odometer_reading = mileage

    def increment_odometer(self, miles):
        '''Add the given amount to the odometer reading.'''
        self.odometer_reading = self.odometer_reading + miles
# make instance of Car class
my_new_car = Car('audi', 'a4', 2016)
print(my_new_car.get_descriptive_name())
# modifying an attribute's value directly
my_new_car.odometer_reading = 23
my_new_car.read_odometer()
# modifying an attribute's value through a method
my_new_car.update_odometer(22)
my_new_car.read_odometer()
# incrementing an attribute's value through a method
my_used_car = Car('subaru', 'outback', 2013)
print("\n" + my_used_car.get_descriptive_name())
my_used_car.update_odometer(23500)
my_used_car.read_odometer()  # bug fix: was missing (), so it never printed
my_used_car.increment_odometer(100)
my_used_car.read_odometer()
|
# Build a "triangle" list of lists where row i holds the numbers 1..i.
num=int(input("enter a no="))
i=1
a=[ ]
while i<=num:
    j=1
    b=[ ]
    if i==1 and j==1:
        # NOTE(review): the first row is appended while still empty, so it
        # becomes [] instead of [1] — confirm this is intended.
        a.append(b)
    else:
        while j<=i:
            b.append(j)
            j=j+1
        a.append(b)
    i=i+1
print(a)
|
import numpy as np
import matplotlib.pyplot as plt
def random_walk(walk_length):
    """Simulate a simple +/-1 random walk.

    :param walk_length: number of steps to take
    :return: (steps, positions) where steps is the array of +/-1 moves and
             positions[i] is the walk position after step i
    """
    steps = np.random.choice([-1, 1], size = walk_length, replace = True, p = [0.5, 0.5])
    # np.cumsum computes all prefix sums in one pass, replacing the original
    # O(n^2) loop that re-summed the prefix at every step.
    positions = list(np.cumsum(steps))
    return steps, positions
# Plot a 1000-step walk with a red zero reference line.
samples, walk = random_walk(1000)
plt.plot(walk)
plt.title('Random Walk')
plt.xlabel('Step Number')
plt.ylabel('Sum')
plt.axhline(y = 0, color = 'red')
plt.show()
|
def check_positive(v):
    """Argparse `type=` helper: parse v as an int and require it to be positive."""
    import argparse
    value = int(v)
    if value > 0:
        return value
    raise argparse.ArgumentTypeError(
        f"{v} is an invalid number. Please use only positive")
|
def input_float(msg):
    """Prompt repeatedly until the user enters a valid float; return it."""
    result = None
    while result is None:
        try:
            result = float(input('Введите ' + msg + ' :'))
        except ValueError:
            # non-numeric input: keep asking
            pass
    return result
# Read dividend and divisor, print the quotient; division by zero gets a
# friendly message instead of a traceback.
x = input_float('делимое')
y = input_float('делитель')
try:
    print("Результат деления", x, 'на', y, 'равен', x / y)
except (ZeroDivisionError, ValueError):
    print('Будьте внимательнее! Делитель не может быть равен нулю.')
|
# LeetCode 191: number of 1 bits (popcount)
class Solution:
    def hammingWeight(self, n: int) -> int:
        """Count the set bits of n using Kernighan's n & (n - 1) trick."""
        count = 0
        while n:
            n &= n - 1  # clears the lowest set bit
            count += 1
        return count
# LeetCode 231: power of two
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """A positive n is a power of two iff it has exactly one set bit."""
        if n <= 0:
            return False
        return n & (n - 1) == 0
# LeetCode 1122: relative sort array (counting sort; values are 0..1000)
class Solution:
    def relativeSortArray(self, arr1: 'List[int]', arr2: 'List[int]') -> 'List[int]':
        """Sort arr1 so elements follow arr2's order; leftovers go last, ascending.

        Bug fix: the annotations are quoted because `List` is never imported
        in this file and unquoted annotations are evaluated at definition
        time, raising NameError.
        """
        counts = [0 for _ in range(1001)]
        result = []
        for value in arr1:
            counts[value] += 1
        for value in arr2:
            while counts[value] > 0:
                result.append(value)
                counts[value] -= 1
        for value in range(len(counts)):
            while counts[value] > 0:
                result.append(value)
                counts[value] -= 1
        return result
# LeetCode 56: merge overlapping intervals
class Solution:
    def merge(self, intervals: 'List[List[int]]') -> 'List[List[int]]':
        """Merge all overlapping intervals after sorting by start point.

        Bug fix: annotations quoted — `List` is never imported in this file,
        so the unquoted form raises NameError at definition time.
        """
        if not intervals: return []
        intervals.sort()
        res = [intervals[0]]
        for x, y in intervals[1:]:
            if res[-1][1] < x:
                res.append([x, y])       # disjoint: start a new interval
            else:
                res[-1][1] = max(y, res[-1][1])  # overlap: extend the last one
        return res
# LeetCode 493: reverse pairs — count i < j with nums[i] > 2 * nums[j]
class Solution:
    def reversePairs(self, nums: 'List[int]') -> int:
        """Count reverse pairs via a sorted list of doubled values.

        Scanning from the right, `doubled` holds 2*nums[j] (sorted) for every
        j already seen; bisect_left counts how many are < nums[i].
        Fixes: `bisect` was referenced without an import, and the unquoted
        `List` annotation raised NameError at definition time.
        """
        import bisect
        doubled = []
        count = 0
        for value in reversed(nums):
            count += bisect.bisect_left(doubled, value)
            bisect.insort_left(doubled, 2 * value)
        return count
|
import requests
from bs4 import BeautifulSoup
"""This script uses BeautifulSoup to scrape the links for all of the studies currently on clinicaltrials.gov off of
a clinicaltrials.gov page for webcrawlers."""
def ClinicalCrawl():
    """Scrape NCT ids of all studies on clinicaltrials.gov and write them,
    one per line (repr form), to nct_id_list.txt.

    Fixes: Python 2 `print nct_id` statement, output file not closed via a
    context manager, and the unused trial_id_list variable.
    """
    data = requests.get("http://clinicaltrials.gov/ct2/about-site/crawling").text
    crawl_page_soup = BeautifulSoup(data)
    trial_links = []
    for link in crawl_page_soup.find_all('a'):
        valid_link = link.get('href')
        if valid_link[0:11] == "/ct2/crawl/":
            trial_links.append(valid_link)
    with open("nct_id_list.txt", 'w') as id_file:
        for group in trial_links:
            data = requests.get("http://clinicaltrials.gov" + group).text
            link_soup = BeautifulSoup(data)
            for link in link_soup.find_all('a'):
                valid_link = link.get('href')
                if valid_link[0:10] == "/ct2/show/":
                    nct_id = valid_link[10:]
                    print(nct_id)
                    id_file.write(repr(nct_id) + "\n")
ClinicalCrawl()  # run the crawl when the script is executed
|
# Resolução referente 3 - Funções
"""
3 - Crie uma função que receba 2 números. O primeiro é um valor e o segundo um
percentual (ex. 10%). Retorne (return) o valor do primeiro número somado
do aumento do percentual do mesmo.
"""
def aumento(valor, porct):
    """Return valor plus porct percent of it (final price with the markup)."""
    acrescimo = valor * (porct / 100)
    return acrescimo + valor
# Read price and percentage, then show the final price with the markup applied.
valor = float(input('Digite o preço o produto: R$'))
porct = float(input('Digite a taxa(%) de imposto sobre o valor: '))
vlr_final = aumento(valor, porct)
print(f'O Preço final é R${vlr_final:0.2f}')
|
# List-comprehension exercise
'''
Challenge: split the long digit string into groups of 10 characters
(each one "0123456789"), then join the groups with a dot separator.
'''
string = '01234567890123456789012345678901234567890123456789012345678901234567890123456789'
lista_separado = [string[i:i + 10] for i in range(0, len(string), 10)]  # 10-char slices
ponto_separa = '.'. join(lista_separado)  # dot-joined groups
print(lista_separado)
print(ponto_separa)
|
import json
class JsonManager:
    """ It will loads data and dumps data into json. Written By Saurav Paul"""
    @staticmethod
    def json_read(json_file):
        """Return the parsed contents of json_file, or None if reading fails."""
        try:
            with open(json_file, "r") as read_file:
                data = json.load(read_file)
                return data
        except Exception as e:
            # best-effort: report the problem and fall through (returns None)
            print(e)
    @staticmethod
    def json_write(json_file, data=None):
        """Serialize data (default: empty dict) into json_file."""
        if data is None:
            data = {}
        try:
            with open(json_file, "w") as write_file:
                json.dump(data, write_file)
            # removed dead `json.dumps(data)` call whose result was discarded
        except Exception as e:
            print(e)
|
'''
Input: a List of integers as well as an integer `k` representing the size of the sliding window
Returns: a List of integers
'''
def sliding_window_max(nums, k):
    """Return the maximum of every length-k contiguous window of nums.

    The original parity-dependent index arithmetic (extension/window_range)
    reduces exactly to the standard len(nums) - k + 1 windows; this spells
    that out directly and removes the special cases.
    """
    return [max(nums[i:i + k]) for i in range(len(nums) - k + 1)]
if __name__ == '__main__':
    # Use the main function here to test out your implementation
    arr = [1, 3, -1, -3, 5, 3, 6, 7]
    k = 3
    # expected output: [3, 3, 5, 5, 6, 7]
    print(
        f"Output of sliding_window_max function is: {sliding_window_max(arr, k)}")
|
#! python3
# mail_lists.py - copy a named address list to the clipboard
email_list = {
    'list_a': '[email protected],[email protected]',  # bug fix: key was misspelled 'lsit_a'
    'list_b': '[email protected], [email protected]',
    'list_c': '[email protected], [email protected]',
}
import sys, pyperclip
if len(sys.argv) < 2:
    print('Usage of the program is: py mail_lists.py [list_name] - copy the list items from the dictionary key item')
    sys.exit()
list_name = sys.argv[1]  # first command line arg is the dictionary key item name
if list_name in email_list:
    pyperclip.copy(email_list[list_name])
    print('the mail list ' + list_name + ' has been copied to clipboard.')
else:
    print('There is no mail list name ' + list_name)
|
# Returns a new string where for every char in the original string, there are two chars.
def double_char(string):
    """Duplicate every character, e.g. 'ab' -> 'aabb'."""
    return "".join(ch + ch for ch in string)
print(double_char("The"))          # TThhee
print(double_char("AAABB"))        # AAAAAABBBB
print(double_char("Hi-There"))     # HHii--TThheerree
print(double_char("Hello World"))
print(double_char("Hii everyoneeee"))
|
"""
This program calculates the sum of the array elemets
The approach i used was i started from the last element and came to the first element
and calculated the result
"""
def sumArray(arr, leng):
    """Recursively sum the first leng elements of arr (0 for an empty prefix)."""
    if leng == 0:  # base case: nothing left to add
        return 0
    return sumArray(arr, leng - 1) + arr[leng - 1]
#leng is 3 and we start from 2 as the index starts from 2 1 0 & and we add the recursion function and we start from leng-1
arr = [1,2,3,4,5]
leng = len(arr)
print(sumArray(arr,leng))  # 15
|
#loops in python resolve to two: while and for loops for simplicity
def main():
    """Demonstrate while/for loops, continue, break and enumerate."""
    x = 0
    while(x<=10):
        print(x)
        x+=1
    for x in range(5,10):
        print(x)
    daysOfWeek = ["Mon","Tue","Wed","Thur","Fri","Sat","Sun"]
    for x in daysOfWeek:
        print(x)
    for x in range(0,10):
        if(x%2!=0):continue  # skip odd numbers
        print(x)
    #enumerate provides us with the index of a particular element
    daysOfWeek = ["Mon","Tue","Wed","Thur","Fri","Sat","Sun"]
    for i,x in enumerate(daysOfWeek):
        if(i>=5):break  # stop before the weekend days
        print(i,x)
if __name__ == "__main__":
    main()
|
"""
This is a "nester.py" module, and it provides one function called
print_lol() which prints list that may or maynot include nested list
"""
def print_lol(the_list):
    """
    Recursively print every data item of the_list — which may or may not
    contain nested lists — to the screen, one item per line.
    """
    for item in the_list:
        if isinstance(item, list):
            print_lol(item)  # descend into the nested list
        else:
            print(item)
|
# Logical operators: building compound conditions
# Logical AND: both operands must hold
import random
# a = random.randint(1,5)
# if a > 1 and a < 3:
#     print("true")
# else:
#     print("false")
# Values that convert to False: '' 0 0.0 False None
# If the first operand is truthy, `and` evaluates to the second operand
print(11 and "ok")
# If the first operand is falsy, `and` evaluates to the first operand
print(0 and 15)
# If the first operand is falsy, `and` skips the second operand entirely —
# short-circuit evaluation
a = 3
a > 3 and exit()
print("asdasd")
# Logical OR
# If the first operand is truthy, `or` evaluates to the first operand
print('ok' or 15)
# If the first operand is falsy, `or` evaluates to the second operand
print(0 or 10)
# If the first operand of `or` is truthy, the second is never evaluated —
# short-circuit again
1 or print("****")
# Logical NOT: truthy becomes False, falsy becomes True
print(not 10) # False
print(not '') # True
# Leap year: divisible by 4 but not by 100, or divisible by 400
year = random.randint(1970, 2019)
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
    print("%d年是闰年" % year)
else:
    print("%d年不是闰年" % year)
|
# 7. Compute and print the sum of all numbers in [1, 1000) divisible by 3, 5 and 7
a = 1   # current candidate
b = []  # numbers divisible by 3, 5 and 7 (i.e. by 105)
c = 0   # running total
while a < 1000:
    if a % 3 == 0 and a % 5 == 0 and a % 7 == 0:
        b.append(a)
    a += 1
print(b)
for i in range(len(b)):
    c += b[i]
print(c)
|
# A function without a return statement implicitly returns None
# def add(a,b):
#     print(a + b)
# res = add(2,3)
# print(res)
# def add(a,b):
#     # return hands back a single value
#     # and ends the function call immediately
#     return a + b
#     print("hello")
# res = add(3,2)
# print(res)
# def add(a,b):
#     if a < 0:
#         return -a + b
#     else:
#         print("hello")
#         return 10
def demo9(a,b):
    """
    Docstring example (printed below via __doc__).
    :param a:
    :param b:
    :return: the tuple (1, 2, 3, 4) — multiple return values become one tuple
    """
    # "returning several values" really returns a single tuple
    return 1,2,3,4
res = demo9(3,4)
print(res)
print(demo9.__doc__)
|
# The built-in map function:
# walks an iterable and applies the given function to each element
# a = [10,23,90,83]
# def mul(x):
#     return 2*x
# map returns a lazy iterator
# res = map(mul,a)
# print(res)
# print(list(map(mul,a)))
# a sketch of map's internal implementation
# def tmp(mul,a):
#     for i in range(len(a)):
#         yield mul(a[i])
# res = tmp(mul,a)
# print(res,list(res))
# res = map(lambda x:2*x,a)
# print(list(res))
a = [10,23,90,83]
# res = map(lambda x,y:x+y,a,a[1:])
# print(list(res))
def tmp(func, *args):
    """Generator mimicking multi-iterable map: stops at the shortest input."""
    min1 = min([len(x) for x in args])  # length of the shortest sequence
    for i in range(min1):
        yield func(*[x[i] for x in args])
res = tmp(lambda x,y:x+y,a,a[1:])
print(list(res))
|
# Simulate rolling a die:
# first draw a random number in [1, 6],
# then decide what to eat based on it
import random
suiji = random.randint(1,6)
# if-elif version
# if suiji == 1:
#     print("吃1")
# elif suiji == 2:
#     print("吃2")
# elif suiji == 3:
#     print("吃3")
# elif suiji == 4:
#     print("吃4")
# elif suiji == 5:
#     print("吃5")
# elif suiji == 6:
#     print("吃6")
# plain-if version
# if suiji == 1:
#     print("吃1")
# if suiji == 2:
#     print("吃2")
# if suiji == 3:
#     print("吃3")
# if suiji == 4:
#     print("吃4")
# if suiji == 5:
#     print("吃5")
# if suiji == 6:
#     print("吃6")
# nested if-else version
if suiji == 1:
    print("炸蝎子")
else:
    if suiji == 2:
        print("炸蚕蛹")
    else:
        if suiji == 3:
            print("炸蜘蛛")
        else:
            if suiji == 4:
                print("吃点啥")
            else:
                if suiji == 5:
                    print("炸蚂蚱")
                else:
                    if suiji == 6:
                        print("不吃了")
|
# Class attributes and class methods
class Student:
    # class attribute shared by all instances: number of live students
    stu_num = 0
    # private (name-mangled) class attribute
    __num = 5
    def __init__(self,name,age):
        # instance attributes
        self.name = name
        self.age = age
        # self.__class__ retrieves the class of this instance
        self.__class__.stu_num += 1
    def __del__(self):
        # keep the counter in sync when an instance is destroyed
        self.__class__.stu_num -= 1
    # def change_num(self,num):
    #     self.stu_num += 10  # a class attribute can be read via self.<name>
# obj1 = Student("tom",20)
# obj1.change_num(10)
# a class attribute can also be referenced via the class name
# print(Student.stu_num)
# ...or via an instance
# print(obj1.stu_num)
# private class attributes cannot be accessed from outside the class
# print(Student.__num)
obj1 = Student("tom",20)
obj2 = Student("tom",20)
obj3 = Student("tom",20)
obj4 = Student("tom",20)
print(obj1.stu_num)
|
# Assigning tuple elements to variables — "unpacking"
# t1 = (10,20,30)
# t1 = 10,20,30 # defines the tuple (10,20,30)
# a,b,c = t1
# a, b = 2, 3
# a, b = b, a
# print(a,b,c)
# more tuple elements than variables
# a, b, *last = 10, 20, 30, 40
# # the starred variable collects the remaining elements into a list
# print(a,b,last)
# print(type(last))
# a, *_, b = 10, 20, 30, 40
# print(a,_,b)
# list unpacking
# a,b = [10,20]
# print(a,b)
# a,b,*_ = [1,2,3,4,5]
# print(a,b,_)
#
# # string unpacking
# a, b, *_ = "hello"
# print(a,b,_)
#
# # range
# a,b, *_ = range(5)
# print(a,b,_)
grade = [
    ('小风', 90),
    ('黄毅', 87),
    ('金庸', 67)
]
# tuple unpacking in a for loop: each (name, score) pair is unpacked
for name,score in grade:
    print(name,score)
|
# Han Xin counts soldiers: remainder 2 mod 5, 4 mod 7, 7 mod 10 — smallest count
# count = 0
# while 1:
#     count += 1
#     if count % 5 == 2 and count % 7 == 4 and count % 10 == 7:
#         print(count)
#         break
# Brute-force solving of the Diophantine equation
# 2x + 3y = 100
# with x >= 0, y >= 0
x = 0
while x <= 50:
    y = 0
    while y < 34:
        if 2 * x + 3 * y == 100:
            print(x, y)
        y += 1
    x += 1
|
# str1 = 'a fox jumped over the fence'
# count: number of occurrences of a substring
# count(sub, start=0, end=len(str))
# print(str1.count('e',10,16))
# find(sub, start=0, end=len(str))
# searches left to right; returns the index of the first match, or -1
# print(str1.find("fox")) # 2
# print(str1.find('1fox')) # -1
# print(str1.find("fox", 3))
# # rfind: same, but searches from the right
str1 = 'a fox jumped over fox the fence'
# print(str1.rfind("fox"))
# print(str1.rfind("fox",))
#
# # replace(old, new, [count]) returns a new string (str is immutable)
res = str1.replace("fox", "tiger", 1) # replace only the first occurrence
print(res)
print(str1)
|
# Task: using a list comprehension, lower-case every string in l1 and leave
# all non-string items unchanged.
l1 = ['Java', 'C', 'Swift', 'Python', True, 12.4]
# isinstance is preferred over `type(i) == str`: it also handles str subclasses
l2 = [i.lower() if isinstance(i, str) else i for i in l1]
print(l2)
|
class Animal(object):
    # Base class: name is public, __age private, _a protected by convention.
    def __init__(self,name,age):
        self.name = name
        self.__age = age          # private (name-mangled) attribute
        self._a = 10 # protected attribute: used directly by convention
    def eat(self):
        print("吃")
    def get_age(self):
        # accessor for the private age
        return self.__age
class Dog(Animal):
    def __init__(self,name,age,kind):
        # attributes inherited from the parent are initialized by calling
        # the parent's constructor
        # super(Dog,self).__init__(name)
        # super().__init__(name)
        Animal.__init__(self,name,age) # not recommended — prefer super()
        # self.age = age
        self.kind = kind
    def t1(self):
        # a parent's private attribute cannot be referenced directly in a subclass
        # print(self.__age)
        print(self.get_age())
        # protected attributes may be used directly
        # print(self._p)
    # override the inherited method
    def eat(self):
        # first invoke the parent's implementation
        super().eat()
        print("爱吃")
    # brand-new method on the subclass
    def bark(self):
        print("www")
jinmao = Dog("dd",18,"金毛")
print(jinmao.__dict__)
print(jinmao.name)
# the subclass's override takes precedence over the parent's method
jinmao.eat()
# Deep copy vs shallow copy
import copy
# Immutable objects: int float boolean str tuple
# — the deep/shallow distinction does not apply to them
# a = 10
# b = a
# # copy.copy is a shallow copy
# c = copy.copy(a)
# print(id(a),id(b),id(c))
# s1 = "ok"
# s2 = copy.copy(s1)
# print(id(s1),id(s2))
# Mutable containers: list, dict, set
a = [1,[5,6],3]
# shallow copy: duplicates the container only; elements are shared
b = copy.copy(a)
# print(id(a),id(b))
a[0] = 10
a[1][0] = 10
print(a,b)
# print(id(a[1]),id(b[1]))
# deep copy: duplicates the container AND its (mutable) elements recursively
c = copy.deepcopy(a)
print(id(a),id(c))
|
import csv
# # Reading a csv file
# with open("csv/bank.csv") as fp:
#     # build a reader over the open file
#     # delimiter may be omitted when it is a comma
#     reader = csv.reader(fp,delimiter=':')
#     for line in reader:
#         print([int(x) if x.isdigit() else x for x in line])
# Writing a csv file
a = {'a':10,'b':[1,2,3]}
# newline='' keeps the csv module from emitting blank lines between rows
with open("csv/dict.csv",mode="w",newline='') as fp:
    writer = csv.writer(fp)
    for key in a:
        value = a[key]
        # Bug fix: writerow requires an iterable of fields; the original
        # passed the bare int 10 for key 'a', which raises an error.
        # Wrap scalar values in a one-element list.
        if isinstance(value, (list, tuple)):
            writer.writerow(value)
        else:
            writer.writerow([value])
|
# 6. Given a sentence of letters and single spaces (no leading/trailing
# spaces), reverse the order of its words, e.g. "hello xiao mi" -> "mi xiao hello"
str1 = "hello xiao mi"
list1 = str1.split(" ")
a = list(reversed(list1))
print(" ".join(a))
|
# year = int(input("请输入年份:"))
# month = int(input("请输入月份:"))
# day = int(input("请输入日期:"))
# days = 0
# month_day = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# if year > 0 and 0 < month < 13 and 0 < day < 32:
# if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
# month_day[1] = 29
# if month == 2 and day > 29:
# print("错误的日期输入")
# elif month == 4 or month == 6 or month == 9 or month == 10 and day > 30:
# print("错误的日期输入")
# else:
# for m in range(month - 1):
# days += month_day[m]
# print(days + day)
# else:
# if month == 2 and day > 28:
# print("错误的日期输入")
# elif month == 4 or month == 6 or month == 9 or month == 10 and day > 30:
# print("错误的日期输入")
# else:
# for m in range(month - 1):
# days += month_day[m]
# print(days + day)
# else:
# print("错误的日期输入")
# Day-of-year estimate: start from 30 days per month, then correct for
# 31-day months and February.
year = int(input("请输入年分:"))
month = int(input("请输入月份:"))
day = int(input("请输入日:"))
print("-" * 20)
day += (month - 1) * 30 # count every elapsed month as 30 days first
print(day)
if month < 9: # add one day per elapsed 31-day month
    day += month // 2
else:
    day += (month + 1) // 2
if month > 2:
    # subtract February's over-count: 1 day in leap years, 2 otherwise
    if year % 400 == 0 or year % 4 == 0 and year % 100 != 0:
        day -= 1
    else:
        day -= 2
print("是%d年的第%d天" % (year,day))
|
# Reading a file
# 1. open it
fp = open("f2.txt")
# 2. read the contents
# data = fp.read() # read the whole file at once
# data = fp.read(5)
# print(data)
# read a single line
# data = fp.readline()
# while 1:
#     data = fp.readline().strip()
#     print(data)
#     if not data:
#         break
# readline(n): at most n characters of the current line (or the whole line)
# data = fp.readline(10)
# data = fp.readlines()
# data = [line.strip() for line in data]
# print(data)
# Iterating a file:
# the file object itself is an iterable of lines
for line in fp:
    print(line.strip())
# 3. close it
fp.close()
a = [[1,4],[3,2],[2,3]]
|
class Girl:
    # Demonstrates public vs private attributes plus getter/setter methods.
    def __init__(self,name,age):
        # public attribute
        self.name = name
        # private attribute: not reachable as obj.__age from outside
        self.__age = age
    # getter method
    def get_age(self):
        return self.__age # inside the class, private access is unrestricted
    # setter
    def set_age(self,age):
        self.__age = age
xiaohong = Girl('小红',18)
# public attributes are read/written directly as obj.attr
# print(xiaohong.name)
# xiaohong.name = '小芳'
# print(xiaohong.name)
# private attributes cannot be read as obj.__attr ...
# print(xiaohong.__age)
# ...but the mangled name obj._ClassName__attr does work
# print(xiaohong._Girl__age) # discouraged
print(xiaohong.get_age())
xiaohong.set_age(20)
# print(xiaohong)
# inspect the instance's attribute dict
print(xiaohong.__dict__)
|
# Passing a function as an argument
def add(a,b):
    # Return the sum of a and b.
    return a + b
def opperate(a,b,func):
    """
    :param a: first operand
    :param b: second operand
    :param func: the operation — a function object passed in as an argument
    :return: func applied to a and b
    """
    return func(a,b)
res = opperate(2,5,add)
print(res)
|
str1 = 'a fox jumped over fox the fence'
# split(separator): cut the string at each separator, returning a list
# res = str1.split(" ")
# s2 = "a1b1c"
# print(res, len(res))
# print(s2.split('1'))
# list() converts a string into a list of characters
# a = list(str1)
# print(a)
# partition: returns (head, separator, tail)
# res = str1.partition("over")
# print(res)
#
# s2 = """agc
# bchg
# cih
# dbn
# es
# """
# splitlines: split on line boundaries, producing a list
# res = s2.splitlines()
# print(res)
# join: glue the list elements together with the given separator string;
# every element must already be a str, and the separator comes first
a = ['1','2','3','4']
res = '-'.join(a)
print(res)
|
# try:
#     fp = open('f2.txt')
#     fp.write("*********************")
# except FileNotFoundError as e:
#     print(e)
# except Exception as e:
#     print(e)
# finally:
#     print("finally")
#     fp.close()
# Context management: once the file is opened, it is guaranteed to be closed
# with open('f2.txt') as fp:
#     fp.write("11111")
# Implementing the context-manager protocol in a class
class FileOperate:
    # Opens a file on __enter__ and closes it on __exit__.
    def __init__(self,fileName,mode='r'):
        self.fileName = fileName
        self.mode = mode
    def __enter__(self):
        print("打开文件")
        self.fp = open(self.fileName,self.mode)
        return self.fp  # bound to the `as` target
    def __exit__(self, exc_type, exc_val, exc_tb):
        # returns None, so exceptions raised in the with-block still propagate
        print("关闭文件")
        self.fp.close()
with FileOperate('f2.txt') as fp:
    data = fp.read()
    print(data)
|
# 2. A function that takes a parameter n and returns n!
def jiecheng(n):
    """Return the factorial of n (1 for n <= 0)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
res = jiecheng(3)
print(res)
|
# Running sum via a generator-based coroutine
def add(num):
    """Yield a running total; send numbers to add, send any str to stop."""
    sum1 = num
    while True:
        x = yield sum1  # hand back the total, wait for the next value
        if isinstance(x,str):
            break       # any string terminates the coroutine
        sum1 += x
t1 = add(2) # creating the generator object runs no code yet
next(t1) # next() starts execution and runs to the first yield
print(t1.send(10))
|
"""
ML Model that plays tic tac toe.
"""
from copy import deepcopy
import random
from board import Board
class Model:
    """
    ML Model that plays tic tac toe.

    Learns a linear evaluation of board states (one weight per board
    feature) and greedily plays the move with the highest predicted value.
    """
    def __init__(self, piece):
        """
        Initialize an unlearned model.
        Arguments:
        piece : the model's piece, either an 'X' or an 'O'
        """
        self.piece = piece
        self.initialize_random_weights()
    def initialize_random_weights(self):
        """
        Initially assign random values to the model's weights.
        """
        # one weight per Board feature, each drawn uniformly from [0, 1)
        self.weights = [random.random() for i in range(Board.number_of_features())]
    def initialize_weights(self, weights):
        """
        Assign weights.
        Arguments:
        weights : list : weights to use for this model
        """
        self.weights = weights
    def target_function(self, board):
        """
        Return the value of a board according to target representation.
        Arguments:
        board : Board : the board to compute a score for
        Returns:
        value : float : the value for the board state
        """
        # linear model: dot product of learned weights with board features
        x = board.target_representation(self.piece)
        value = sum([self.weights[i] * x[i] for i in range(len(x))])
        return value
    def gradient_descent(self, batch, n=.01):
        """
        Adjust model's learned weights based on Least Mean Squares cost.
        Arguments:
        batch : iterable : batch of (Board, score) training examples
        n : float : learning rate
        """
        # Iterate through batch
        for i in batch:
            board_state = i[0]
            score = i[1]
            # List of feature values
            x = board_state.target_representation(self.piece)
            # Compute approximation
            approximation = self.target_function(board_state)
            # Adjust weights based upon error (LMS update rule).
            # NOTE(review): the loop variable i is reused here, shadowing the
            # batch item — harmless today, but worth renaming.
            for i in range(len(self.weights)):
                self.weights[i] = self.weights[i] + n * (score - approximation) * x[i]
    def make_move(self, board, randomize=False):
        """
        Will decide where to move based upon the target function.
        Arguments:
        board : Board : a Board object representing current board
        randomize : bool : if True, model will make a random move
        50% of the time
        Returns:
        next_board : Board :a board object representing the next
        best move
        """
        if None not in sum(board.board, []):  # sum(..., []) flattens the 2-D grid
            raise ValueError("Cannot make_move because game is over.")
        # list of ((row, col), score) tuples which tracks optimal move
        scores = []
        for i, row in enumerate(board.board):
            for j, square in enumerate(row):
                if square is None:
                    # try the move in place, score the position, then undo it
                    board.board[i][j] = self.piece
                    scores.append(((i, j), self.target_function(board)))
                    board.board[i][j] = None
        # If randomize flag is set, sometimes make a random move
        if randomize:
            if random.choice([True, False]):
                # NOTE(review): this can select an occupied square — confirm
                # that overwriting it is intended.
                row, col = random.randint(0, 2), random.randint(0, 2)
            else:
                row, col = max(scores, key = lambda i : i[1])[0]
        else:
            row, col = max(scores, key = lambda i : i[1])[0]
        # Get best move, and return a deepcopy of the board
        next_board = Board()
        next_board.board = deepcopy(board.board)
        next_board.board[row][col] = self.piece
        return next_board
def main():
    """Placeholder entry point (the model is driven from elsewhere)."""
    print("Do Nothing.")


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 15:35:03 2020
@author: akashkumar
"""
# Digit -> word lookup tables.
ones_digit = {
    0: '',
    1: "one",
    2: "two",
    3: "three",
    4: "four",
    5: "five",
    6: "six",
    7: "seven",
    8: "eight",
    9: "nine"
}
tens_digit = {
    0: '',
    2: "twenty",
    3: "thirty",
    4: "forty",    # fix: was misspelled "fourty"
    5: "fifty",
    6: "sixty",
    7: "seventy",
    8: "eighty",
    9: "ninety"
}
elevenToTwenty = {
    10: "ten",
    11: "eleven",
    12: "twelve",
    13: "thirteen",
    14: "fourteen",
    15: "fifteen",
    16: "sixteen",
    17: "seventeen",
    18: "eighteen",
    19: "nineteen"  # fix: was misspelled "ninteen"
}


def number_to_words(n):
    """Print the English words for an integer 0 <= n <= 9999.

    Preserves the original output formatting: single digits and the
    teens end with a newline; other forms are printed with end="".
    """
    s = str(n)
    k = len(s)
    if k == 1:
        print(ones_digit[n])
    if k == 2:
        tens = n // 10
        ones = n % 10
        if tens == 1:
            print(elevenToTwenty[n])
        else:
            print(tens_digit[tens], ones_digit[ones], end="")
    if k == 3:
        hundreds = n // 100
        tens = (n // 10) % 10
        below_hundred = n % 100
        ones = n % 10
        if tens == 0 and below_hundred == 0:
            print(ones_digit[hundreds], "hundred", end="")
        elif tens == 1:
            print(ones_digit[hundreds], "hundred and", elevenToTwenty[below_hundred], end="")
        else:
            print(ones_digit[hundreds], "hundred and", tens_digit[tens], ones_digit[ones], end="")
    if k == 4:
        t = int(s[0])
        h = int(s[1])
        te = int(s[2])
        o = int(s[3])
        tenplus = int(s[2] + s[3])  # the last two digits as one number
        if h == 0 and te == 0 and o == 0:
            print(ones_digit[t], "thousand", end="")
        elif h == 0 and te == 0:
            print(ones_digit[t], "thousand", ones_digit[o], end="")
        elif te == 1 and h == 0:
            print(ones_digit[t], "thousand", elevenToTwenty[tenplus])
        elif te == 1:
            print(ones_digit[t], "thousand", ones_digit[h], "hundred and", elevenToTwenty[tenplus], end="")
        elif h == 0:
            print(ones_digit[t], "thousand", tens_digit[te], ones_digit[o], end="")
        else:
            print(ones_digit[t], "thousand", ones_digit[h], "hundred and", tens_digit[te], ones_digit[o], end="")


if __name__ == "__main__":
    # Fix: the original had a bare `n =` (SyntaxError). Read n from stdin
    # and convert; wrapped in a __main__ guard so the module is importable.
    number_to_words(int(input()))
|
import random

# Game banner.
print('''*********************************************************
Welcome to Aaron's Guessing Game
**********************************************************''')

# Lowest number of tries seen so far; 10 is the worst possible score
# since valid guesses are 1..10.
best_score = 10
def start_game():
    """Run one round: prompt until the player guesses the random 1-10 number.

    Updates the module-level best_score when beaten and returns it.
    Out-of-range guesses and non-numeric input do not count as tries.
    """
    global best_score
    solution = random.randint(1, 10)
    number_of_trys = 0
    while True:
        try:
            check = int(input("Please quess a number between 1 and 10:"))
        except ValueError:
            print("Sorry we where expecting just a number try again")
            continue
        if check < 1 or check > 10:
            print("Please choose a number between 1 and 10")
        elif check == solution:
            number_of_trys += 1
            print("You Got It!!!!! It only took you {} number of tries Great Job!".format(number_of_trys))
            if number_of_trys < best_score:
                best_score = number_of_trys
            # Fix: always leave the loop on a correct guess. The original
            # only returned when the score improved (and its `break` sat
            # unreachably after `return`), so a non-record win kept
            # prompting forever.
            return best_score
        elif check > solution:
            print("It's lower")
            number_of_trys += 1
        else:
            print("It's Higher")
            number_of_trys += 1
def play_agin():
    """Ask to replay until the player declines, then print the best score.

    Cleanups vs. the original: the useless `except Exception as e: raise e`
    wrapper and the redundant str() around input() are gone, and the
    .upper() result is computed once.
    """
    while True:
        answer = input("Current low score is {} See if you can beat that would you like you try agin [Y]es/[N]o: ".format(best_score)).upper()
        if answer in ("Y", "YES"):
            start_game()
        elif answer in ("N", "NO"):
            print("The lowest score of the game was {}".format(best_score))
            print("********************* Game Over **********************************")
            break
        else:
            print("Opps we where expecting a Y for Yes or a N for No")
if __name__ == '__main__':
    # Kick off the program: play one round, then offer replays.
    start_game()
    play_agin()
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
import pprint
def build_graph(n, cities):
    """Build an undirected adjacency map for cities numbered 1..n.

    Cities touched by at least one road map to a set of neighbours;
    cities with no road at all are stored with the value None.
    """
    graph = defaultdict(set)
    connected = set()
    for edge in cities:
        a, b = edge[0], edge[1]
        graph[a].add(b)
        graph[b].add(a)
        connected.add(a)
        connected.add(b)
    for city in range(1, n + 1):
        if city not in connected:
            graph[city] = None
    return graph
def dfs(graph):
    """Return a list with the vertex count of each connected component.

    Vertices whose adjacency value is falsy (None for isolated cities)
    form components of size 1.

    Fix: uses an explicit stack instead of recursion -- the recursive
    original exceeded Python's recursion limit on large components.
    """
    components = []
    visited = set()
    for start in graph:
        if start in visited:
            continue
        visited.add(start)
        count = 1
        if graph[start]:
            stack = [start]
            while stack:
                vertex = stack.pop()
                for neighbor in graph[vertex]:
                    if neighbor not in visited:
                        visited.add(neighbor)
                        count += 1
                        stack.append(neighbor)
        components.append(count)
    return components
# Complete the roadsAndLibraries function below.
def roadsAndLibraries(n, c_lib, c_road, cities):
    """Return the minimum cost to give all n cities access to a library.

    Arguments:
        n : int : number of cities (numbered 1..n)
        c_lib : int : cost of building one library
        c_road : int : cost of repairing one road
        cities : list[list[int]] : repairable roads as [city, city] pairs

    Two cases:
      1. c_lib <= c_road: build a library in every city.
      2. c_lib >  c_road: per connected component, build one library and
         repair a spanning tree of (#vertices - 1) roads.

    Fix: the debug print/pprint scaffolding was removed -- it polluted
    stdout, which is the judged output channel for this problem.
    """
    if c_lib <= c_road:
        return n * c_lib
    graph = build_graph(n, cities)
    components = dfs(graph)
    return sum((count - 1) * c_road + c_lib for count in components)
if __name__ == '__main__':
    # HackerRank-style driver: q queries, each giving n cities, m roads,
    # the library cost, the road cost, then m road pairs. Results go to
    # output.txt, one per line.
    fptr = open('output.txt', 'w')
    q = int(input())
    for q_itr in range(q):
        nmC_libC_road = input().split()
        n = int(nmC_libC_road[0])
        m = int(nmC_libC_road[1])  # number of road lines that follow
        c_lib = int(nmC_libC_road[2])
        c_road = int(nmC_libC_road[3])
        cities = []
        for _ in range(m):
            cities.append(list(map(int, input().rstrip().split())))
        result = roadsAndLibraries(n, c_lib, c_road, cities)
        fptr.write(str(result) + '\n')
    fptr.close()
|
import os
import csv

# Path to the election ballot CSV (columns: ..., ..., candidate name).
csvpath = os.path.join('..', '..', 'gt-atl-data-pt-06-2020-u-c', 'DataViz-Content',
                       '03-Python', 'Homework', 'Instructions', 'PyPoll',
                       'Resources', 'election_data.csv')

# Read every candidate name (third column) from the ballot file.
# Fix: all reading happens inside the `with` block -- the original
# iterated the csv reader after the file had been closed.
candidates = []
with open(csvpath, 'r') as file:
    poll = csv.reader(file)
    next(poll)  # skip the header row
    for votes in poll:
        candidates.append(votes[2])

# Tally votes per candidate.
nominee = {}
for name in candidates:
    nominee[name] = nominee.get(name, 0) + 1

total_votes = sum(nominee.values())

# Fix: the winner is simply the candidate with the most votes. The
# original indexed the raw ballot list with a position taken from the
# percentage list, which selected an essentially arbitrary ballot row.
winner = max(nominee, key=nominee.get)

print("Election Results")
print("--------------------------------")
print(f"Total Votes: {total_votes}")
print("--------------------------------")
for delegates in nominee:
    # vote count and percentage for each candidate
    vote_count = nominee[delegates]
    percent = round(float((vote_count / total_votes) * 100))
    print(f"{delegates}: {percent}.00% ({vote_count})")
    print("--------------------------------")
print(f"Winner: {winner}")
print("--------------------------------")

# Write the same report to poll.txt (with-block guarantees the close the
# original left commented out).
with open("poll.txt", "w") as f:
    print("REPRINT REPRINT REPRINT", file=f)
    print("Election Results", file=f)
    print("--------------------------------", file=f)
    print(f"Total Votes: {total_votes}", file=f)
    print("--------------------------------", file=f)
    for delegates in nominee:
        vote_count = nominee[delegates]
        percent = round(float((vote_count / total_votes) * 100))
        # Fix: this line was missing file=f and leaked to stdout.
        print(f"{delegates}: {percent}.00% ({vote_count})", file=f)
        print("--------------------------------", file=f)
    print(f"Winner: {winner}", file=f)
    print("--------------------------------", file=f)
|
# coding: utf-8

# **References**
#
# https://andhint.github.io/machine-learning/nlp/Feature-Extraction-From-Text/
#
# # Tweet Sentiment Classification
# ## Objective
# Determine whether the sentiment on the Tweets related to a product or company is positive, negative or neutral.
# ## Classification Techniques
# ### A1. Supervised Learning
# *** Step 1: Using Manually Labelled Tweet Sentiments for Supervised Training***

# In[23]:

# Jupyter setting: echo every expression result in a cell, not just the last.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython.display import HTML, display

# In[24]:

import pandas as pd
import numpy as np
np.random.seed(7)  # reproducible runs

print("[INFO] Data Frame created with Manually-labelled data for individual stocks")
# Manually-labelled AAPL tweets: 'text', 'date', 'sentiment' columns.
aapl=pd.read_csv("/Users/hardeepsingh/Desktop/IS/University/sem-4/FE-520/Project/Manually_labelled_clean_aapl.csv")
print("\n[INFO] Apple Data Frame - Manually Labelled Sentiments")
aapl.info()
aapl=aapl.dropna()  # drop rows with missing text/sentiment

# *** Step 2: Splitting the Data into Train (for Model training) and Test (for validation)***

# In[25]:

from sklearn.model_selection import train_test_split
text=aapl.text
target=aapl.sentiment
# 80/20 train/test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test=train_test_split(text,target, random_state=27,test_size=0.2)
# *** Step 3: Selecting the best Model***

# #### WordCloud Of Tweets

# In[26]:

# REFERENCE: https://github.com/amueller/word_cloud
from os import path
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt

d = path.dirname('manually_labeled_data/')
# Read the whole text.
text1 = open(path.join(d,'/Users/hardeepsingh/Desktop/IS/University/sem-4/FE-520/Project/Manually_labelled_clean_aapl.csv')).read()
stopwords = set(STOPWORDS)

# lower max_font_size; render a word cloud of the raw tweet text.
wordcloud = WordCloud(max_font_size=100,
                      background_color='white',
                      width=1200,
                      height=1000,
                      stopwords=stopwords).generate(text1)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# * **Choice 1: CountVectorizer OR TfidfVectorizer (both from sklearn.feature_extraction.text)**
#     * CountVectorizer builds a bag-of-words document-term matrix of raw token counts.
#     * TfidfVectorizer builds the same matrix weighted by term frequency-inverse document frequency.
# * **Choice 2: Classifier**
#     * LogisticRegression / DecisionTree / MultinomialNB / Linear SVC
#
# The eight vectorizer x classifier experiments below were originally
# copy-pasted cells (In[27]-In[42]); they are factored into two helpers
# that produce identical console output and identical result keys.

# importing required libraries
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings('ignore')

# import GridSearch
from sklearn.model_selection import GridSearchCV

# To store the results (label -> test accuracy)
classifier_results = {}

# tokenizing only alpha numeric
tokenPatt = '[A-Za-z0-9]+(?=\\s+)'

# the metric used to select the best parameters
metric = "f1_macro"


def fit_and_score(label, vectorizer, classifier):
    """Fit a vectorizer -> classifier pipeline on the train split, print and
    record its test-set accuracy under `label`, and return the pipeline."""
    pipeline = Pipeline([('tfidf', vectorizer), ('clf', classifier)])
    pipeline.fit(X_train, y_train)
    accuracy = pipeline.score(X_test, y_test)
    print(label + ": ", accuracy)
    classifier_results[label] = accuracy
    return pipeline


def grid_search_report(pipeline, parameters):
    """5-fold GridSearchCV over `parameters` (scored by f1_macro) on the full
    data set; print the best value found for each parameter."""
    gs_clf = GridSearchCV(pipeline, param_grid=parameters, scoring=metric, cv=5)
    gs_clf = gs_clf.fit(text, target)
    # gs_clf.best_params_ maps each parameter to its best value
    for param_name in gs_clf.best_params_:
        print(param_name, ": ", gs_clf.best_params_[param_name])


# Shared tuning grids; classifier-specific entries are added per family.
base_grid = {'tfidf__min_df': [2, 3], 'tfidf__stop_words': [None, "english"]}
logreg_grid = dict(base_grid, clf__solver=['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'])
nb_grid = dict(base_grid, clf__alpha=[0.5, 1.0, 2.0])
svc_grid = dict(base_grid, clf__loss=['hinge', 'squared_hinge'])

# ** i. CountVectorizer + Logistic Regression **
pl_1 = fit_and_score("Untuned Accuracy of Logistic Regression using CountVectorizer",
                     CountVectorizer(token_pattern=tokenPatt), LogisticRegression())
grid_search_report(pl_1, logreg_grid)
# Using parameters from the GridSearch result above
fit_and_score("Tuned Accuracy of Logistic Regression using CountVectorizer",
              CountVectorizer(stop_words=None, token_pattern=tokenPatt, min_df=2),
              LogisticRegression(solver='sag'))

# ** ii. CountVectorizer + DecisionTreeClassifier **
p2_1 = fit_and_score("Untuned Accuracy of Decision Tree using CountVectorizer",
                     CountVectorizer(token_pattern=tokenPatt), DecisionTreeClassifier())
grid_search_report(p2_1, base_grid)
fit_and_score("Tuned Accuracy of Decision Tree using CountVectorizer",
              CountVectorizer(stop_words='english', token_pattern=tokenPatt, min_df=3),
              DecisionTreeClassifier())

# ** iii. CountVectorizer + MultinomialNB **
p3_1 = fit_and_score("Untuned Accuracy of MultinomialNB using CountVectorizer",
                     CountVectorizer(), MultinomialNB())
grid_search_report(p3_1, nb_grid)
fit_and_score("Tuned Accuracy of MultinomialNB using CountVectorizer",
              CountVectorizer(stop_words=None, token_pattern=tokenPatt, min_df=2),
              MultinomialNB(alpha=0.5))

# ** iv. CountVectorizer + LinearSVC **
p4_1 = fit_and_score("Untuned Accuracy of LinearSVC using CountVectorizer",
                     CountVectorizer(), LinearSVC())
grid_search_report(p4_1, svc_grid)
fit_and_score("Tuned Accuracy of LinearSVC using CountVectorizer",
              CountVectorizer(stop_words=None, token_pattern=tokenPatt, min_df=2),
              LinearSVC(loss='hinge'))

# ** v. TfidfVectorizer + Logistic Regression **
p5_1 = fit_and_score("Untuned Accuracy of Logistic Regression using TfidfVectorizer",
                     TfidfVectorizer(), LogisticRegression())
grid_search_report(p5_1, logreg_grid)
fit_and_score("Tuned Accuracy of Logistic Regression using TfidfVectorizer",
              TfidfVectorizer(stop_words=None, token_pattern=tokenPatt, min_df=2),
              LogisticRegression(solver='newton-cg'))

# ** vi. TfidfVectorizer + DecisionTreeClassifier **
p6_1 = fit_and_score("Untuned Accuracy of Decision Tree using TfidfVectorizer",
                     TfidfVectorizer(), DecisionTreeClassifier())
grid_search_report(p6_1, base_grid)
fit_and_score("Tuned Accuracy of Decision Tree using TfidfVectorizer",
              TfidfVectorizer(stop_words='english', token_pattern=tokenPatt, min_df=3),
              DecisionTreeClassifier())

# ** vii. TfidfVectorizer + MultinomialNB **
p7_1 = fit_and_score("Untuned Accuracy of MultinomialNB using TfidfVectorizer",
                     TfidfVectorizer(), MultinomialNB())
grid_search_report(p7_1, nb_grid)
fit_and_score("Tuned Accuracy of MultinomialNB using TfidfVectorizer",
              TfidfVectorizer(stop_words='english', token_pattern=tokenPatt, min_df=3),
              MultinomialNB(alpha=0.5))

# ** viii. TfidfVectorizer + LinearSVC **
p8_1 = fit_and_score("Untuned Accuracy of LinearSVC using TfidfVectorizer",
                     TfidfVectorizer(), LinearSVC())
grid_search_report(p8_1, svc_grid)
fit_and_score("Tuned Accuracy of LinearSVC using TfidfVectorizer",
              TfidfVectorizer(stop_words=None, token_pattern=tokenPatt, min_df=2),
              LinearSVC(loss='hinge'))
# ### A2. Supervised Learning - Model Comparison Analysis

# In[43]:

# Print every recorded accuracy so the best vectorizer/classifier pair
# can be picked by inspection.
for key in classifier_results:
    print('{}={}'.format(key,classifier_results[key]))

# ### B. Unsupervised Learning
# ** Unsupervised Sentiment Classification using NLTK Vader**
# *** Step B1: Analyze Sentiments***

# In[22]:

import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def sentiment_analysis_vader_validation(df, filepath):
    """Score each tweet with NLTK VADER and compare against its manual label.

    Arguments:
        df : DataFrame : rows with 'text', 'date' and a manually-labelled
            'sentiment' (0 = negative, 2 = neutral, 4 = positive)
        filepath : str : destination CSV for the per-tweet results
            (the write is currently disabled, as in the original)

    Tweets of four words or fewer are skipped. Prints the count of
    Match/Mismatch rows.
    """
    sid = SentimentIntensityAnalyzer()
    d = []
    sentiment_map = {'pos': 4, 'neg': 0, 'neu': 2}
    for index, tweet in df.iterrows():
        if len(str(tweet['text']).split()) > 4:
            tweet_txt = tweet['text']
            tweet_date = tweet['date']
            tweet_manual_label = tweet['sentiment']
            ss = sid.polarity_scores(tweet_txt)
            # MAX LOGIC: predicted class = highest of the three component
            # scores. max() with key=ss.get keeps the original tie-breaking
            # (neg, then neu, then pos) without scanning ss's extra
            # 'compound' entry the way the old list comprehension did.
            sentiment = max(('neg', 'neu', 'pos'), key=ss.get)
            sentiment_mapping = sentiment_map[sentiment]
            if tweet_manual_label == sentiment_mapping:
                validation_result = 'Match'
            else:
                validation_result = 'Mismatch'
            d.append({'date': tweet_date, 'text': tweet_txt,
                      'polarity_score_neg': ss['neg'],
                      'polarity_score_neu': ss['neu'],
                      'polarity_score_pos': ss['pos'],
                      'predicted_sentiment': sentiment_mapping,
                      'labeled_sentiment': tweet_manual_label,
                      'validation_result': validation_result})
    df_processed = pd.DataFrame(d)
    #df_processed.to_csv(filepath, index=False)
    print(df_processed.groupby(['validation_result'])['validation_result'].count())
# Using merged_df created in Step A1
# merged_df has all the labelled tweets for MSFT and AAPL
# Validate VADER's unsupervised predictions against the manual labels.
output_file = 'vader_predictions.csv'
sentiment_analysis_vader_validation(aapl, output_file)

# # OUTCOME OF THE SELECTION PROCESS
# For our project, it is important to achieve the highest accuracy in classifying the tweet sentiments. With the results of the above analysis, we observed that LinearSVC alongwith TfidfVectorizer gave the best accuracy of 67.38%.
#
# Therefore, we have selected the tuned LinearSVC alongwith TfidfVectorizer for the tweet sentiment classification.
|
def strEncrypt(first, second, key, result):
    """Round function over 4 characters.

    Each output character is chr(((first XOR shifted) % 26) + 97), where
    shifted = (second + key) % 26 + 97 is an Enigma-style key shift.
    The `result` argument is ignored (kept for call compatibility); a
    fresh 4-element character list is returned.
    """
    out = []
    for idx in range(4):
        shifted = (ord(second[idx]) + ord(key[idx])) % 26 + 97
        mixed = (ord(first[idx]) ^ shifted) % 26 + 97
        out.append(chr(mixed))
    return out
def Encrypt(word, key, cipher):
    """One Feistel-like round over the plaintext, 8 characters per group.

    For each group: new left half = old right half, new right half =
    strEncrypt(old left, old right, key). Characters beyond the last
    complete group are dropped. The `cipher` argument is ignored (kept
    for call compatibility); a flat list of output characters is
    returned.
    """
    output = []
    for g in range(len(word) // 8):
        group = word[g * 8:g * 8 + 8]
        left = list(group[:4])
        right = list(group[4:])
        new_right = strEncrypt(left, right, key, [])
        # left/right swap: the old right half leads the output.
        output += right + new_right
    return output
if __name__ == '__main__':
    # Avalanche-effect demo: encrypt a plaintext and a one-bit-different
    # plaintext round by round, and report how many ciphertext characters
    # differ after each round.
    word = input("请输入要加密的明文(字符个数为8的整数倍):")
    word_wrong = input("请输入要改动的明文(字符个数为8的整数倍,和正确明文只差一比特):")
    n = int(len(word) / 8)
    key = input("请输入密钥:")
    # Split the key into 4-character groups (one group per round).
    keylist = []
    for x in range(len(key) // 4):
        t = [key[x * 4], key[x * 4 + 1], key[x * 4 + 2], key[x * 4 + 3]]
        keylist.append(t)
    cipher = []
    cipher_wrong = []
    for x in range(len(key) // 4):
        cipher = Encrypt(word, keylist[x], cipher)
        cipher_wrong = Encrypt(word_wrong, keylist[x], cipher_wrong)
        print("第%d轮正确:" % (x + 1), cipher)        # round x+1: correct ciphertext
        print("第%d轮错误:" % (x + 1), cipher_wrong)  # round x+1: altered ciphertext
        h = 0
        for b in range(len(cipher)):
            if cipher[b] != cipher_wrong[b]:
                h = h + 1  # count of differing characters
            # Dead code kept from the original (bit-level comparison draft).
            '''
            p=bin(ord(cipher[b]))
            q=bin(ord(cipher_wrong[b]))
            for i in range(len(p)):
                if p[i]!=q[i]:
                    s=s+1
            '''
        xxx = h / len(cipher)
        # "Avalanche rate for round x+1"
        print("第%d轮雪崩率为:" % (x + 1), "%.2f%%" % (xxx * 100))
        # The next round encrypts this round's output.
        word = cipher
        word_wrong = cipher_wrong
|
#加密
def encrypt():
word=input("请输入要加密的文字:")
first=[]
second=[]
for x in word:
if word.index(x)%2==0:
first.append(x)
else:
second.append(x)
print("加密后的文字为:",''.join(first)+''.join(second))
#解密
def decrypt():
word=input("请输入要解密的文字:")
l=len(word)
first = []
second = []
plain=[]#明文
for x in word:
if word.index(x)<=round(l/2):
first.append(x)
else:
second.append(x)
for a in range(l):
if a%2==0:
plain.append(first[a//2])
else:
plain.append(second[a//2])
print("解密后的文字是:",''.join(plain))
# Simple menu loop: 1 = encrypt, 2 = decrypt, 3 = quit.
while True:
    choice = input("请选择操作(1.加密 2.解密 3.退出):")
    # Fix: compare the raw string -- the original's int(choice) raised
    # ValueError on any non-numeric entry instead of reporting it as
    # invalid input.
    if choice == "1":
        encrypt()
    elif choice == "2":
        decrypt()
    elif choice == "3":
        break
    else:
        print("输入无效!")
|
import numpy as np
from layers import *
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deteriminstic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
all_layer_sizes = [input_dim]+hidden_dims+[num_classes]
for i, (hidden_dim, last_hidden_dim) in enumerate(zip(all_layer_sizes[1:], all_layer_sizes[:-1])):
self.params[f'W{i+1}'] = weight_scale*np.random.randn(last_hidden_dim, hidden_dim)
self.params[f'b{i+1}'] = weight_scale*np.zeros(hidden_dim)
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.use_dropout:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
layer_cache = []
activations = [X]
dropout_caches = []
for i in range(1, self.num_layers):
cache = None
cur_activation, cur_cache = affine_relu_forward(activations[-1], self.params[f'W{i}'],
self.params[f'b{i}'])
layer_cache.append(cur_cache)
activations.append(cur_activation)
# after ReLU, we now add dropout layer
# During backprop we will require the cache
cur_drop_cache = np.nan
if self.use_dropout:
cur_activation, cur_drop_cache = dropout_forward(cur_activation, self.dropout_param)
activations.append(cur_activation)
# Append dropout cache to list anyway for alignment purposes in autistic zip statement
# made in backprop
dropout_caches.append(cur_drop_cache)
# Softmax layer is done below after return scores
scores, presoft_cache = affine_forward(activations[-1], self.params[f'W{self.num_layers}'],
self.params[f'b{self.num_layers}'])
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dsoftmax = softmax_loss(scores, y)
# loss += 0.5*self.reg*sum(np.sum(thing**2) for thing in [W1, W2])
for i in range(self.num_layers):
loss += 0.5*self.reg*np.sum(self.params[f'W{i+1}']**2)
dup, dlastW, dlastb = affine_backward(dsoftmax, presoft_cache)
grads[f'W{self.num_layers}'] = dlastW+self.reg*self.params[f'W{self.num_layers}']
grads[f'b{self.num_layers}'] = dlastb
for i, cur_cache, cur_drop_cache in zip(range(self.num_layers-1, 0, -1),
reversed(layer_cache),
reversed(dropout_caches)):
# Get back from dropout first
if self.use_dropout:
dup = dropout_backward(dup, cur_drop_cache)
dup, dWcur, dbcur = affine_relu_backward(dup, cur_cache)
grads[f'W{i}'] = dWcur+self.reg*self.params[f'W{i}']
grads[f'b{i}'] = dbcur
# Notice that dup is now dx after all the backprop till x
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
|
print('Running...')
# Load the two numeric columns (x, y) from the CSV.
# The original code opened and scanned the file twice, once per column;
# a single pass reads both.
csv_file = 'auto_insurance_sweden.csv'
x = []
y = []
with open(csv_file, 'r') as f:
    for row in f:
        fields = row.split(',')
        x.append(float(fields[0]))
        y.append(float(fields[1]))
###########################################################################################
#the actual machine learning code is here
def computeError(x, y, slope, yint):
    """Mean absolute error of the line y = slope*x + yint over the data.

    Note: the original comment claimed "(y-(mx+b))^2", but the code takes
    the absolute residual, not its square -- this is MAE, not MSE. The
    behavior (abs) is kept; only the documentation is corrected.
    """
    residuals = [abs(yi - (slope * xi + yint)) for xi, yi in zip(x, y)]
    return sum(residuals) / len(residuals)
def gradientDescent(x, y, initial_slope, initial_yint, learningrate, epochs):
    """Fit y = slope*x + yint by full-batch gradient descent on the MSE.

    Returns (slope, yint) after `epochs` updates.
    """
    slope = initial_slope
    yint = initial_yint
    n = len(x)
    for _ in range(epochs):
        # Partial derivatives of the mean squared error with respect to
        # the slope and the y intercept.
        slopegradient = (-2 / n) * sum(
            xi * (yi - (slope * xi + yint)) for xi, yi in zip(x, y))
        yintgradient = (-2 / n) * sum(
            yi - (slope * xi + yint) for xi, yi in zip(x, y))
        slope = slope - learningrate * slopegradient
        # BUG FIX: the intercept update previously read
        # `yint = slope - (learningrate*yintgradient)`, starting from the
        # (already-updated) slope instead of yint, so the intercept was
        # never actually learned.
        yint = yint - learningrate * yintgradient
    return slope, yint
# Hyperparameters for the gradient-descent fit.
learningrate = 0.0001
epochs = 100000
initial_slope = 0
initial_yint = 0
slope, yint = gradientDescent(x, y, initial_slope, initial_yint, learningrate, epochs)
# BUG FIX: the error was previously computed as computeError(x, y, yint, slope),
# swapping slope and intercept relative to the (x, y, slope, yint) signature,
# so the printed error described the wrong line.
error = computeError(x, y, slope, yint)
print('Error: ' + str(error))
###########################################################################################
import matplotlib.pyplot as plt
import numpy as np
# Graph the original points and the fitted line.
print('y = ' + str(slope) + '*x + ' + str(yint))
# BUG FIX: linspace was previously called as
# np.linspace(len(x), int(min(x)), int(max(x))) -- i.e. (start=len(x),
# stop=min(x), num=max(x)) -- which draws the fit line over a meaningless
# interval. Span the observed x range instead.
xline = np.linspace(min(x), max(x), 100)
yline = slope * xline + yint
plt.scatter(x, y)  # plot the original points
plt.plot(xline, yline, '-r', label='y=' + str(slope) + '*x+' + str(yint))
# BUG FIX: a label was set on the line but legend() was never called,
# so the label was never shown.
plt.legend()
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.