text
stringlengths 37
1.41M
|
---|
from collections import deque
class Item:
    '''
    Representation of items in PriorityQueue.
    For use internally in PriorityQueue class only.
    '''
    def __init__(self, label, key):
        # label: caller-supplied identifier; key: priority used for heap ordering
        self.label, self.key = label, key
class PriorityQueue:
    '''
    Heap-based priority queue implementation.
    '''
    def __init__(self):
        # A: array-backed binary min-heap of Item objects
        self.A = []
        # label2idx: label -> current index in A, giving O(1) lookup for decrease_key
        self.label2idx = {}
    def min_heapify_up(self, c):
        '''
        Maintains the min-heap property by swapping the item at the given index upwards.
        (You SHOULD NOT call this function. It is used internally for maintaining the heap)
        '''
        if c == 0: return
        p = (c - 1) // 2  # parent index
        if self.A[p].key > self.A[c].key:
            # parent larger than child: swap and keep the index map in sync
            self.A[c], self.A[p] = self.A[p], self.A[c]
            self.label2idx[self.A[c].label] = c
            self.label2idx[self.A[p].label] = p
            self.min_heapify_up(p)
    def min_heapify_down(self, p):
        '''
        Maintains the min-heap property by swapping the item at the given index downwards.
        (You SHOULD NOT call this function. It is used internally for maintaining the heap)
        '''
        if p >= len(self.A): return
        l = 2 * p + 1  # left child index
        r = 2 * p + 2  # right child index
        # out-of-range children are redirected to p itself so the
        # comparisons below become harmless no-ops
        if l >= len(self.A): l = p
        if r >= len(self.A): r = p
        # c: index of the smaller child (ties favor the right child)
        c = l if self.A[r].key > self.A[l].key else r
        if self.A[p].key > self.A[c].key:
            self.A[c], self.A[p] = self.A[p], self.A[c]
            self.label2idx[self.A[c].label] = c
            self.label2idx[self.A[p].label] = p
            self.min_heapify_down(c)
    def size(self):
        '''
        Retrieves the number of elements in the priority queue
        Args:
            None
        Returns:
            Size of the priority queue
        '''
        return len(self.A)
    def insert(self, label, key):
        '''
        Inserts a new element into the priority queue
        Args:
            label: Identifying information to be stored along with the priority
            key: Priority of the element being inserted
        Returns:
            None
        '''
        # append at the end, then bubble up to restore the heap property
        self.A.append(Item(label, key))
        idx = len(self.A) - 1
        self.label2idx[self.A[idx].label] = idx
        self.min_heapify_up(idx)
    def extract_min(self):
        '''
        Removes and returns the minimum-priority element in the priority queue
        Args:
            None
        Returns:
            The identifier for the element removed.
        '''
        # swap root with the last element, pop it off, then sift the new
        # root down; raises IndexError on an empty queue (callers check size())
        self.A[0], self.A[-1] = self.A[-1], self.A[0]
        self.label2idx[self.A[0].label] = 0
        del self.label2idx[self.A[-1].label]
        min_label = self.A.pop().label
        self.min_heapify_down(0)
        return min_label
    def decrease_key(self, label, key):
        '''
        Decreases the priority of a given item in the queue
        Args:
            label: Identifying information stored along with priority
            key: New priority of the item with the specified label
        Returns:
            None
        '''
        # silently ignores unknown labels and keys that would not decrease
        if label in self.label2idx:
            idx = self.label2idx[label]
            if key < self.A[idx].key:
                self.A[idx].key = key
                self.min_heapify_up(idx)
'''
###################################################
### PLEASE DO NOT MODIFY ANY OF THE CODE ABOVE! ###
### This code is included for your convenience, ###
### but modifications may cause you a headache! ###
###################################################
'''
def bidi(adj, s, t):
    '''
    Implement bidirectional dijkstra.
    Args:
        adj: Routers are identified by unique integer id's. adj[u][v] is the latency between router u and router v.
             For a router, u, with no neighbor adj[u] = {}.
        s: Starting router id.
        t: Destination router id.
    Returns:
        The minimum weighted distance from s to t. If there is no path from s to t, return None.
    Note: Bidirectional dijkstra cuts down the number of nodes you visit. Only insert nodes into your priority queue (and whatever other data structures you may be maintaining)
          when you actually discover them through relaxation.
    '''
    infinity = float("inf")  # NOTE(review): unused; kept to leave code unchanged
    adjr = dict() #adjacency list dict for reverse
    d = dict() #distance from s
    dr = dict() #distance from t
    pq = PriorityQueue() #priority q for forward dijkstra
    pqr = PriorityQueue() #priority q for backward dijkstra
    seen = set() #useful for stopping conditions
    seenr = set() #useful for stopping conditions
    u = None  # first vertex settled by BOTH searches (meeting point), if any
    #create reverse adjacency list mapping value to key
    for key in adj:
        adj_set = adj[key]
        if key not in adjr:#for nodes with only outgoing edges
            adjr[key] = dict()
        for k in adj_set:
            if k not in adjr:
                adjr[k] = dict()
            # forward edge key->k becomes reverse edge k->key, same weight
            adjr[k][key] = adj_set[k]
    d[s] = 0
    dr[t] = 0
    pq.insert(s,0)
    pqr.insert(t,0)
    while max(pq.size(),pqr.size()) > 0:
        #forward dijkstra step
        if pq.size() > 0:
            node = pq.extract_min()
        else:
            node = None
        if node is not None:
            # relax every outgoing edge of the settled node
            for k in adj[node]:
                dist = adj[node][k] + d[node]
                if k not in d:
                    # first discovery: record tentative distance and enqueue
                    d[k] = dist
                    pq.insert(k,d[k])
                elif d[k] > adj[node][k] + d[node]:
                    d[k] = adj[node][k] + d[node]
                    pq.decrease_key(k,d[k])
            if node in seenr:
                # node already settled by the backward search: searches met
                u = node
                break
            seen.add(node)
        #backward dijkstra step (mirror image on the reversed graph)
        if pqr.size() >0:
            noder = pqr.extract_min()
        else:
            noder = None
        if noder is not None:
            for k in adjr[noder]:
                dist = adjr[noder][k] + dr[noder]
                if k not in dr:
                    dr[k] = dist
                    pqr.insert(k,dr[k])
                elif dr[k] > adjr[noder][k] + dr[noder]:
                    dr[k] = adjr[noder][k] + dr[noder]
                    pqr.decrease_key(k,dr[k])
            if noder in seen:
                u = noder
                break
            seenr.add(noder)
    #tracepath
    if u is not None:
        # the shortest s-t path need not pass through the meeting vertex
        # itself, so minimize d[v] + dr[v] over every doubly-labeled vertex
        mu = d[u] + dr[u]
        for v in d.keys():
            if v in dr.keys():
                if mu > d[v] + dr[v]:
                    mu = d[v] + dr[v]
        return mu
    return None
|
# ---------------------------------------------------------------------
# JSON normalization routines
from __future__ import annotations
from collections import (
abc,
defaultdict,
)
import copy
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
)
import numpy as np
from pandas._libs.writers import convert_json_to_lines
import pandas as pd
from pandas import DataFrame
if TYPE_CHECKING:
from collections.abc import Iterable
from pandas._typing import (
IgnoreRaise,
Scalar,
)
def convert_to_line_delimits(s: str) -> str:
    """
    Helper function that converts JSON lists to line delimited JSON.

    Only a serialized JSON *list* (``"[...]"``) can be split into lines;
    any other input (including a bare JSON object) is returned unchanged.
    """
    # Bug fix: the original condition `not s[0] == "[" and s[-1] == "]"`
    # parsed as `(not s[0] == "[") and (s[-1] == "]")`, so a plain JSON
    # object like '{"a": 1}' fell through and was mangled by the slice
    # below. Parenthesize to test the intended "is a JSON list" predicate,
    # and guard the empty string (s[0] would raise IndexError).
    if not s or not (s[0] == "[" and s[-1] == "]"):
        return s
    # strip the surrounding brackets before handing off to the C helper
    s = s[1:-1]
    return convert_json_to_lines(s)
def nested_to_record(
    ds,
    prefix: str = "",
    sep: str = ".",
    level: int = 0,
    max_level: int | None = None,
):
    """
    A simplified json_normalize

    Flatten each (possibly nested) dict in `ds` into a single-level
    "record" dict, joining nested keys with `sep`. Unlike json_normalize
    it does not attempt to extract a subset of the data.

    Parameters
    ----------
    ds : dict or list of dicts
    prefix: the prefix, optional, default: ""
    sep : str, default '.'
        Nested records will generate names separated by sep,
        e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
    level: int, optional, default: 0
        The number of levels in the json string.
    max_level: int, optional, default: None
        The max depth to normalize.

    Returns
    -------
    d - dict or list of dicts, matching `ds`

    Examples
    --------
    >>> nested_to_record(
    ...     dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
    ... )
    {\
'flat1': 1, \
'dict1.c': 1, \
'dict1.d': 2, \
'nested.e.c': 1, \
'nested.e.d': 2, \
'nested.d': 2\
}
    """
    is_single = isinstance(ds, dict)
    records = [ds] if is_single else ds

    flattened = []
    for record in records:
        flat = copy.deepcopy(record)
        for raw_key, value in record.items():
            # keys are coerced to str so they can be joined with `sep`
            key = raw_key if isinstance(raw_key, str) else str(raw_key)
            new_key = key if level == 0 else f"{prefix}{sep}{key}"

            # stop descending once the requested depth is reached
            depth_reached = max_level is not None and level >= max_level

            if isinstance(value, dict) and not depth_reached:
                # recurse into the nested dict and splice its flat keys in
                flat.update(
                    nested_to_record(flat.pop(key), new_key, sep, level + 1, max_level)
                )
            elif level != 0:
                # re-file the leaf under its prefixed name; at the top
                # level the deepcopy already holds it under the right key
                flat[new_key] = flat.pop(key)
        flattened.append(flat)

    return flattened[0] if is_single else flattened
def _normalise_json(
data: Any,
key_string: str,
normalized_dict: dict[str, Any],
separator: str,
) -> dict[str, Any]:
"""
Main recursive function
Designed for the most basic use case of pd.json_normalize(data)
intended as a performance improvement, see #15621
Parameters
----------
data : Any
Type dependent on types contained within nested Json
key_string : str
New key (with separator(s) in) for data
normalized_dict : dict
The new normalized/flattened Json dict
separator : str, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
"""
if isinstance(data, dict):
for key, value in data.items():
new_key = f"{key_string}{separator}{key}"
if not key_string:
new_key = new_key.removeprefix(separator)
_normalise_json(
data=value,
key_string=new_key,
normalized_dict=normalized_dict,
separator=separator,
)
else:
normalized_dict[key_string] = data
return normalized_dict
def _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]:
    """
    Order the top level keys and then recursively go to depth

    Parameters
    ----------
    data : dict or list of dicts
    separator : str, default '.'
        Nested records will generate names separated by sep,
        e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar

    Returns
    -------
    dict or list of dicts, matching `normalised_json_object`
    """
    # split scalar top-level entries from nested dict entries so the
    # scalars come first in the result
    scalars: dict[str, Any] = {}
    nested: dict[str, Any] = {}
    for key, value in data.items():
        target = nested if isinstance(value, dict) else scalars
        target[key] = value

    flattened = _normalise_json(
        data=nested,
        key_string="",
        normalized_dict={},
        separator=separator,
    )
    return scalars | flattened
def _simple_json_normalize(
    ds: dict | list[dict],
    sep: str = ".",
) -> dict | list[dict] | Any:
    """
    A optimized basic json_normalize

    Converts a nested dict into a flat dict ("record"), unlike
    json_normalize and nested_to_record it doesn't do anything clever.
    But for the most basic use cases it enhances performance.
    E.g. pd.json_normalize(data)

    Parameters
    ----------
    ds : dict or list of dicts
    sep : str, default '.'
        Nested records will generate names separated by sep,
        e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar

    Returns
    -------
    frame : DataFrame
    d - dict or list of dicts, matching `normalised_json_object`

    Examples
    --------
    >>> _simple_json_normalize(
    ...     {
    ...         "flat1": 1,
    ...         "dict1": {"c": 1, "d": 2},
    ...         "nested": {"e": {"c": 1, "d": 2}, "d": 2},
    ...     }
    ... )
    {\
'flat1': 1, \
'dict1.c': 1, \
'dict1.d': 2, \
'nested.e.c': 1, \
'nested.e.d': 2, \
'nested.d': 2\
}
    """
    # expect a dictionary, as most jsons are; lists are handled row by row
    if isinstance(ds, dict):
        return _normalise_json_ordered(data=ds, separator=sep)
    if isinstance(ds, list):
        return [_simple_json_normalize(row, sep=sep) for row in ds]
    # anything else falls through to the original empty-dict default
    return {}
def json_normalize(
    data: dict | list[dict],
    record_path: str | list | None = None,
    meta: str | list[str | list[str]] | None = None,
    meta_prefix: str | None = None,
    record_prefix: str | None = None,
    errors: IgnoreRaise = "raise",
    sep: str = ".",
    max_level: int | None = None,
) -> DataFrame:
    """
    Normalize semi-structured JSON data into a flat table.

    Parameters
    ----------
    data : dict or list of dicts
        Unserialized JSON objects.
    record_path : str or list of str, default None
        Path in each object to list of records. If not passed, data will be
        assumed to be an array of records.
    meta : list of paths (str or list of str), default None
        Fields to use as metadata for each record in resulting table.
    meta_prefix : str, default None
        If True, prefix records with dotted (?) path, e.g. foo.bar.field if
        meta is ['foo', 'bar'].
    record_prefix : str, default None
        If True, prefix records with dotted (?) path, e.g. foo.bar.field if
        path to records is ['foo', 'bar'].
    errors : {'raise', 'ignore'}, default 'raise'
        Configures error handling.

        * 'ignore' : will ignore KeyError if keys listed in meta are not
          always present.
        * 'raise' : will raise KeyError if keys listed in meta are not
          always present.
    sep : str, default '.'
        Nested records will generate names separated by sep.
        e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar.
    max_level : int, default None
        Max number of levels(depth of dict) to normalize.
        if None, normalizes all levels.

    Returns
    -------
    frame : DataFrame
        Normalize semi-structured JSON data into a flat table.

    Examples
    --------
    >>> data = [
    ...     {"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
    ...     {"name": {"given": "Mark", "family": "Regner"}},
    ...     {"id": 2, "name": "Faye Raker"},
    ... ]
    >>> pd.json_normalize(data)
        id name.first name.last name.given name.family        name
    0  1.0     Coleen      Volk        NaN         NaN         NaN
    1  NaN        NaN       NaN       Mark      Regner         NaN
    2  2.0        NaN       NaN        NaN         NaN  Faye Raker

    >>> data = [
    ...     {
    ...         "id": 1,
    ...         "name": "Cole Volk",
    ...         "fitness": {"height": 130, "weight": 60},
    ...     },
    ...     {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},
    ...     {
    ...         "id": 2,
    ...         "name": "Faye Raker",
    ...         "fitness": {"height": 130, "weight": 60},
    ...     },
    ... ]
    >>> pd.json_normalize(data, max_level=0)
        id        name                        fitness
    0  1.0   Cole Volk  {'height': 130, 'weight': 60}
    1  NaN    Mark Reg  {'height': 130, 'weight': 60}
    2  2.0  Faye Raker  {'height': 130, 'weight': 60}

    Normalizes nested data up to level 1.

    >>> data = [
    ...     {
    ...         "id": 1,
    ...         "name": "Cole Volk",
    ...         "fitness": {"height": 130, "weight": 60},
    ...     },
    ...     {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},
    ...     {
    ...         "id": 2,
    ...         "name": "Faye Raker",
    ...         "fitness": {"height": 130, "weight": 60},
    ...     },
    ... ]
    >>> pd.json_normalize(data, max_level=1)
        id        name  fitness.height  fitness.weight
    0  1.0   Cole Volk             130              60
    1  NaN    Mark Reg             130              60
    2  2.0  Faye Raker             130              60

    >>> data = [
    ...     {
    ...         "state": "Florida",
    ...         "shortname": "FL",
    ...         "info": {"governor": "Rick Scott"},
    ...         "counties": [
    ...             {"name": "Dade", "population": 12345},
    ...             {"name": "Broward", "population": 40000},
    ...             {"name": "Palm Beach", "population": 60000},
    ...         ],
    ...     },
    ...     {
    ...         "state": "Ohio",
    ...         "shortname": "OH",
    ...         "info": {"governor": "John Kasich"},
    ...         "counties": [
    ...             {"name": "Summit", "population": 1234},
    ...             {"name": "Cuyahoga", "population": 1337},
    ...         ],
    ...     },
    ... ]
    >>> result = pd.json_normalize(
    ...     data, "counties", ["state", "shortname", ["info", "governor"]]
    ... )
    >>> result
             name  population    state shortname info.governor
    0        Dade       12345  Florida        FL    Rick Scott
    1     Broward       40000  Florida        FL    Rick Scott
    2  Palm Beach       60000  Florida        FL    Rick Scott
    3      Summit        1234     Ohio        OH   John Kasich
    4    Cuyahoga        1337     Ohio        OH   John Kasich

    >>> data = {"A": [1, 2]}
    >>> pd.json_normalize(data, "A", record_prefix="Prefix.")
       Prefix.0
    0         1
    1         2

    Returns normalized data with columns prefixed with the given string.
    """

    def _pull_field(
        js: dict[str, Any], spec: list | str, extract_record: bool = False
    ) -> Scalar | Iterable:
        """Internal function to pull field"""
        # walk `spec` (a single key or a path of keys) down into `js`;
        # missing keys either raise or map to np.nan per `errors`
        result = js
        try:
            if isinstance(spec, list):
                for field in spec:
                    if result is None:
                        raise KeyError(field)
                    result = result[field]
            else:
                result = result[spec]
        except KeyError as e:
            if extract_record:
                # record paths must exist in every element: always raise
                raise KeyError(
                    f"Key {e} not found. If specifying a record_path, all elements of "
                    f"data should have the path."
                ) from e
            if errors == "ignore":
                return np.nan
            else:
                raise KeyError(
                    f"Key {e} not found. To replace missing values of {e} with "
                    f"np.nan, pass in errors='ignore'"
                ) from e

        return result

    def _pull_records(js: dict[str, Any], spec: list | str) -> list:
        """
        Internal function to pull field for records, and similar to
        _pull_field, but require to return list. And will raise error
        if has non iterable value.
        """
        result = _pull_field(js, spec, extract_record=True)

        # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not
        # null, otherwise return an empty list
        if not isinstance(result, list):
            if pd.isnull(result):
                result = []
            else:
                raise TypeError(
                    f"{js} has non list value {result} for path {spec}. "
                    "Must be list or null."
                )
        return result

    # normalize `data` itself to a list of dicts
    if isinstance(data, list) and not data:
        return DataFrame()
    elif isinstance(data, dict):
        # A bit of a hackjob
        data = [data]
    elif isinstance(data, abc.Iterable) and not isinstance(data, str):
        # GH35923 Fix pd.json_normalize to not skip the first element of a
        # generator input
        data = list(data)
    else:
        raise NotImplementedError

    # check to see if a simple recursive function is possible to
    # improve performance (see #15621) but only for cases such
    # as pd.Dataframe(data) or pd.Dataframe(data, sep)
    if (
        record_path is None
        and meta is None
        and meta_prefix is None
        and record_prefix is None
        and max_level is None
    ):
        return DataFrame(_simple_json_normalize(data, sep=sep))

    if record_path is None:
        if any([isinstance(x, dict) for x in y.values()] for y in data):
            # naive normalization, this is idempotent for flat records
            # and potentially will inflate the data considerably for
            # deeply nested structures:
            #  {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
            #
            # TODO: handle record value which are lists, at least error
            #       reasonably
            data = nested_to_record(data, sep=sep, max_level=max_level)
        return DataFrame(data)
    elif not isinstance(record_path, list):
        record_path = [record_path]

    if meta is None:
        meta = []
    elif not isinstance(meta, list):
        meta = [meta]

    # each meta entry becomes a path (list of keys); joined names key meta_vals
    _meta = [m if isinstance(m, list) else [m] for m in meta]

    # Disastrously inefficient for now
    records: list = []
    lengths = []

    meta_vals: DefaultDict = defaultdict(list)
    meta_keys = [sep.join(val) for val in _meta]

    def _recursive_extract(data, path, seen_meta, level: int = 0) -> None:
        # walk down `path`, collecting meta values on the way; at the final
        # level pull the record lists and remember their lengths so meta
        # values can be repeated to match afterwards
        if isinstance(data, dict):
            data = [data]
        if len(path) > 1:
            for obj in data:
                for val, key in zip(_meta, meta_keys):
                    if level + 1 == len(val):
                        seen_meta[key] = _pull_field(obj, val[-1])

                _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
        else:
            for obj in data:
                recs = _pull_records(obj, path[0])
                recs = [
                    nested_to_record(r, sep=sep, max_level=max_level)
                    if isinstance(r, dict)
                    else r
                    for r in recs
                ]

                # For repeating the metadata later
                lengths.append(len(recs))
                for val, key in zip(_meta, meta_keys):
                    if level + 1 > len(val):
                        meta_val = seen_meta[key]
                    else:
                        meta_val = _pull_field(obj, val[level:])
                    meta_vals[key].append(meta_val)
                records.extend(recs)

    _recursive_extract(data, record_path, {}, level=0)

    result = DataFrame(records)

    if record_prefix is not None:
        result = result.rename(columns=lambda x: f"{record_prefix}{x}")

    # Data types, a problem
    for k, v in meta_vals.items():
        if meta_prefix is not None:
            k = meta_prefix + k

        if k in result:
            raise ValueError(
                f"Conflicting metadata name {k}, need distinguishing prefix "
            )
        # GH 37782
        values = np.array(v, dtype=object)

        if values.ndim > 1:
            # GH 37782
            values = np.empty((len(v),), dtype=object)
            for i, v in enumerate(v):
                values[i] = v

        # repeat each meta value once per record pulled from its object
        result[k] = values.repeat(lengths)
    return result
|
# Type stubs for C-implemented calendar helpers (appears to be pandas'
# `ccalendar` extension module -- confirm against the accompanying .pyx).
# Lookup tables for month/day names and numbers.
DAYS: list[str]
MONTH_ALIASES: dict[int, str]
MONTH_NUMBERS: dict[str, int]
MONTHS: list[str]
int_to_weekday: dict[int, str]

# Day-of-month of the first/last business (week)day of the given month.
def get_firstbday(year: int, month: int) -> int: ...
def get_lastbday(year: int, month: int) -> int: ...

# Ordinal day within the year (1-based).
def get_day_of_year(year: int, month: int, day: int) -> int: ...

# ISO (year, week, weekday) triple for the given date.
def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ...

# ISO week number of the given date.
def get_week_of_year(year: int, month: int, day: int) -> int: ...

# Number of days in the given month, accounting for leap years.
def get_days_in_month(year: int, month: int) -> int: ...
|
"""
Reversed Operations not available in the stdlib operator module.
Defining these instead of using lambdas allows us to reference them by name.
"""
from __future__ import annotations
import operator
def radd(left, right):
    """Reflected add: computes ``right + left``."""
    return operator.add(right, left)
def rsub(left, right):
    """Reflected subtract: computes ``right - left``."""
    return operator.sub(right, left)
def rmul(left, right):
    """Reflected multiply: computes ``right * left``."""
    return operator.mul(right, left)
def rdiv(left, right):
    """Reflected (true) division: computes ``right / left``."""
    return operator.truediv(right, left)
def rtruediv(left, right):
    """Reflected true division: computes ``right / left``."""
    return operator.truediv(right, left)
def rfloordiv(left, right):
    """Reflected floor division: computes ``right // left``."""
    return operator.floordiv(right, left)
def rmod(left, right):
    """Reflected modulo: computes ``right % left``.

    Rejects a string ``right`` operand, since ``%`` on a string is the
    printf-style formatting operation rather than modulo; mirroring the
    numeric types, that case raises TypeError instead.
    """
    if isinstance(right, str):
        raise TypeError(f"{type(left).__name__} cannot perform the operation mod")
    return operator.mod(right, left)
def rdivmod(left, right):
    """Reflected divmod: computes ``divmod(right, left)``."""
    quotient_remainder = divmod(right, left)
    return quotient_remainder
def rpow(left, right):
    """Reflected power: computes ``right ** left``."""
    return operator.pow(right, left)
def rand_(left, right):
    """Reflected bitwise/logical AND: computes ``right & left``."""
    return right & left
def ror_(left, right):
    """Reflected bitwise/logical OR: computes ``right | left``."""
    return right | left
def rxor(left, right):
    """Reflected bitwise/logical XOR: computes ``right ^ left``."""
    return right ^ left
|
# Weekly pay calculator: £0.10 per delivery, summed over seven daily prompts.
total = 0
for day in range(7):
    # one prompt per day of the week
    deliveries = int(input("How many deliveries did you do\n>>>"))
    # Bug fix: the original assigned `total = deliveries * 0.1` each day,
    # so only the final day's deliveries were paid; accumulate instead.
    total += deliveries * 0.1
print("This how much you made in one week\n>>> £" + str(total))
|
from tkinter import *
equalsState=False
def addition(num1, num2):
    """Return the sum of the two operands."""
    result = num1 + num2
    return result
def subtraction(num1, num2):
    """Return the first operand minus the second."""
    result = num1 - num2
    return result
def multiplication(num1, num2):
    """Return the product of the two operands."""
    result = num1 * num2
    return result
def division(num1, num2):
    """Return num1 / num2 (true division; ZeroDivisionError when num2 == 0)."""
    quotient = num1 / num2
    return quotient
def equalsPressed(state):
    """Record whether '=' was just pressed, so the next digit typed starts a fresh entry."""
    global equalsState
    equalsState = state
def clearDisplay():
    """Remove all text from the calculator's Entry widget (module-global `display`)."""
    display.delete(0,END)
def displayValue(text):
    """Replace the current display contents with `text`."""
    clearDisplay()
    display.insert(0,text)
def appendToDisplay(text):
    """Append `text` to the display; if '=' was just pressed, start a new entry."""
    global equalsState
    if equalsState:
        # a result is currently showing: typing begins a new expression
        clearDisplay()
        equalsPressed(False)
    displayValue(display.get() + text)
def calculate():
    """
    Evaluate the expression in the display and show the result.

    Splits on the first operator symbol found (checked in the order
    +, -, *, /) and applies the matching arithmetic helper.

    NOTE(review): limitations visible in the code below —
    * only one binary operation is supported (extra operators break the
      two-operand assumption),
    * int() raises ValueError on decimal input,
    * a leading "-" (negative first operand) is treated as the subtraction
      operator, producing a wrong split.
    """
    displayedValue = display.get()
    if "+" in displayedValue:
        operands = displayedValue.split('+')
        displayValue(addition(int(operands[0]), int(operands[1])))
    elif "-" in displayedValue:
        operands = displayedValue.split('-')
        displayValue(subtraction(int(operands[0]), int(operands[1])))
    elif "*" in displayedValue:
        operands = displayedValue.split('*')
        displayValue(multiplication(int(operands[0]), int(operands[1])))
    elif "/" in displayedValue:
        operands = displayedValue.split('/')
        displayValue(division(int(operands[0]), int(operands[1])))
    # flag that a result is showing so the next digit starts a new entry
    equalsPressed(True)
# --- GUI construction: a 4x4 calculator keypad under a single-line display ---
root = Tk()
# entry widget across the top (grid columns 1-4)
display = Entry(root)
display.grid(row=0, column=1, columnspan=4, padx=4, pady=4)
# digit buttons 1-9 in a 3x3 grid; each simply appends its digit
button1 = Button(command=lambda:appendToDisplay("1"),text="1",height = 2, width = 4)
button1.grid(row=1, column=1, padx=2, pady=2)
button2 = Button(command=lambda:appendToDisplay("2"),text="2",height = 2, width = 4)
button2.grid(row=1, column=2, padx=2, pady=2)
button3 = Button(command=lambda:appendToDisplay("3"),text="3",height = 2, width = 4)
button3.grid(row=1, column=3, padx=2, pady=2)
button4 = Button(command=lambda:appendToDisplay("4"),text="4",height = 2, width = 4)
button4.grid(row=2, column=1, padx=2, pady=2)
button5 = Button(command=lambda:appendToDisplay("5"),text="5",height = 2, width = 4)
button5.grid(row=2, column=2, padx=2, pady=2)
button6 = Button(command=lambda:appendToDisplay("6"),text="6",height = 2, width = 4)
button6.grid(row=2, column=3, padx=2, pady=2)
button7 = Button(command=lambda:appendToDisplay("7"),text="7",height = 2, width = 4)
button7.grid(row=3, column=1, padx=2, pady=2)
button8 = Button(command=lambda:appendToDisplay("8"),text="8",height = 2, width = 4)
button8.grid(row=3, column=2, padx=2, pady=2)
button9 = Button(command=lambda:appendToDisplay("9"),text="9",height = 2, width = 4)
button9.grid(row=3, column=3, padx=2, pady=2)
# bottom row: clear, zero, equals
button0 = Button(command=lambda:appendToDisplay("0"),text="0",height = 2, width = 4)
button0.grid(row=4, column=2, padx=2, pady=2)
buttonC = Button(command=lambda:clearDisplay(),text="C",height = 2, width = 4)
buttonC.grid(row=4, column=1, padx=2, pady=2)
buttonE = Button(command=lambda:calculate(),text="=",height = 2, width = 4)
buttonE.grid(row=4, column=3, padx=2, pady=2)
# rightmost column: the four operators, appended as text for calculate() to parse
additionButton = Button(command=lambda:appendToDisplay("+"),text="+",height = 2, width = 4)
additionButton.grid(row=1, column=4, padx=2, pady=2)
buttonS = Button(command=lambda:appendToDisplay("-"),text="-",height = 2, width = 4)
buttonS.grid(row=2, column=4, padx=2, pady=2)
buttonM = Button(command=lambda:appendToDisplay("*"),text="*",height = 2, width = 4)
buttonM.grid(row=3, column=4, padx=2, pady=2)
buttonD = Button(command=lambda:appendToDisplay("/"),text="/",height = 2, width = 4)
buttonD.grid(row=4, column=4, padx=2, pady=2)
# enter the Tk event loop (blocks until the window closes)
root.mainloop()
|
import datetime

# Greet the user, then report the calendar year in which they turn 100.
name = input('Enter your name: ')
print('Hello', name)
current_year = datetime.datetime.now().year
age = int(input('enter your age: '))
years_remaining = 100 - age
print("this is when you will be 100: ", years_remaining + current_year)
|
import random

# Number-guessing game (Spanish prompts preserved verbatim).
numero = random.randint(0, 99)
print("Intente adivinar el numero")
while True:
    # keep prompting until the user types a valid integer in [0, 99]
    while True:
        intento = input("Introduzca un numero entre el 0 y el 99 incluidos ")
        try:
            # Bug fix: the original stored the conversion in a misspelled
            # variable (`initento`), so `intento` stayed a string and the
            # comparisons below raised TypeError on Python 3.
            intento = int(intento)
        except ValueError:
            pass
        else:
            # Bug fix: validate the user's number; the original tested the
            # secret `numero`, which is always in range.
            if 0 <= intento <= 99:
                break
    if intento < numero:
        print("Demasiado pequeño")
    elif intento > numero:
        print("Demasiado Grande")
    else:
        print("¡Victoria!")
        break
|
"""Dictionaries(hash tables)"""
""" Mappings are a collection of objects that are stored by a key, unlike
a sequence that stored objects by their relative position. This is an
important distinction, since mappings won't retain order since they
have objects defined by a key. """
""" A python dictionary consists of a key and then an associated value.
That value can be almost any python object. """
# print dict --> prints full dictionary
# print dict['key1'] --> prints 1st key's value
dict1 = {'key1':'value','key2':'value'}
print dict1
print dict1['key1']
#It's important to note that dictionaries are very flexible in the data
#types they can hold
dict2 = {'key1': 123, 'key2':3.4,'key3':'string'}
print dict2
#lets call items from the dictionary
print dict2['key1']
# Can call an index on that value
print dict2['key3'][0]
#Can even call methods on that value
print dict2['key3'][0].upper()
#We can effect the values of a key as well...
print dict2['key1']
#subtract 123 from the value
dict2['key1'] = dict2['key1'] - 123
#check it...
print dict2['key1']
""" Python has a built-in method of doing a self subtraction or
addition (or multiplication...division). we could have also
used += or -= for the above statement. for example..."""
#set the object equal to itself minus 123
dict2['key1'] -= 123
print dict2
"""We can also create keys by assignment.for instance if we started off
with an empty dictionary, we could continually add to it."""
#Creating a dictionary
d = {}
#create a new key through assignment
d['animal'] = 'Dog'
d['answer'] = 42
print d
#Nesting with Dictionaries
#Dictionary nested inside a dictionary nested inside a dictionary
k = {'key1':{'nestkey':{'subnestkey':'value'}}}
print k
#print until we only print 'value'
print k['key1']
print k['key1']['nestkey']
print k['key1']['nestkey']['subnestkey']
#Dictionary methods
q = {'key1':1,'key2':2,'key3':3}
print q.keys() #returns list of all keys
print q.values() #grabs all values
print q.items() #returns tuples of all items
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Function definition
def hello():
    """Print a greeting (the author's name, in Chinese)."""
    print ("我是李强")
# A placeholder function: `pass` makes the empty body legal
def passFunc():
    pass
# 函数参数探究
def argFunc01(str):
print (str)
argFunc01("李强")
argFunc01(123)
def argFunc02(str):
print (str + 99)
# argFunc02("liqiang")
argFunc02(66)
# Parameter with a default value (the default is a Chinese term of address)
def hello2(name, sex = "小哥哥"):
    print ("Hello \n" + "\t"+name + sex)
hello2("李强")
sumS = "2" + "2"
sumN = 2 + 2
print (sumS)
print (sumN)
# strInput= raw_input("Enter your input:")
# print int(strInput) + 5
for num in range(1,20):
print (num)
url = "djfklj{}".format(str(1))
print (url)
ary = [5 + i for i in range(1,10)]
print (ary)
# print("hahahhah" + 5)
print("hahah" + str(3))
|
class LongestSubstring():
    """Length of the longest substring without repeating characters."""

    def getLength(self, s):
        """
        Return the length of the longest run of distinct characters in `s`.

        Replaces the original O(n^2) list-scan (membership test plus slice
        deletion per character) with the classic sliding-window /
        last-seen-index map: O(n) time, O(min(n, alphabet)) space.
        """
        if len(s) < 2:
            return len(s)
        last_seen = {}  # char -> index of its most recent occurrence
        start = 0       # left edge of the current duplicate-free window
        best = 0
        for i, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                # ch repeats inside the window: slide the left edge past it
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
|
# Simple branching demo (comments translated from Indonesian).
x = 7 * 10
y = 5 * 6
# If x equals 70, report it
if x == 70:
    print('x adalah 70')
# If y is anything other than 40, report it
if y != 40:
    print('y bukan 40')
|
# Node of a generic binary tree
# Extend later to create other tree types like heaps, BST, etc.
class BinaryNode():
    """
    Node of a generic binary tree.
    Extend later to create other tree types like heaps, BST, etc.
    """

    def __init__(self, data):
        self.data = data
        # Per-instance links. (The original relied on shared class
        # attributes as implicit defaults, which works but is fragile.)
        self.left = None
        self.right = None
        self.parent = None
        self.side = None  # "left"/"right": which side of parent we hang on

    def get(self):
        """Return this node's payload."""
        return self.data

    # Sets parent and tells what side it's on
    def setParent(self, parent, side):
        self.parent = parent
        self.side = side
        if parent:
            # Bug fix: compare strings with ==, not `is`; identity equality
            # for string literals only worked by accident of interning.
            if side == "left":
                parent.left = self
            if side == "right":
                parent.right = self

    def getParent(self):
        return self.parent

    def getSide(self):
        return self.side

    def setLeft(self, node):
        """Attach `node` as left child and fix its parent link."""
        self.left = node
        if node and node is not self:
            node.setParent(self, "left")

    def getLeft(self):
        return self.left

    def setRight(self, node):
        """Attach `node` as right child and fix its parent link."""
        self.right = node
        if node and node is not self:
            node.setParent(self, "right")

    def setChild(self, node, side):
        """Attach `node` on the named side ("left" or "right")."""
        if side == "left":
            self.setLeft(node)
        elif side == "right":
            self.setRight(node)

    def getRight(self):
        return self.right

    # Generic insert function that prioritizes left.
    # Returns True on success, False on failure (both slots taken).
    # Overwrite to allow for more meaningful trees
    def insert(self, node):
        if not self.getLeft():
            self.setLeft(node)
            return True
        if not self.right:
            self.setRight(node)
            return True
        return False

    def isConnected(self, node):
        """True when one node is the direct parent of the other."""
        return self.getParent() is node or node.getParent() is self

    # Switches references from parents and children of this node and another node
    def switch(self, node):
        if not self.isConnected(node):
            tempParent = self.getParent()
            tempLeft = self.getLeft()
            tempRight = self.getRight()
            tempSide = self.getSide()
            self.setLeft(node.getLeft())
            self.setRight(node.getRight())
            self.setParent(node.getParent(), node.getSide())
            node.setLeft(tempLeft)
            node.setRight(tempRight)
            node.setParent(tempParent, tempSide)
        else:
            # parent/child pairs need the special-case handling below
            self.switchImmediate(node)

    def switchImmediate(self, node):
        """Swap two directly-connected nodes (a parent and its child)."""
        selfParent = self.getParent()
        nodeParent = node.getParent()
        selfLeft = self.getLeft()
        nodeLeft = node.getLeft()
        selfRight = self.getRight()
        nodeRight = node.getRight()
        selfSide = self.getSide()
        nodeSide = node.getSide()
        # Reference modifications because they are connected
        if selfParent is node:
            selfParent = self
            nodeLeft = nodeLeft if nodeLeft is not self else node
            nodeRight = nodeRight if nodeRight is not self else node
        elif nodeParent is self:
            nodeParent = node
            selfLeft = selfLeft if selfLeft is not node else self
            selfRight = selfRight if selfRight is not node else self
        else:
            # Bug fix: Python 2 print statement -> print() call
            print("How'd we get here?!")
            return
        self.setParent(nodeParent, nodeSide)
        node.setParent(selfParent, selfSide)
        self.setLeft(nodeLeft)
        node.setLeft(selfLeft)
        self.setRight(nodeRight)
        node.setRight(selfRight)

    def inorder(self):
        """Left-root-right traversal as 'a, b, c' (trailing ', ' stripped)."""
        return self._inorder()[:-2]

    def _inorder(self):
        leftString = self.left._inorder() if self.left else ""
        rightString = self.right._inorder() if self.right else ""
        return leftString + str(self.get()) + ", " + rightString

    def preorder(self):
        # NOTE(review): despite the name, this is a breadth-first
        # (level-order) traversal -- kept as-is since callers may rely
        # on the output order.
        queue = [self]
        output = ""
        while queue:
            node = queue[0]
            queue = queue[1:]
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
            output = output + str(node.get()) + ", "
        return output[0:-2]
class BinarySortNode(BinaryNode):
    """Binary search tree node: left subtree < data < right subtree."""

    # Binary Sort Node find method. O(h) time, h = tree height.
    def find(self, data):
        # Bug fix: compare payloads with == instead of `is`; identity
        # comparison fails for equal-but-distinct objects (e.g. ints >= 257
        # or runtime-built strings).
        if self.get() == data:
            return self
        if self.get() < data and self.right:
            return self.right.find(data)
        if self.get() > data and self.left:
            return self.left.find(data)
        return None  # No match found

    def findMin(self):
        """Leftmost (smallest) node of this subtree."""
        if self.left:
            return self.left.findMin()
        return self

    def findMax(self):
        """Rightmost (largest) node of this subtree."""
        if self.right:
            return self.right.findMax()
        return self

    def insert(self, data):
        """Insert a value (or node); returns False for duplicates."""
        if type(data) is not type(self):
            # wrap raw values in a node
            data = BinarySortNode(data)
        # Bug fix: == instead of `is` for the duplicate check (see find)
        if data.get() == self.get():
            return False
        if data.get() > self.get():
            if self.right:
                return self.right.insert(data)
            self.setRight(data)
            return True
        if data.get() < self.get():
            if self.left:
                return self.left.insert(data)
            self.setLeft(data)
            return True
        return "Why am I here?"  # unreachable for totally ordered keys

    # Finds node. If found, removes references and returns the node
    # returns False if not found
    def delete(self, data):
        node = self.find(data)
        if node:
            # NOTE(review): deleting the root (node.parent is None) raises
            # AttributeError here, as in the original -- confirm callers
            # never delete the root.
            if node.left and node.right:
                # replace with the in-order successor, then detach
                node.switch(node.right.findMin())
                node.parent.setChild(None, node.side)
                # TODO: remove references to node from the new parent
            elif node.left:
                node.parent.setChild(node.left, node.side)
            elif node.right:
                node.parent.setChild(node.right, node.side)
            else:
                node.parent.setChild(None, node.side)
            return node
        return False
def test1():
    """Build a full 3-level tree, swap a connected pair, print level order."""
    n1 = BinaryNode(1)
    n2 = BinaryNode(2)
    n3 = BinaryNode(3)
    n4 = BinaryNode(4)
    n5 = BinaryNode(5)
    n6 = BinaryNode(6)
    n7 = BinaryNode(7)
    n1.setLeft(n2)
    n1.setRight(n3)
    n2.setLeft(n4)
    n2.setRight(n5)
    n3.setLeft(n6)
    n3.setRight(n7)
    # n6 is n3's child, so this exercises the switchImmediate path
    n6.switch(n3)
    # Bug fix: Python 2 print statement -> print() call
    print(n1.preorder())
def test2():
    """Exercise BST insert/delete and both traversals."""
    root = BinarySortNode(4)
    root.insert(2)
    root.insert(6)
    root.insert(1)
    root.insert(3)
    root.insert(5)
    root.insert(7)
    root.delete(7)
    # Bug fix: Python 2 print statements -> print() calls
    print("preorder: ", root.preorder())
    print("inorder: ", root.inorder())
test2()
|
from karel.stanfordkarel import *
def pave_all_hurdles():
    """
    Master routine: Karel crosses the whole board, paving every hurdle
    with beepers along the way.

    pre:  Karel is at (1,1) facing north; the first hurdle is to his right;
          there are no beepers on the board.
    post: every hurdle is covered in beepers (both sides and the top) and
          Karel is in the bottom-right corner facing north, wall ahead.
    """
    while front_is_clear():
        pave_hurdle()
        move_to_wall()
    put_beeper()  # pave the final corner square, which the loop never reaches
def pave_hurdle():
    """
    Pave one hurdle: leave a beeper on every square Karel occupies while
    climbing over it, ending on the far side.

    pre:  Karel is at the bottom-left of a hurdle, facing north.
    post: the hurdle is surrounded by beepers and Karel is past it, facing east.
    """
    move_with_wall_on_right()   # climb the left side
    put_beeper()
    turn_right()
    move()
    move_with_wall_on_right()   # cross the top
    put_beeper()
    turn_right()
    move()
    move_to_wall()              # descend the right side down to the bottom wall
def move_to_wall():
    """
    Drop a beeper on each square straight ahead until a wall is reached,
    then turn left.

    Used twice per hurdle: once to descend its far side to the bottom wall,
    and once more to travel along the bottom to the next hurdle (or the
    final wall).

    pre:  a clear, beeper-free path to a wall lies ahead (direction varies
          by call site).
    post: Karel is at that wall, turned left, with a beeper trail behind him.
    """
    while front_is_clear():
        put_beeper()
        move()
    turn_left()
def move_with_wall_on_right():
    """
    The "up and over" leg: while a wall is on Karel's right, drop a beeper
    and step forward.

    pre:  wall on Karel's right; the path ahead is clear and beeper-free.
    post: Karel is one square past the end of the wall (right side clear),
          leaving a trail of beepers behind him.
    """
    # NOTE(review): if the front were ever blocked while the right side is
    # still walled, this would loop forever -- correctness relies on the
    # rectangular hurdle shape the assignment guarantees.
    while not right_is_clear():
        if front_is_clear():
            put_beeper()
            move()
def turn_right():
    """Turn 90 degrees right, expressed as three successive left turns."""
    turn_left()
    turn_left()
    turn_left()
def turn_around():
    # Two left turns = 180 degrees.
    turn_left()
    turn_left()
def main():
pave_all_hurdles()
####### DO NOT EDIT CODE BELOW THIS LINE ########
if __name__ == '__main__':
execute_karel_task(main)
|
print('welcome to my guess game:')
print('you have only 9 turn if you dont guess within 9 you will lost this game')

# The secret number. BUG FIX: the original declared `n = 18` but then
# compared against the literal 18 everywhere; all comparisons now use `n`.
n = 18
no_of_guess = 9  # guesses remaining

x = int(input('enter your guess number: '))
while True:
    no_of_guess -= 1
    if x == n:
        print('congratulation you won this battel', no_of_guess)
        break
    if no_of_guess < 1:
        # BUG FIX: the original fell through without `break` here, printing
        # "GAME OVER" forever; it also discarded the 9th guess unevaluated.
        print('your turn over: \n GAME OVER \n YOU LOST')
        break
    if x > n:
        print('your number is larger than guess number so input small number \n number of guess left:', no_of_guess)
    else:
        print('your number is smaller than guess number so input big number \n number of guess left:', no_of_guess)
    x = int(input('enter your guess number: '))
|
# IMC (BMI) calculator: reads name/height/weight, echoes them, then prints
# the diagnostic band for the computed index. Output is identical to the
# original; the cascading elif chain is replaced by a band-lookup loop.
print('---' * 20)
print(f'{"CÁLCULO IMC":^60}')
print()

nome = str(input('Informe o seu nome: '))
altura = float(input('Informe a sua altura: '))
peso = float(input('Informe o seu peso: '))

print()
print()
print('--' * 20)
print(f'{"DADOS INFORMADOS":^40}')
print(f'Nome: {nome}')
print(f'Altura: {altura}')
print(f'Peso: {peso}')
print()
print()
print('--' * 20)
print(f'{"DIAGNÓSTICO":^40}')
print()

imc = peso / (altura * altura)

# Exclusive upper bound of each band, paired with its label; the final
# band (morbid obesity) is the fallback when no bound matches.
faixas = (
    (17, 'Muito abaixo do peso'),
    (18.50, 'Abaixo do peso'),
    (25, 'Peso normal'),
    (30, 'Acima do peso'),
    (35, 'Obesidade I'),
    (40, 'Obesidade II(severa)'),
)
situacao = 'Obesidade III(mórbida)'
for limite, rotulo in faixas:
    if imc < limite:
        situacao = rotulo
        break
print(f'IMC: {imc:.2f} - Situação: {situacao}')
print('--' * 20)
|
""" The purpose of this file is to determine the maximimum value in a list """
# Requirements: "Please write a Python function, max_num_in_list to return the max number of a given list. The first line of the code has been defined as below. def max_num_in_list(a_list):"
def max_num_in_list(a_list):
    """Print and return the largest number in `a_list`."""
    largest = max(a_list)
    print(largest)
    return largest


# Demo: run the function on the provided sample data.
l_provided = [1, 200, 500, 300, 700, 20]
max_num_in_list(l_provided)
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*
import sys
#import os
#import math
###### Read in file
# f = open("test.txt", "r")
f = open("rosalind_ins.txt", "r")
toSort = f.readline().rstrip()
myList = f.readline().rstrip().split()
myList = [int(i) for i in myList]
# print("input:", myList)
# sys.exit()
num_swap = 0
for i in range(1, len(myList)):
k = i
while (k > 0) and (myList[k] < myList[k-1]):
#swap the two positions
t = myList[k-1]
myList[k-1] = myList[k]
myList[k] = t
#step backward in case of the current number can be placed much before
k = k - 1
#Count the number of swap perfomed in total
num_swap += 1
print("number of swap:", num_swap)
#Algo
# for i = 2:n,
# for (k = i; k > 1 and a[k] < a[k-1]; k--)
# swap a[k,k-1]
# k = k - 1
# -> invariant: a[1..i] is sorted
#end
|
#!/Users/courtine/anaconda3/bin/python
# -*- coding:utf-8 -*
# k homozygous dominant
# m heterozygous recessive
# n homozygous recessive
#### Better solution from Rosalind
#def firstLaw(k,m,n):
# N = float(k+m+n)
# return 1 - ( m*n + .25*m*(m-1) + n*(n-1) ) / ( N*(N-1) )
####
k, m, n = 20, 21, 23
'''Probability that two randomly chosen organisms mate and produce an
individual carrying at least one dominant allele.'''
# Work in floats throughout.
k, m, n = map(float, (k, m, n))
total = k + m + n

pk = k / total
pm = m / total
pn = n / total

# Start from certainty and subtract each pairing's chance of producing a
# homozygous-recessive child.
prob = 1
# both parents homozygous recessive: every child is aa
prob -= pn * ((n - 1) / (total - 1))
# one homozygous recessive + one heterozygous: two draw orders
# (Aa x aa and aa x Aa), half of the offspring are aa
prob -= 2 * pn * (m / (total - 1)) * 0.5
# both heterozygous: a quarter of the offspring are aa
prob -= pm * ((m - 1) / (total - 1)) * 0.25
print(prob)
###'''Also possible to accomplish by calculating the dominant
###allele probabilities instead'''
###
###def prob_dom_alleles(k,m,n):
### k,m,n = map(float, (k,m,n))
### t = k+m+n
###
### # Simply add up probabilities of possessing dominant allele
### # AA x anything
### prob = k/t
###
### # Aa x AA
### prob += pm*(k/(t-1))
###
### # Aa x Aa
### prob += pm*((m-1)/(t-1))*.75)
###
### # Aa x aa & aa x Aa
### prob += 2*pm*(n/(t-1)*.5))
###
### # aa x AA
### prob += pn*(k/(t-1))
###return prob
|
## This is an inheritance example where the child class does not add parameters.
## Hence `pass` is used as its body.
class person:
    """Simple person record holding a first and last name."""

    def __init__(self, fname, lname):
        # Idiom fix: the receiver is conventionally named `self`; the
        # original named it `person`, shadowing the class name inside
        # __init__. Positional, so callers are unaffected.
        self.fname = fname
        self.lname = lname

    def printname(self):
        """Print a greeting with the full name."""
        print("Hello! "+self.fname+" "+self.lname)
class student(person):
    # Inherits __init__ and printname unchanged from person; no extra
    # attributes or behavior, hence the empty body.
    pass
def funCall():
    """Demo: the inherited printname works on both parent and child classes."""
    parent = person('Nilesh', 'Nehete')
    child = student('Yushan', 'Nehete')
    parent.printname()
    child.printname()


if __name__ == '__main__':
    funCall()
|
"""
6. Class tree links. In “Namespaces: The Whole Story” in Chapter 29 and in “Multiple
Inheritance: ‘Mix-in’ Classes” in Chapter 31, we learned that classes have a
__bases__ attribute that returns a tuple of their superclass objects (the ones listed
in parentheses in the class header). Use __bases__ to extend the lister.py mix-in
classes we wrote in Chapter 31 so that they print the names of the immediate
superclasses of the instance’s class. When you’re done, the first line of the string
representation should look like this (your address will almost certainly vary):
<Instance of Sub(Super, Lister), address 7841200:
"""
class ListInstance:
    """Mix-in giving instances a descriptive __str__.

    The string shows the class name, the names of the class's immediate
    superclasses (via __bases__), the object's id, and one tab-indented
    name=value line per instance attribute.
    """

    def __attrnames(self):
        # One "\tname=value\n" line per instance attribute, sorted by name.
        return ''.join('\t%s=%s\n' % (name, self.__dict__[name])
                       for name in sorted(self.__dict__))

    def __supers(self):
        # Immediate superclass names only -- one level up from the class.
        return ', '.join(cls.__name__ for cls in self.__class__.__bases__)

    def __str__(self):
        return '<Instance of %s(%s), address %s:\n%s>' % (
            self.__class__.__name__,
            self.__supers(),
            id(self),
            self.__attrnames())
# Or: ', '.join(super.__name__ for super in self.__class__.__bases__)
if __name__ == "__main__":
class Super:
def __init__(self):
self.data1 = 'spam'
class Sub(Super, ListInstance):
def __init__(self):
Super.__init__(self)
self.data2 = 'eggs'
self.data3 = 42
X = Sub()
print(X)
print(X.__dict__)
print(X.__class__)
print(X.__class__.__bases__)
print(X.__class__.__bases__[0].__name__)
print(X.__class__.__bases__[1].__name__)
print(X.__class__.__bases__[1].__bases__[0].__name__)
|
# Lambda is commonly used to code jump tables
# which are lists/dicts of actions to be performed on demand
L = [lambda x: x ** 2, # Inline function definition
lambda x: x ** 3,
lambda x: x ** 4] # A list of three callable functions
for f in L:
print(f(2)) # Prints 4, 8, 16
print(L[0](3)) # Prints 9
"Multiway branch switches: The finale"
# actions tables
key = 'got'
{'already': (lambda: 2 + 2),
'got': (lambda: 2 * 4),
'one': (lambda: 2 ** 6)}[key]()
# def equivalent
def f1(): return 2 + 2
def f2(): return 2 * 4
def f3(): return 2 ** 6
key = 'one'
{'already': f1, 'got': f2, 'one': f3}[key]()
'''
How (Not) to Obfuscate Your Python Code
if a:
b
else:
c
b if a else c
((a and b) or c)
'''
lower = (lambda x, y: x if x < y else y)
print(lower('bb', 'aa'))
print(lower('aa', 'bb'))
"Loops"
import sys
showall = lambda x: list(map(sys.stdout.write, x)) # 3.X: must use list
t = showall(['spam\n', 'toast\n', 'eggs\n']) # 3.X: can use print
showall = lambda x: [sys.stdout.write(line) for line in x]
t = showall(('bright\n', 'side\n', 'of\n', 'life\n'))
showall = lambda x: [print(line, end='') for line in x] # Same: 3.X only
showall = lambda x: print(*x, sep='', end='') # Same: 3.X only
|
X = set('bimri')
Y = {'i', 'm', 'r', 'ii'}
print(X, Y) # Prints both sets (two separate print arguments)
print(X & Y) # Intersection
print(X | Y) # Union
print(X - Y) # Difference
print(X > Y) # Superset test
# Preferable use cases for sets
print(list(set([1,2,1,3,1]))) # Filtering out duplicates (possibly reordered)
print(set('spam') - set('ham')) # Finding differences in collections
print(set('spam') == set('asmp')) # Order-neutral equality test (True here)
# in membership tests
print('p' in set('spam'), 'p' in 'spam', 'ham' in ['eggs', 'spam', 'ham'])
|
'''
Common Python expression statements:
spam(eggs, ham) Function calls
spam.ham(eggs) Method calls
spam Printing variables in the interactive interpreter
print(a, b, c, sep='') Printing operations in Python 3.X
yield x ** 2 Yielding expression statements
'''
x = print('spam') # print is a function call expression in 3.X
print(x) # But it is coded as an expression statement
'''Expression Statements and In-Place Changes'''
L = [1, 2]
L.append(3) # Append is an in-place change
print(L)
# Mistake - newcomers
L = L.append(4) # But append returns None, not L
print(L) # So we lose our list!
# KEY POINT: -
'''call in-place change operations without as
signing their results'''
|
"Computed Attributes"
class DescSquare:
    """Descriptor whose reads return the square of the stored value.

    State lives on the descriptor object itself, so it is shared across
    every instance of the owning class.
    """

    def __init__(self, start):
        self.value = start

    def __get__(self, instance, owner):
        # Attribute fetch: square on the way out.
        return self.value ** 2

    def __set__(self, instance, value):
        # Attribute assignment: store raw. No __delete__ or docs defined.
        self.value = value
class Client1:
X = DescSquare(3) # Assign descriptor instance to class attr
class Client2:
X = DescSquare(32) # Another instance in another client class
# Could also code two instances in same class
c1 = Client1()
c2 = Client2()
print(c1.X) # 3 ** 2
c1.X = 4
print(c1.X) # 4 ** 2
print(c2.X) # 32 ** 2 (1024)
'Using State Information in Descriptors'
'''
Descriptors can use both instance state and descriptor state, or any combination thereof:
• Descriptor state is used to manage either data internal to the workings of the descriptor,
or data that spans all instances. It can vary per attribute appearance (often,
per client class).
• Instance state records information related to and possibly created by the client class.
It can vary per client class instance (that is, per application object).
descriptor state is per-descriptor data and instance state is per-clientinstance data.
'''
class DescState:
def __init__(self, value):
self.value = value
def __get__(self, instance, owner): # On attr fetch
print('DescState get')
return self.value * 10
def __set__(self, instance, value): # On attr assign
print('DescState set')
self.value = value
# Client class
class CalcAttrs:
X = DescState(2) # Descriptor class attr
Y = 3 # Class attr
def __init__(self):
self.Z = 4 # Instance attr
obj = CalcAttrs()
print(obj.X, obj.Y, obj.Z) # X is computed, others are not
obj.X = 5 # X assignment is intercepted
CalcAttrs.Y = 6 # Y reassigned in class
obj.Z = 7 # Z assigned in instance
print(obj.X, obj.Y, obj.Z)
obj2 = CalcAttrs() # But X uses shared data, like Y!
print(obj2.X, obj2.Y, obj2.Z)
"""
It’s also feasible for a descriptor to store or use an attribute attached to the client class’s
instance, instead of itself. Crucially, unlike data stored in the descriptor itself, this allows
for data that can vary per client class instance.
"""
class InstState:
def __get__(self, instance, owner):
print('InstState get') # Assume set by client class
return instance._X * 10
def __set__(self, instance, value):
print('InstState set')
instance._X = value
# Client class
class CalcAttrs:
X = InstState() # Descriptor class attr
Y = 3
def __init__(self):
self._X = 2 # Instance attr
self.Z = 4 # Instance attr
obj = CalcAttrs()
print(obj.X, obj.Y, obj.Z) # X is computed, others are not
obj.X = 5
CalcAttrs.Y = 6 # X assigned is intercepted
obj.Z = 7 # Z assigned in instance
print(obj.X, obj.Y, obj.Z)
obj2 = CalcAttrs()
print(obj2.X, obj2.Y, obj2.Z)
class DescBoth:
def __init__(self, data):
self.data = data
def __get__(self, instance, owner):
return '%s, %s' % (self.data, instance.data)
def __set__(self, instance, value):
instance.data = value
class Client:
def __init__(self, data):
self.data = data
managed = DescBoth('spam')
I = Client('eggs')
print(I.managed)
I.managed = 'SPAM' # Change instance data
print(I.managed)
"""
Whether you should
access these this way probably varies per program—properties and descriptors may
run arbitrary computation, and may be less obviously instance “data” than slots:
"""
print(I.__dict__)
print(
[x for x in dir(I) if not x.startswith('__')]
)
print(getattr(I, 'data'))
print(getattr(I, 'managed'))
for attr in (x for x in dir(I) if not x.startswith('__')):
print('%s => %s' % (attr, getattr(I, attr)))
|
"Scopes and try except Variables"
try:
1/0
except Exception as X:
print(X)
# print(X) # NameError: name 'X' is not defined
'''
Unlike compression loop variables, though, this variable is removed after the except
block exits in 3.X. It does so because it would otherwise retain a reference to the runtime
call stack, which would defer garbage collection and thus retain excess memory space.
This removal occurs, though, even if you’re using the name elsewhere, and is more
extreme policy than that used for comprehensions:
'''
X = 89
try:
1/0
except Exception as X: # 3.X localizes _and_ removes on exit
print(X)
# print(X) # NameError: name 'X' is not defined
X = 99
c = {X for X in 'spam'} # 2.X/3.X localizes only: not removed
print(c)
print(X)
'''
Because of this, you should generally use unique variable names in your try statement’s
except clauses, even if they are localized by scope. If you do need to reference the
exception instance after the try statement, simply assign it to another name that won’t
be automatically removed:
'''
try:
1 / 0
except Exception as X: # Python removes this reference
print(X)
Saveit = X # Assign exc to retain exc if needed
# print(X)
print("Saveit = " + str(Saveit))
|
''' 1. The Basics'''
print(2 ** 16) # 2 raised to the power 16
print(2/5, 2/5.0) # Integer / truncates in 2.X, but not 3.X
# Strings
print('spam' + 'eggs') # Concatenation
S = 'ham'
print("eggs " + S)
print(S * 5) # Repetition
S[:0] # slicing
print(S)
print("green %s and %s" % ("eggs", S)) # formatting
print('green {0} and {1}'.format('eggs', S))
#Tuples
print(('x',)[0]) # Indexing a single-item tuple
print(('x', 'y')[1]) # Indexing a two-item tuples
# Lists
L = [1,2,3] + [4,5,6] # List operations
print(L, L[:], L[:0], L[-2], L[-2:])
print(([1,2,3] + [4,5,6])[2:4])
print(L)
print([L[2], L[3:]]) # Fetch from offsets; store in a list
L.reverse(); print(L) # Method: reverse list in place
L.sort(); print(L) # Method: sort list in place
print(L.index(4)) # Method: offset of first four (search)
# Dictionaries
print({'a':1, 'b':2}['b']) # Index a dictionary by key
D = {'x':1, 'y':2, 'z':3}
D['w'] = 0 # Create a new entry
print(D['x'] + D['w'])
D[(1,2,3)] = 4 # A tuple used as a key (immutable)
print(D)
print(list(D.keys()), list(D.values()), (1,2,3) in D) # Methods, key test
# Empties
print([[]], ["", [], (), {}, None]) # Lots of nothings: empty objects
|
"The try/except/else Statement"
'''
Syntactically, the try is a compound, multipart statement. It starts with a try header
line, followed by a block of (usually) indented statements; then one or more except
clauses that identify exceptions to be caught and blocks to process them; and an optional
else clause and block at the end. You associate the words try, except, and
else by indenting them to the same level (i.e., lining them up vertically). For reference,
here’s the general and most complete format in Python 3.X:
'''
try:
statements # Run this main action first
except name1:
statements # Run if name1 is raised during try block
except (name2, name3):
statements # Run if any of these exceptions occur
except name4 as var:
statements # Run if name4 is raised, assign instance raised to var
except:
statements # Run for all other exceptions raised
else:
statements # Run if no exception was raised during try block
"""
Semantically, the block under the try header in this statement represents the main
action of the statement—the code you’re trying to run and wrap in error processing
logic. The except clauses define handlers for exceptions raised during the try block,
and the else clause (if coded) provides a handler to be run if no exceptions occur.
"""
|
"Raising Exceptions"
'''
the following two forms are equivalent—both raise an instance of the exception class named,
but the first creates the instance implicitly:
'''
raise IndexError # Class (instance created)
raise IndexError() # Instance (created in statement)
'''
We can also create the instance ahead of time—because the raise statement accepts
any kind of object reference, the following two examples raise IndexError just like the
prior two:
'''
exc = IndexError() # Create instance ahead of time
raise exc
excs = [IndexError, TypeError]
raise excs[0]
'''
When an exception is raised, Python sends the raised instance along with the exception.
If a try includes an except name as X: clause, the variable X will be assigned the instance
provided in the raise:
The as is optional in a try handler (if it’s omitted, the instance is simply not assigned
to a name), but including it allows the handler to access both data in the instance and
methods in the exception class.
'''
try:
...
except IndexError as X: # X assigned the raise instance object
...
# model works the same for user-defined exceptions
class MyExc(Exception): pass
...
raise MyExc('spam') # Exception class with constructor args
...
try:
...
except MyExc as X: # Instance attributes in handler
print(X.args)
"""
Regardless of how you name them, exceptions are always identified by class instance
objects, and at most one is active at any given time. Once caught by an except clause
anywhere in the program, an exception dies (i.e., won’t propagate to another try),
unless it’s reraised by another raise statement or error.
"""
|
'''
fundamental built-in tools such as range, map, dictionary
keys, and even files are now generators
don’t complicate your code with user-defined generators if
they are not warranted. Especially for smaller programs and
data sets
simple lists of results will suffice, will be easier to understand, will be
garbage-collected automatically, and may be produced quicker
Explicit is better than implicit == EIBTI
Always: keep it simple unless it must be complicated!
'''
# On the other hand: Space and time, conciseness, expressiveness
'''
there are specific use cases that generators can address well. They can
reduce memory footprint in some programs, reduce delays in others, and can occasionally
make the impossible possible.
for example, a program that must
produce all possible permutations of a nontrivial sequence.
'''
import math
print(
math.factorial(10)
)
from permute import permute1, permute2
seq = list(range(10))
# the list builder pauses for 37 seconds on my computer to build a 3.6-millionitem list
p1 = permute1(seq) # 37 seconds on a 2GHz quad-core machine
# Creates a list of 3.6M numbers
print(
len(p1), p1[0], p1[1]
)
# but the generator can begin returning results immediately
p2 = permute2(seq) # Returns generator immediately
print(
next(p2) # And produces each result quickly on request
)
print(next(p2))
p2 = list(permute2(seq)) # About 28 seconds, though still impractical
print(p1 == p2)
math.factorial(50)
p3 = permute2(list(range(50)))
print(
next(p3) # permute1 is not an option here!
)
"yield results that are more variable and less obviously deterministic"
import random
math.factorial(20) # permute1 is not an option here
seq = list(range(20))
random.shuffle(seq) # Shuffle sequence randomly first
p = permute2(seq)
next(p)
next(p)
random.shuffle(seq)
p = permute2(seq)
next(p)
next(p)
'''
Class-based
iterables can produce items on request too, and are far more explicit than the magic
objects and methods produced for generator functions and expressions.
Like comprehensions, generators
also offer an expressiveness and code economy that’s hard to resist if you understand
how they work
'''
|
L = [1, 2, 3, 4, 5]
# In-place update: indexing via range(len(...)) is legitimate here because
# each slot of the existing list object is reassigned.
for i in range(len(L)):
    L[i] *= 10
print(L)
# List comprehension of above code (not identical though, for it makes a new list object)
L = [x * 100 for x in L]
print(L)
|
'''The range Iterable'''
R = range(10) # range returns an iterable, not list
print(R)
I = iter(R) # Make an iterator from the range iterable
print(next(I))
print(next(I)) # Advance to next result
print(next(I)) # What happens in for loops, comprehensions, etc.
# To force a list if required
print(
list(range(10))
)
'''The map, zip, and filter Iterables'''
# map returns an iterable, not a list
M = map(abs, (-1, 0, 1))
print(M)
print(next(M))
print(next(M))
print(next(M))
# print(next(M)) # stop iteration
# map iterator is now empty: one pass only
for x in M:
print(x)
# Make a new iterable/iterator to scan again
M = map(abs, (-1, 0, 1))
for x in M:
print(x)
# Can force a real list if needed
print(list(map(abs, (-1, 0, 1))))
'''ZIP built-in'''
# zip is the same: a one-pass iterator
Z = zip((1, 2, 3), (10, 20, 30))
print(Z)
print(list(Z))
# Exhausted after one pass
for pair in Z:
print(pair)
Z = zip((1, 2, 3), (10, 20, 30)) # reassign for a new pass
for pair in Z: print(pair) # Iterator used automatically or manually
# Manual iteration (iter() not needed)
Z = zip((1, 2, 3), (10, 20, 30))
print(next(Z))
print(next(Z))
'''The FILTER built-in'''
print(filter(bool, ['spam', '', 'ni']))
print(list(filter(bool, ['spam', '', 'ni'])))
# filter both accepts and returns an iterable to generate results
print(
[x for x in ['spam', 'ni'] if bool(x)]
)
print(
[x for x in ['spam', 'ni'] if x]
)
|
import sys
print('{1:10} = {1:10}'.format('spam', 123.3456))
print('{0:>10} = {1:<10}'.format('spam', 123.3456))
print('{0.platform} = {1[kind]:<10}'.format(sys, dict(kind='laptop')))
print('{:10} = {:10}'.format('spam', 123.4567))
print('{:>10} = {:<10}'.format('spam', 123.4567))
print('{.platform:>10} = {[kind]:<10}'.format(sys, dict(kind='laptop')))
'''
Floating-point numbers support the same type codes
and formatting specificity in formatting method calls
as in % expressions.
'''
print('{0:e}, {1:.3e}, {2:g}'.format(3.14159, 3.14159, 3.14159))
print('{0:f}, {1:.2f}, {2:06.2f}'.format(3.14159, 3.14159, 3.14159))
'''
Hex, octal, and binary formats are supported by the
format method as well
'''
print('{0:X}, {1:o}, {2:b}'.format(255, 255, 255)) # Hex, octal, binary
print(bin(255), int('11111111', 2), 0b11111111) # Other to/from binary
print(hex(255), int('FF', 16), 0xFF) # Other to/from hex
print(oct(255), int('377', 8), 0o377) # Other to/from octal, in 3.X
'''
Formatting parameters can either be hardcoded in format strings or taken from the
arguments list dynamically by nested format syntax
'''
print('{0:.2f}'.format(1 / 3.0)) # Parameters hardcoded
print('%.2f' % (1 / 3.0)) # Ditto for expression
print('{0:.{1}f}'.format(1 / 3.0, 4)) # Take value from arguments
print('%.*f' % (4, 1 / 3.0)) # Ditto for expression
# building data ahead of time in both
data = dict(platform=sys.platform, kind='laptop')
print('My {kind:<8} runs {platform:>8}'.format(**data))
print('My %(kind)-8s runs %(platform)8s' % data)
print('{:,d}'.format(999999999999))
print('{:,d} {:,d}'.format(9999999, 8888888))
print('{:,.2f}'.format(296999.2567))
|
# Embedding-based Manager alternative
class Person:
    """A named employee with an optional job title and salary."""

    def __init__(self, name, job=None, pay=0):
        self.name = name
        self.job = job
        self.pay = pay

    def lastName(self):
        """The final whitespace-separated component of the name."""
        return self.name.rsplit(None, 1)[-1]

    def giveRaise(self, percent):
        """Grow pay by `percent` (e.g. .10 == 10%), truncated to int."""
        self.pay = int(self.pay * (1 + percent))

    def __repr__(self):
        # Display format shared by Manager's delegation-based __repr__.
        return '[Person: %s, %s]' % (self.name, self.pay)
class Manager:
    """Delegation-based Manager alternative.

    A composite structure: a Person object is embedded rather than
    inherited from; Manager intercepts the calls it must customize and
    forwards everything else to the wrapped object.
    """

    def __init__(self, name, pay):
        # Job title is implied by the class itself.
        self.person = Person(name, 'mgr', pay)

    def giveRaise(self, percent, bonus=.10):
        # Intercept and delegate: managers always get a bonus on top.
        self.person.giveRaise(percent + bonus)

    def __getattr__(self, attr):
        # Delegate all other attribute fetches to the embedded Person.
        return getattr(self.person, attr)

    def __repr__(self):
        # Must overload again explicitly in 3.X: __getattr__ does not
        # intercept builtin operations' implicit dunder lookups.
        return str(self.person)
if __name__ == '__main__':
oluchi = Person('Oluchi Ibeneme')
nyathi = Person('Pamela Nyathi', job='lab technician', pay=250000)
print(oluchi)
print(nyathi)
print(nyathi.lastName(), oluchi.lastName())
nyathi.giveRaise(.10)
print(nyathi)
rio = Manager('Rio Je', 500000) # Job name not needed:
rio.giveRaise(.10) # Implied/set by class
print(rio.lastName())
print(rio)
|
"Metaclasses Versus Class Decorators: Round 3 (and Last)"
# Class decorator factory: apply any decorator to all methods of a class
from types import FunctionType
from decotools import tracer, timer
def decorateAll(decorator):
    """Class-decorator factory: wrap every plain function attribute
    (i.e. every method) of the decorated class with `decorator`,
    rebinding via setattr and returning the original, augmented class."""
    def DecoDecorate(aClass):
        # Snapshot the items first so rebinding attributes cannot disturb
        # the dict view being iterated.
        for attr, attrval in list(aClass.__dict__.items()):
            if isinstance(attrval, FunctionType):
                setattr(aClass, attr, decorator(attrval))
        return aClass
    return DecoDecorate
@decorateAll(tracer) # Use a class decorator
class Person: # Applies func decorator to methods
def __init__(self, name, pay): # Person = decorateAll(..)(Person)
self.name = name # Person = DecoDecorate(Person)
self.pay = pay
def giveRaise(self, percent):
self.pay *= (1.0 + percent)
def lastName(self):
return self.name.split()[-1]
bob = Person('Bob Smith', 50000)
sue = Person('Sue Jones', 100000)
print(bob.name, sue.name)
sue.giveRaise(.10)
print('%.2f' % sue.pay)
print(bob.lastName(), sue.lastName())
'''
Notice that the class decorator returns the original, augmented class, not a wrapper
layer for it (as is common when wrapping instance objects instead). As for the metaclass
version, we retain the type of the original class—an instance of Person is an instance of
Person, not of some wrapper class. In fact, this class decorator deals with class creation
only; instance creation calls are not intercepted at all.
This distinction can matter in programs that require type testing for instances to yield
the original class, not a wrapper. When augmenting a class instead of an instance, class
decorators can retain the original class type. The class’s methods are not their original
functions because they are rebound to decorators.
'''
"""
To use this scheme to apply the timer
decorator, for example, either of the last two decoration lines in the following will
suffice if coded just before our class definition—the first uses decorator argument defaults,
and the second provides one explicitly:
"""
# @decorateAll(tracer) # Decorate all with tracer
# @decorateAll(timer()) # Decorate all with timer, defaults
# @decorateAll(timer(label='@@')) # Same but pass a decorator argument
"""
Finally, it’s possible to combine decorators such that each runs per method call, but it
will likely require changes to those we’ve coded here. As is, nesting calls to them directly
winds up tracing or timing the other’s creation-time application, listing the two on
separate lines results in tracing or timing the other’s wrapper before running the original
method, and metaclasses seem to fare no better on this front:
"""
# @decorateAll(tracer(timer(label='@@'))) # Traces applying the timer
# class Person:
# @decorateAll(tracer) # Traces onCall wrapper, times methods
# @decorateAll(timer(label='@@'))
# class Person:
# @decorateAll(timer(label='@@'))
# @decorateAll(tracer) # Times onCall wrapper, traces methods
# class Person:
'''
metaclasses and class decorators are not only often interchangeable,
but also commonly complementary. Both provide advanced but powerful ways to customize
and manage both class and instance objects
'''
|
"Text and Binary Files"
# File I/O (input and output)
'''
Python now makes a sharp platform-independent distinction between text files and binary files;
~ Text files
When a file is opened in text mode, reading its data automatically decodes its content
and returns it as a str; writing takes a str and automatically encodes it before
transferring it to the file. Both reads and writes translate per a platform default or
a provided encoding name. Text-mode files also support universal end-of-line
translation and additional encoding specification arguments. Depending on the
encoding name, text files may also automatically process the byte order mark sequence
at the start of a file.
~ Binary files
When a file is opened in binary mode by adding a b (lowercase only) to the modestring
argument in the built-in open call, reading its data does not decode it in any
way but simply returns its content raw and unchanged, as a bytes object; writing
similarly takes a bytes object and transfers it to the file unchanged. Binary-mode
files also accept a bytearray object for the content to be written to the file.
'''
"""
Because the language sharply differentiates between str and bytes, you must decide
whether your data is text or binary in nature and use either str or bytes objects to
represent its content in your script, as appropriate. Ultimately, the mode in which you
open a file will dictate which type of object your script will use to represent its content:
• If you are processing image files, data transferred over networks, packed binary
data whose content you must extract, or some device data streams, chances are
good that you will want to deal with it using bytes and binary-mode files. You might
also opt for bytearray if you wish to update the data without making copies of it
in memory.
• If instead you are processing something that is textual in nature, such as program
output, HTML, email content, or CSV or XML files, you’ll probably want to use
str and text-mode files.
"""
'''
Notice that the mode string argument to built-in function open (its second argument)
becomes fairly crucial in Python 3.X—its content not only specifies a file processing
mode, but also implies a Python object type. By adding a b to the mode string, you specify
binary mode and will receive, or must provide, a bytes object to represent the file’s
content when reading or writing. Without the b, your file is processed in text mode,
and you’ll use str objects to represent its content in your script. For example, the modes
rb, wb, and rb+ imply bytes; r, w+, and rt (the default) imply str.
'''
"""
Text-mode files also handle the byte order marker (BOM) sequence that may appear at
the start of files under some encoding schemes.
"""
|
"Records Revisited: Classes Versus Dictionaries"
'''
dictionaries, tuples, and lists to record properties
of entities in our programs, generically called records. It turns out that classes can often
serve better in this role—they package information like dictionaries, but can also bundle
processing logic in the form of methods.
'''
rec = ('Bob', 40.5, ['dev', 'mgr']) # Tuple-based record
print(rec[0])
rec = {}
rec['name'] = 'Bob' # Dictionary-based record
rec['age'] = 40.5 # Or {...}, dict(n=v), etc.
rec['jobs'] = ['dev', 'mgr']
print(rec['name'])
'''
This code has substantially less syntax than the dictionary equivalent. It uses an empty
class statement to generate an empty namespace object. Once we make the empty
class, we fill it out by assigning class attributes over time, as before.
This works, but a new class statement will be required for each distinct record we will
need. We can instead generate instances of an empty class to
represent each distinct entity:
'''
class rec: pass
# Class-as-record: attributes assigned onto the (single) class object itself.
rec.name = 'Bob' # Class-based record
rec.age = 40.5
rec.jobs = ['dev', 'mgr']
print(rec.name)
class rec: pass
# make two records from the same class (this second empty class shadows the first).
"Instances start out life empty, just like classes. We then fill in the records by assigning to attributes."
'there are two separate objects, and hence two separate name attributes'
pers1 = rec() # Instance-based records
pers1.name = 'Bob'
pers1.jobs = ['dev', 'mgr']
pers1.age = 40.5
pers2 = rec()
pers2.name = 'Sue'
pers2.jobs = ['dev', 'cto'] # note: pers2 has no age attribute at all
print(pers1.name, pers2.name)
'''
Finally, we might instead code a more full-blown class to implement the record and its
processing—something that data-oriented dictionaries do not directly support:
'''
class Person:
    """Record class: bundles data fields with processing logic (methods)."""
    def __init__(self, name, jobs, age=None): # class = data = logic
        self.name, self.jobs, self.age = name, jobs, age
    def info(self):
        """Return the (name, jobs) pair on request."""
        return (self.name, self.jobs)
rec1 = Person('Bob', ['dev', 'mgr'], 40.5) # Construction calls
rec2 = Person('Sue', ['dev', 'cto']) # age omitted: defaults to None
print(rec1.jobs, rec2.info()) # Attributes + methods
'''
This scheme also makes multiple instances, but the class is not empty this time: we’ve
added logic (methods) to initialize instances at construction time and collect attributes
into a tuple on request. The constructor imposes some consistency on instances here
by always setting the name, job, and age attributes, even though the latter can be omitted
when an object is made. Together, the class’s methods and instance attributes create a
package, which combines both data and logic.
'''
"""
To be fair to other tools, in this form, the two class construction calls above more closely
resemble dictionaries made all at once, but still seem less cluttered and provide extra
processing methods. In fact, the class’s construction calls more closely resemble Chapter
9’s named tuples—which makes sense, given that named tuples really are classes
with extra logic to map attributes to tuple offsets:
>>> rec = dict(name='Bob', age=40.5, jobs=['dev', 'mgr']) # Dictionaries
>>> rec = {'name': 'Bob', 'age': 40.5, 'jobs': ['dev', 'mgr']}
>>> rec = Rec('Bob', 40.5, ['dev', 'mgr']) # Named tuples
"""
|
"__getattribute__ and Descriptors: Attribute Tools"
"""
the __getattribute__ operator overloading
method, available for new-style classes only, allows a class to intercept all attribute
references, not just undefined references. This makes it more potent than its __get
attr__ cousin, but also trickier to use—it’s prone to loops
much like __setattr__, but in different ways.
Python supports the notion of attribute descriptors—classes with
__get__ and __set__ methods, assigned to class attributes and inherited by instances,
that intercept read and write accesses to specific attributes.
"""
class AgeDesc(object):
    """Descriptor: reads always yield 40; writes land in instance._age."""
    def __get__(self, instance, owner):
        return 40
    def __set__(self, instance, value):
        instance._age = value

class descriptors(object):
    """Client class: accesses to 'age' route through AgeDesc."""
    age = AgeDesc()

x = descriptors()
print(x.age) # Runs AgeDesc.__get__
x.age = 42 # Runs AgeDesc.__set__
print(x._age) # Normal fetch: no AgeDesc call
'''
Descriptors have access to state in instances of themselves as well as their client class,
and are in a sense a more general form of properties; in fact, properties are a simplified
way to define a specific type of descriptor—one that runs functions on access.
'''
|
# range objects support multiple independent iterators
R = range(3)
# next(R) # TypeError: range object is not an iterator
I1 = iter(R)
print(next(I1))
print(next(I1))
I2 = iter(R) # Two iterators on one range
print(next(I2))
print(next(I2)) # I1 is at a different spot than I2
'''
zip, map, and filter do not support multiple active iterators on the same result
'''
Z = zip((1, 2, 3), (10, 11, 12))
I1 = iter(Z)
I2 = iter(Z) # Two iterators on one zip: both share one scan
print(next(I1))
print(next(I1))
print(next(I2)) # Continues where I1 left off: (3, 12)
M = map(abs, (-1, 0, 1)) # Ditto for map (and filter)
I1 = iter(M); I2 = iter(M)
print(next(I1), next(I1), next(I1)) # (3.X) Single scan is exhausted!
# print(next(I2)) # Would raise StopIteration: M is already spent
R = range(3) # But range allows many iterators
I1, I2 = iter(R), iter(R)
print(
    [next(I1), next(I1), next(I1)]
)
print(
    next(I2) # Multiple active scans, like 2.X lists
)
|
"Encodings converting"
'''
It’s also
possible to convert a string to a different encoding than its original, but we must provide
an explicit encoding name to encode to and decode from. This is true whether the
original text string originated in a file or a literal.
The term conversion may be a misnomer here—it really just means encoding a text
string to raw bytes per a different encoding scheme than the one it was decoded from.
Still, this scheme allows scripts to read data in one encoding and store it
in another, to support multiple clients of the same data:
Keep in mind that the special Unicode and hex character escapes are only necessary
when you code non-ASCII Unicode strings manually. In practice, you’ll often load such
text from files instead.
'''
B = b'A\xc3\x84B\xc3\xa8C' # Text encoded in UTF-8 format originally
S = B.decode('utf-8') # Decode to Unicode text per UTF-8
print(S)
T = S.encode('cp500') # Convert to encoded bytes per EBCDIC (cp500)
print(T)
U = T.decode('cp500') # Convert back to Unicode text per EBCDIC
print(U)
print(U.encode()) # Encode per the default utf-8 again: same bytes as B
|
"Metaclass Methods Versus Class Methods"
'''
Though they differ in inheritance visibility, much like class methods, metaclass methods
are designed to manage class-level data. In fact, their roles can overlap—much as
metaclasses do in general with class decorators—but metaclass methods are not accessible
except through the class, and do not require an explicit classmethod class-level
data declaration in order to be bound with the class. In other words, metaclass methods
can be thought of as implicit class methods, with limited visibility:
'''
class A(type):
    # Metaclass: its methods receive the client class (cls = B below)
    def a(cls): # Metaclass method: gets class
        cls.x = cls.y + cls.z
class B(metaclass=A):
    y, z = 11, 22
    @classmethod # Class method: gets class
    def b(cls):
        return cls.x
B.a() # Call metaclass method; visible to class only
B.x # Fetch class data created by a(); accessible to normal instances too
I = B()
I.x, I.y, I.z # Instances inherit class data (results discarded here)
I.b() # Class method: sends class, not instance; visible to instance
# I.a() # Metaclass methods: accessible through class only; AttributeError
|
'''
keyword-only arguments are coded as named arguments
that may appear after in the arguments list *args
'''
def kwonly(a, *b, c):
    # c is keyword-only: coded after *b, it can only be passed by name
    print(a, b, c)
kwonly(1, 2, c=3)
kwonly(a=1, c=3)
kwonly(1,2,3,4,5,6,c=9)
"TypeError: kwonly() missing 1 required keyword-only argument: 'c'"
# kwonly(1,2,3)
'''
We can also use a * character by itself in the arguments list to indicate that a function
does not accept a variable-length argument list but still expects all arguments following
the * to be passed as keywords. In the next function, a may be passed by position or
name again, but b and c must be keywords, and no extra positionals are allowed:
'''
def kwonly(a, *, b, c):
    # bare *: no varargs accepted; b and c must be passed by keyword
    print(a, b, c)
kwonly(1, c=3, b=2)
kwonly(c=30, b=20, a=100)
"TypeError: kwonly() takes 1 positional argument but 3 were given"
# kwonly(1, 2, 3)
"TypeError: kwonly() missing 2 required keyword-only arguments: 'b' and 'c'"
# kwonly(1)
'''
You can still use defaults for keyword-only arguments, even though they appear after
the * in the function header.
'''
def kwonly(a, *, b='spam', c='ham'):
    # keyword-only with defaults: b and c become optional keyword arguments
    print(a, b, c)
kwonly(1)
kwonly(1, c=3)
kwonly(a=1)
kwonly(c=3, b=2, a=1)
"TypeError: kwonly() takes 1 positional argument but 2 were given"
# kwonly(1, 2)
'''
keyword-only arguments with defaults are optional, but those without defaults
effectively become required keywords
'''
def kwonly(a, *, b, c='spam'):
    # b has no default, so it is a required keyword argument
    print(a, b, c)
kwonly(1, b='eggs')
"TypeError: kwonly() missing 1 required keyword-only argument: 'b'"
# kwonly(1, c='eggs')
"TypeError: kwonly() takes 1 positional argument but 2 were given"
# kwonly(1, 2)
def kwonly(a, *, b=1, c, d=2):
    # a required keyword (c) may appear between defaulted keyword-only args
    print(a, b, c, d)
kwonly(3, c=4)
kwonly(3, c=4, b=5)
"TypeError: kwonly() missing 1 required keyword-only argument: 'c'"
# kwonly(3)
"TypeError: kwonly() takes 1 positional argument but 3 were given"
# kwonly(1, 2, 3)
|
"annotation information"
# arbitrary user-defined data about a function’s
# arguments and result—to a function object
'annotations are completely optional'
#attached to the function object's __annotations__ attribute
# e.g. use annotations in the context of error testing
def func(a, b, c):
    # Plain version: no annotations attached
    return a + b + c
print(func(2, 3, 4))
# Syntactically, function annotations are coded in def header lines,
# associated with arguments and return values
def func(a: 'spam', b: (1, 10), c: float) -> int:
    # Annotations are arbitrary objects; Python attaches but does not enforce them
    return a + b + c
print(func(1,2,3))
'''
when annotations are present Python
collects them in a dictionary and
attaches it to the function object itself
'''
print(func.__annotations__)
def func(a: 'spam', b, c: 99):
    # Only annotated arguments appear in __annotations__ (b is omitted)
    return a + b + c
print(func(1,3,4))
print(func.__annotations__)
for arg in func.__annotations__:
    print(arg, '=>', func.__annotations__[arg])
'''
you can still use defaults for arguments if
you code annotations—the annotation (and its : character) appear before the default
(and its = character).
'''
def func(a: 'spam' = 4, b: (1,10) = 5, c: float =6) -> int:
    # Annotation comes before the default: name: annotation = default
    return a + b + c
print(func(1,2,3))
print(func()) # 4 + 5 + 6 (all defaults)
print(func(1, c=10)) # 1 + 5 + 10 (keywords work normally)
print(func.__annotations__)
'''
note that annotations work only in def statements,
not lambda expressions
'''
|
class Super:
    def hello(self):
        self.data1 = 'spam' # Assigning through self populates the instance
class Sub(Super):
    def hola(self):
        self.data2 = 'eggs'
X = Sub()
print(
    X.__dict__ # Instance namespace dict (empty until methods assign)
)
print(
    X.__class__ # Class of instance
)
print(
    Sub.__bases__ # Superclasses of class
)
print(
    Super.__bases__ # () empty tuple in Python 2.X; (object,) in 3.X
)
'''
As classes assign to self attributes, they populate the instance objects—that is, attributes
wind up in the instances’ attribute namespace dictionaries, not in the classes’.
An instance object’s namespace records data that can vary from instance to instance,
and self is a hook into that namespace:'''
Y = Sub()
X.hello() # Adds data1 to X only
print(X.__dict__)
X.hola() # Adds data2 to X only
print(X.__dict__)
print(list(Sub.__dict__.keys())) # Methods live in the class dicts, not instances
print(list(Super.__dict__.keys()))
print(Y.__dict__) # Y never ran hello/hola: still empty
'''
each instance has an independent namespace dictionary,
which starts out empty and can record completely different attributes than those
recorded by the namespace dictionaries of other instances of the same class.
'''
"""
Because attributes are actually dictionary keys inside Python, there are really two ways
to fetch and assign their values—by qualification, or by key indexing:
"""
print(
    X.data1, X.__dict__['data1'] # Same value fetched two ways
)
X.data3 = 'toast' # Assign by qualification...
print(X.__dict__)
X.__dict__['data3'] = 'ham' # ...or by namespace-dict key indexing
print(X.data3)
# This equivalence applies only to attributes actually attached to the instance
# Because attribute fetch qualification also performs an inheritance search, it can access
# inherited attributes that namespace dictionary indexing cannot.
"The inherited attribute X.hello, for instance, cannot be accessed by X.__dict__['hello']."
'try running these objects through the dir function'
'''
dir(X) is similar to
X.__dict__.keys(), but dir sorts its list and includes some inherited and built-in attributes.
'''
|
# sys.stdout is just a normal file object
import sys
x, y, z = 'spam', 12, [12, 23, 45]
temp = sys.stdout # Save for restoring later
sys.stdout = open('log1.txt', 'a') # Redirect prints to a file
print('spam') # Prints go to file, not here
print(1, 2, 3)
sys.stdout.close() # Flush output to disk
sys.stdout = temp # Restore original stream
print('back here') # Prints show up here again
print(open('log1.txt').read()) # Result of earlier prints
print()
print('ENTER FILE KEYWORD TO SAVE THE HUSTLE FROM ABOVE')
# FILE KEYWORD
log = open('log2.txt', 'a')
print(x, y, z, file=log) # Print to a file-like object
print('==> Orginal output not disturbed')
'''Redirects forms of print are handy if you need to
print to both files and the standard output stream
in the same program'''
log = open('log3.txt', 'w')
print(1, 3, 3, 4, file=log)
print(345, 78, 678, file=log)
log.close()
print(7, 8, 9)
# read outputs that were printed earlier to log3.txt file
print(open('log3.txt').read())
# PRINTING ERROR MESSAGES to the standart error stream
import sys
sys.stderr.write(('Bad!' * 8) + '\n')
print('Bad!' * 5, file=sys.stderr)
|
"Function Interfaces and Callback-Based Code"
class Callback:
    """Callable object: remembers a color and announces it when called."""
    def __init__(self, color):
        # Function + state information bundled together
        self.color = color
    def __call__(self):
        # Supports calls with no arguments, as GUI callbacks require
        print('turn', self.color)
if __name__ == '__main__':
    # NOTE(review): creating a Button implicitly needs a Tk root/display —
    # confirm this demo is run in a GUI-capable environment
    from tkinter import Button
    # Handlers
    cb1 = Callback('blue') # Remember blue
    cb2 = Callback('green') # Remember green
    B1 = Button(command=cb1) # Register handlers (instances are callable)
    B2 = Button(command=cb2)
    # Events: simulate what the GUI would do on a click
    cb1() # Prints 'turn blue'
    cb2() # Prints 'turn green'
"""
many consider such classes to be the best way to retain state information in the
Python language. With OOP, the
state remembered is made explicit with attribute assignments. This is different than
other state retention techniques (e.g., global variables, enclosing function scope references,
and default mutable arguments), which rely on more limited or implicit behavior.
Moreover, the added structure and customization in classes goes beyond state retention.
closure equivalent:
def callback(color): # Enclosing scope versus attrs
def oncall():
print('turn', color)
return oncall
cb3 = callback('yellow') # Handler to be registered
cb3() # On event: prints 'turn yellow'
there are two other ways that Python programmers sometimes tie
information to a callback function like this. One option is to use default arguments in
lambda functions:
cb4 = (lambda color='red': 'turn ' + color) # Defaults retain state too
print(cb4())
The other is to use bound methods of a class. A bound method object is a kind of object that remembers both the
self instance and the referenced function.
class Callback:
def __init__(self, color): # Class with state information
self.color = color
def changeColor(self): # A normal named method
print('turn', self.color)
cb1 = Callback('blue')
cb2 = Callback('yellow')
B1 = Button(command=cb1.changeColor) # Bound method: reference, don't call
B2 = Button(command=cb2.changeColor) # Remembers function + self pair
In this case, when this button is later pressed it’s as if the GUI does this, which invokes
the instance’s changeColor method to process the object’s state information, instead of
the instance itself:
cb1 = Callback('blue')
obj = cb1.changeColor # Registered event handler
obj() # On event prints 'turn blue'
"""
'''
Because __call__ allows us to attach
state information to a callable object, it’s a natural implementation technique for a
function that must remember to call another function when called itself.
'''
|
"List comprehensions with if clauses can be thought of as analogous to the filter built-in"
# Three equivalent ways to keep the even numbers in 0..4.
lc = [n for n in range(5) if n % 2 == 0]
print("list comprehension", lc)
fltr = list(filter(lambda n: n % 2 == 0, range(5)))
print("filter", fltr)
# Procedural
res = []
for n in range(5):
    if n % 2 == 0:
        res.append(n)
print("procedural", res)
'''
combine an if clause and an arbitrary expression in our list comprehension,
to give it the effect of a filter and a map
'''
lcm = [n ** 2 for n in range(10) if n % 2 == 0]
print("list compre", lcm)
# equivalent map call
mp = list(map(lambda n: n ** 2, filter(lambda n: n % 2 == 0, range(10))))
print("map call", mp)
|
from __future__ import print_function
from functools import reduce
from timeit import repeat
import math
def fact0(N): # Recursive
    """Factorial, recursive statement version.

    Base case is N <= 1 (was N == 1), so fact0(0) == 1 instead of
    recursing past zero forever; still hits the default recursion
    limit near N=1000.
    """
    if N <= 1:
        return 1
    else:
        return N * fact0(N-1)
def fact1(N):
    """Factorial, recursive one-liner (same N <= 1 base-case fix)."""
    return 1 if N <= 1 else N * fact1(N-1)
def fact2(N): # Functional
    """Factorial via functools.reduce; initializer 1 makes fact2(0) == 1
    (without it, reduce raises TypeError on the empty range)."""
    return reduce(lambda x, y: x * y, range(1, N+1), 1)
def fact3(N):
    """Factorial, simple iterative loop (already returned 1 for N == 0)."""
    res = 1
    for i in range(1, N+1): res *= i # Iterative
    return res
def fact4(N):
    """Factorial from the standard library (raises ValueError for N < 0)."""
    return math.factorial(N) # Stdlib "batteries"
# Tests: correctness first, then timing (best of 3 repeats of 20 calls each)
print(fact0(6), fact1(6), fact2(6), fact3(6), fact4(6)) # 6*5*4*3*2*1: all 720
print(fact0(500) == fact1(500) == fact2(500) == fact3(500) == fact4(500)) # True
for test in (fact0, fact1, fact2, fact3, fact4):
    # min of timeit.repeat gives the least noisy (most repeatable) timing
    print(test.__name__, min(repeat(stmt=lambda: test(500), number=20, repeat=3)))
'''
Conclusions: recursion is slowest on my Python and machine, and fails once N
reaches 999 due to the default stack size setting in sys;
this limit can be increased, but simple loops or the standard library tool seem the best route
here in any event
recursion is today an order
of magnitude slower in CPython, though these results vary in PyPy:
'''
def rev1(S):
    """Reverse sequence S recursively.

    Base case is len(S) <= 1 (was == 1), so the empty sequence is
    returned as-is instead of raising IndexError on ''[-1].
    """
    if len(S) <= 1:
        return S
    else:
        return S[-1] + rev1(S[:-1]) # Recursive: ~10x slower in CPython today
def rev2(S):
    """Reverse a string via reversed() + join: simpler, nonrecursive, faster."""
    return ''.join(reversed(S))
def rev3(S):
    """Reverse any sequence by slicing with a negative step."""
    return S[::-1] # Even better?: sequence reversal by slice
|
"Listing instance attributes with __dict__"
from listinstance import ListInstance # Get lister tool class
class Super:
    def __init__(self): # Superclass __init__
        self.data1 = 'spam' # Create instance attrs
    def ham(self):
        pass
class Sub(Super, ListInstance): # Mix in ham and a __str__
    def __init__(self):
        Super.__init__(self) # Run superclass constructor explicitly
        self.data2 = 'eggs' # More instance attrs
        self.data3 = 24
    def spam(self): # Define another method here
        pass
if __name__ == "__main__":
    X = Sub()
    print(X) # Run the mixed-in __str__ inherited from ListInstance
'''
This is where multiple inheritance comes in handy: by
adding ListInstance to the list of superclasses in a class header (i.e., mixing it in), you
get its __str__ “for free” while still inheriting from the existing superclass(es).
'''
|
"Why You Will Care: Bound Method Callbacks"
'''
Because bound methods automatically pair an instance with a class’s method function,
you can use them anywhere a simple function is expected. One of the most common
places you’ll see this idea put to work is in code that registers methods as event callback
handlers in the tkinter GUI interface
'''
from tkinter import Button
def handler():
    # Simple function callback: any state must come from globals or closures
    "...use globals or closure scopes for state..."
    ...
widget = Button(text='spam', command=handler) # Register: pass the function, don't call it
'''
To register a handler for button click events, we usually pass a callable object that takes
no arguments to the command keyword argument. Function names (and lambdas) work
here, and so do class-level methods—though they must be bound methods if they expect
an instance when called:
Here, the event handler is self.handler—a bound method object that remembers both
self and MyGui.handler. Because self will refer to the original instance when handler
is later invoked on events, the method will have access to instance attributes that can
retain state between events, as well as class-level methods. With simple functions, state
normally must be retained in global variables or enclosing function scopes instead.
'''
class MyGui:
    def handler(self):
        # Bound-method callback: self gives access to per-instance state
        "...use self.attr for state..."
    def makewidgets(self):
        # Register the bound method self.handler: it remembers instance + function
        b = Button(text='spam', command=self.handler)
|
'''the built-in dir function is an easy way to grab a list of all the
attributes available inside an object'''
import sys
print(
    dir(sys) # All attribute names available in the sys module
)
print(
    len(dir(sys)) # Number of names in sys
)
print(
    len([x for x in dir(sys) if not x.startswith('__')]) # Non __X names only
)
print(
    len([x for x in dir(sys) if not x[0] == '_']) # Non underscore names
)
# find out what attributes are provided in objects of built-in types
print(
    dir([])
)
print(
    dir("")
)
# most of a built-in type's names are operator overloading methods
print(
    len(dir([])), len([x for x in dir([]) if not x.startswith('__')])
)
print(
    len(dir('')), len([x for x in dir('') if not x.startswith('__')])
)
# to filter out double-underscored items that are not of common program interest
print(
    [a for a in dir(list) if not a.startswith('__')]
)
print(
    [a for a in dir(dict) if not a.startswith('__')]
)
# importable and reusable function
def dir1(x):
    """Return dir(x) without the double-underscore ("dunder") names."""
    return list(filter(lambda name: not name.startswith('__'), dir(x)))
print(
    dir1(tuple), # Filtered attribute lists for two more built-in types
    dir1(set)
)
# Same result, type name or literal
print(
    dir(str) == dir('')
)
print(
    dir(list) == dir([])
)
'''
the dir function serves as a sort of memory-jogger'''
|
'''
In Python:
- All objects have an inherent Boolean true or false value
- Any nonzero number or nonempty object is true
- Zero numbers, empty objects, and the special object None
are considered false
- Comparisons and equality tests are applied recursively to
data structures
- Comparisons and equality tests return True or False
(custom versions of 1 and 0)
- Boolean and and or operators return a true or false
operand object
- Boolean operators stop evaluating (“short circuit”) as
soon as a result is known
'''
# Boolean expression operators in Python
X = Y = 0
X and Y # Is true if both X and Y are true (bare expression: result discarded)
X or Y # Is true if either X or Y is true
not X # Is true if X is false (the expression returns True or False)
print(2 < 3, 3< 2)
# OR test --> evaluated from L to R; returns the first operand that is true
print(2 or 3, 3 or 2) # return left operand if true
# Else, return right operand (true or false)
print([] or 3)
print([] or {}) # Both false: the right operand {} is returned
print({} or [])
|
"Runtime Class Changes and super"
'''
Superclass that might be changed at runtime dynamically preclude hardcoding their
names in a subclass’s methods, while super will happily look up the current superclass
dynamically. Still, this case may be too rare in practice to warrant the super model by
itself, and can often be implemented in other ways in the exceptional cases where it is
needed.
'''
class X:
    def m(self): print('X.m')
class Y:
    def m(self): print('Y.m')
class C(X): # Start out inheriting from X
    def m(self): super().m() # Can't hardcode class name here
if __name__ == '__main__':
    print(C.__mro__)
    c = C()
    c.m() # super() resolves via current MRO: prints 'X.m'
    print(Y.__mro__)
    y = Y()
    y.m()
    C.__bases__ = (Y,) # Change superclass at runtime!
    c.m() # Same call now follows the new MRO: prints 'Y.m'
"""
This works (and shares behavior-morphing goals with other deep magic, such as
changing an instance’s __class__), but seems rare in the extreme. Moreover, there may
be other ways to achieve the same effect—perhaps most simply, calling through the
current superclass tuple’s value indirectly: special code to be sure, but only for a very
special case (and perhaps not any more special than implicit routing by MROs):
"""
class C(X):
    # Route through the current bases tuple instead of super()
    def m(self): C.__bases__[0].m(self) # Special code for a special case
if __name__ == '__main__':
    print()
    i = C()
    i.m() # 'X.m'
    C.__bases__ = (Y,) # Same effect, without super()
    i.m() # 'Y.m'
|
class Squares: # __iter__ + yield generator
    """Iterable over the squares of start..stop inclusive.

    Each iter() call runs __iter__ and returns a fresh generator,
    so multiple independent scans are supported; __next__ is implied.
    """
    def __init__(self, start, stop):
        self.start, self.stop = start, stop
    def __iter__(self):
        current = self.start
        while current <= self.stop:
            yield current * current
            current += 1
if __name__ == '__main__':
    for i in Squares(1, 5): print(i, end=' ')
    print()
    S = Squares(1, 5) # Runs __init__: class saves instance state
    print(S)
    I = iter(S) # Runs __iter__: returns a generator
    print(I)
    print(next(I))
    print(next(I))
    print(next(I))
    print(next(I))
    print(next(I)) # Generator has both instance and local scope state
    # BUG FIX: a sixth next() on a five-item generator raised an uncaught
    # StopIteration and killed the script; catch it to demonstrate exhaustion.
    try:
        print(next(I))
    except StopIteration:
        print('StopIteration: generator exhausted')
|
"__getattr__ and __getattribute__"
'A First Example'
class Person:
    """Manage a 'name' attribute with __getattr__/__setattr__/__delattr__.

    The real value lives in self._name; 'name' itself is never stored,
    so every fetch of it falls into __getattr__ (undefined names only).
    """
    def __init__(self, name): # On [Person()]
        self._name = name # Triggers __setattr__!
    def __getattr__(self, attr): # On [obj.undefined]
        print('get: ' + attr)
        if attr == 'name': # Intercept name: not stored
            return self._name # Does not loop: _name is defined, so no __getattr__
        else:
            raise AttributeError(attr)
    def __setattr__(self, attr, value): # On [obj.any = value]
        print('set: ' + attr)
        if attr == 'name':
            attr = '_name' # Set internal name
        self.__dict__[attr] = value # Assign via __dict__ to avoid looping
    def __delattr__(self, attr): # On [del obj.any]
        print('del: ' + attr)
        if attr == 'name':
            attr = '_name' # Avoid looping here too
        del self.__dict__[attr] # but much less common
bmr = Person('Bimri Coder') # bmr has a managed attribute
print(bmr.name) # Runs __getattr__
bmr.name = 'Coder Bimri' # Runs __setattr__
print(bmr.name)
del bmr._name # Runs __delattr__ (removes the stored value directly)
print('-'*20)
al = Person('Al Capone') # al gets the same interception behavior
print(al.name)
# print(Person.name.__doc__) # No equivalent here
"""
Notice that the attribute assignment in the __init__ constructor triggers __setattr__
too—this method catches every attribute assignment, even those anywhere within the
class itself. When this code is run, the same output is produced, but this time it’s the
result of Python’s normal operator overloading mechanism and our attribute interception
methods:
Also note that, unlike with properties and descriptors, there’s no direct notion of specifying
documentation for our attribute here; managed attributes exist within the code
of our interception methods, not as distinct objects.
"""
'Using __getattribute__'
# To achieve exactly the same results with __getattribute__, replace __getattr__
class Person:
    """Same managed 'name' attribute, but via __getattribute__.

    __getattribute__ intercepts ALL attribute fetches (not just
    undefined ones), so it must delegate to object.__getattribute__
    to avoid looping on its own self.* references.
    """
    def __init__(self, name):
        self._name = name
    def __getattribute__(self, attr): # On [obj.any]
        print('get: ' + attr)
        if attr == 'name': # Intercept all names
            attr = '_name' # Map to internal name
        return object.__getattribute__(self, attr) # Superclass fetch avoids looping
    def __setattr__(self, attr, value):
        print('set: ' + attr)
        if attr == 'name':
            attr = '_name'
        self.__dict__[attr] = value # self.__dict__ itself triggers __getattribute__
    def __delattr__(self, attr):
        print('del: ' + attr)
        if attr == 'name':
            attr = '_name'
        del self.__dict__[attr]
print()
bmr = Person('Yuri Fyodor') # bmr has a managed attribute
print(bmr.name) # Runs __getattribute__
bmr.name = 'Neitzsche Friedrick' # Runs __setattr__
print(bmr.name)
del bmr._name # Runs __delattr__
print('-'*20)
al = Person('Michio Kaku') # al gets the same interception behavior
print(al.name)
"""
When run with this change, the output is similar, but we get an extra __getattri
bute__ call for the fetch in __setattr__ (the first time originating in __init__):
"""
'''
This example is equivalent to that coded for properties and descriptors, but it’s a bit
artificial, and it doesn’t really highlight these tools’ assets. Because they are generic,
__getattr__ and __getattribute__ are probably more commonly used in delegation-based
code (as sketched earlier), where attribute access is validated and routed to an
embedded object. Where just a single attribute must be managed, properties and descriptors
might do as well or better.
'''
|
"Adding Decorator Arguments"
import time
def timer(label='', trace=True): # On decorator args: retain args
    """Decorator factory: build a class-based timing decorator.

    label/trace are retained in this enclosing scope; each decorated
    function gets its own Timer instance with a cumulative .alltime.
    """
    class Timer:
        def __init__(self, func): # On @: retain decorated func
            self.func = func
            self.alltime = 0
        def __call__(self, *args, **kwargs): # On calls: run original, accumulate
            started = time.perf_counter()
            outcome = self.func(*args, **kwargs)
            elapsed = time.perf_counter() - started
            self.alltime += elapsed
            if trace:
                print('%s %s: %.5f, %.5f' %
                      (label, self.func.__name__, elapsed, self.alltime))
            return outcome
    return Timer
"""
Mostly all we’ve done here is embed the original Timer class in an enclosing function,
in order to create a scope that retains the decorator arguments per deployment. The
outer timer function is called before decoration occurs, and it simply returns the
Timer class to serve as the actual decorator. On decoration, an instance of Timer is made
that remembers the decorated function itself, but also has access to the decorator arguments
in the enclosing function scope.
"""
'Timing with decorator arguments'
if __name__ == '__main__':
    # Self-test: time list comprehension vs map under the decorator
    import sys
    from timerdeco2 import timer
    force = list if sys.version_info[0] == 3 else (lambda X: X) # 3.X map is lazy
    @timer(label='[CCC]==>')
    def listcomp(N): # Like listcomp = timer(...)(listcomp)
        return [x * 2 for x in range(N)] # listcomp(...) triggers Timer.__call__
    @timer(trace=True, label='[MMM]==>')
    def mapcall(N):
        return force(map((lambda x: x * 2), range(N)))
    for func in (listcomp, mapcall):
        result = func(5) # Time for this call, all calls, return value
        func(50000)
        func(500000)
        func(1000000)
        print(result)
        print('allTime = %s\n' % func.alltime) # Total time for all calls
    print('**map/comp = %s' % round(mapcall.alltime / listcomp.alltime, 3))
"""
As usual, we can also test interactively to see how the decorator’s configuration arguments
come into play:
>>> from timerdeco2 import timer
>>> @timer(trace=False) # No tracing, collect total time
... def listcomp(N):
... return [x * 2 for x in range(N)]
...
>>> x = listcomp(5000)
>>> x = listcomp(5000)
>>> x = listcomp(5000)
>>> listcomp.alltime
0.0037191417530599152
>>> listcomp
<timerdeco2.timer.<locals>.Timer object at 0x02957518>
>>> @timer(trace=True, label='\t=>') # Turn on tracing, custom label
... def listcomp(N):
... return [x * 2 for x in range(N)]
...
>>> x = listcomp(5000)
=> listcomp: 0.00106, 0.00106
>>> x = listcomp(5000)
=> listcomp: 0.00108, 0.00214
>>> x = listcomp(5000)
=> listcomp: 0.00107, 0.00321
>>> listcomp.alltime
0.003208920466562404
"""
'''
As is, this timing function decorator can be used for any function, both in modules and
interactively. In other words, it automatically qualifies as a general-purpose tool for
timing code in our scripts.
'''
|
def countdown(N):
    """Print N down to 1 on one line, then 'stop' (recursive version)."""
    if N:
        print(N, end=' ')
        countdown(N - 1)
    else:
        print('stop')
countdown(10)
countdown(90)
# generator-based solution
def countdown2(N): # Generator function, recursive
    """Yield N, N-1, ..., 1, then the string 'stop'."""
    if N:
        yield N
        yield from countdown2(N - 1) # 3.3+ delegation (was an explicit for loop)
    else:
        yield 'stop'
print(list(countdown2(5)))
# Nonrecursive options:
def countdown3():
    """Yield 5, 4, 3, 2, 1 without recursion."""
    for value in range(5, 0, -1):
        yield value
print(list(countdown3()))
print(list(x for x in range(5, 0, -1))) # Equivalent generator expression
print(list(range(5, 0, -1))) # Equivalent nongenerator form
|
L = [123, 'spam', 1.23]
print(len(L)) # get length of collection
print(L[0]) # item at offset 0
print(L[:-1]) # all items except last one
print(L + [4, 5, 6]) # concat make new list too
print(L * 2) # repeat make new list too
print(L) # Original list stays intact
# Lists in py don't have type constraint
# and have no fixed size - they can grow &
# shrink in size.
L.append('NI') # growing: add object at end of list
print(L)
# pop is equivalent to del statement & removes item at a given offset
L.pop(2) # shrinking: delete item in the middle
print(L)
# Lists being mutable they can change list objects in place
M = ['bb', 'aa', 'cc']
M.sort()
print(M)
M.reverse()
print(M)
# Nesting
N = [[1,2,3], # A 3 x 3 matrix, as nested lists
[4,5,6], # Code can span lines if bracketed
[7,8,9]]
print(N)
print(N[1]) # Get row 2
print(N[1][2]) # Get row 2, then get item 3 within the row
# Comprehensions
col2 = [row[1] for row in N]
print(col2) # Collect the items in column 2
print([row[1] + 1 for row in N]) # add 1 to each item in column 1
print([row[1] for row in N if row[1] % 2 == 0]) # filter out odd items
diag = [N[i][i] for i in [0, 1, 2]] # collect a diagonal from matrix
print(diag)
doubles = [c * 2 for c in 'soma'] # repeat chars in a string
print(doubles)
# Collect multipe values with range
# Must be wrapped in a nested collection
print(list(range(10))) # 0..9 (list() required in 3.X)
print(list(range(-6, 7, 2))) # −6 to +6 by 2 (need list() in 3.X)
print([[x ** 2, x ** 3] for x in range(4)]) # Multiple values, "if" filters
print([[x, x / 2, x * 2] for x in range(-6, 7, 2) if x > 0])
# Generators from enclosing a comprehension in paratheses
G = (sum(row) for row in N)
print(next(G)) # Run the iteration protocol next()
print(next(G))
print(next(G))
# map BUILT-IN
print(list(map(sum, N))) # Map sum over items in N
# Sets & dicts from comptehension syntax
print({sum(row) for row in N}) # Create a set of row sums
print({i : sum(N[i]) for i in range(3)}) # Creates key:value table of row sums
# lists, sets, dictionaries, and generators
# can all be built with comprehensions
print([ord(x) for x in 'bimri']) # List of character ordinals
print({ord(x) for x in 'nyathi'}) # Sets remove duplicates
print({x: ord(x) for x in 'koima'}) # Dictionary keys are unique
print((ord(x) for x in 'nekesa')) # Generator of values
|
"Multiple Context Managers in 3.1, 2.7, and Later"
'''
the with statement may also specify multiple (sometimes
referred to as “nested”) context managers with new comma syntax
'''
with open('data') as fin, open('res', 'w') as fout:
for line in fin:
if 'some key' in line:
fout.write(line)
'''
the following uses with to open two files at once and zip together their lines,
without having to manually close when finished
'''
with open('script1.py') as f1, open('script2.py') as f2:
for pair in zip(f1, f2):
print(pair)
"coding structure to do a line-by-line comparison of two text files"
with open('script1.py') as f1, open('script2.py') as f2:
for (linenum, (line1, line2)) in enumerate(zip(f1, f2)):
if line1 != line2:
print('%s\n%r\n%r' % (linenum, line1, line2))
'''
the preceding technique isn’t all that useful in CPython, because input file objects
don’t require a buffer flush, and file objects are closed automatically when reclaimed
if still open. In CPython, the files would be reclaimed immediately if the parallel scan
were coded the following simpler way:'''
for pair in zip(open('script1.py'), open('script2.py')): # same effect, auto close
print(pair)
"""
In both cases, we can instead simply open files in individual statements and close after
processing if needed, and in some scripts we probably should—there’s no point in using
statements that catch an exception if it means your program is out of business anyhow!
"""
fin = open('script2.py')
fout = open('upper.py', 'w')
for line in fin: # Same effect as preceding code, auto close
fout.write(line.upper())
"""
However, in cases where programs must continue after exceptions, the with forms also
implicitly catch exceptions, and thereby also avoid a try/finally in cases where close
is required. The equivalent without with is more explicit, but requires noticeably more
code:
"""
fin = open('script2.py')
fout = open('upper.py', 'w')
try:
for line in fin: # same effect but explicit close on error
fout.write(line.upper())
finally:
fin.close()
fout.close()
'''
On the other hand, the try/finally is a single tool that applies to all finalization cases,
whereas the with adds a second tool that can be more concise, but applies to only certain
objects types, and doubles the required knowledge base of programmers.
'''
|
L = ['loyal', '6lack', 'SONG!']
L.append('fying') # Append method call: add item at end
print(L)
L.sort()
print(L)
L = ['abc', 'ABD', 'aBe']
L.sort() # Sort with mixed case
print(L)
L = ['abc', 'ABD', 'aBe'] # Normalize to lowercase
L.sort(key=str.lower)
print(L)
L = ['abc', 'ABD', 'aBe']
L.sort(key=str.lower, reverse=True) # Change sort order
print(L)
# Sorting built-in
L = ['abc', 'ABD', 'aBe']
print(sorted(L, key=str.lower, reverse=True))
# Pretransform items: differs!
L = ['abc', 'ABD', 'aBe']
print(sorted([x.lower() for x in L], reverse=True))
# Reverse, reversed, extend, pop
L = [1, 2]
L.extend([3, 4, 5]) # Add many items at teh end(like in-place +)
print(L)
L.pop() # Delete and return last item(by default: -1)
print(L)
L.reverse() # In-place reversal method
print(L)
print(list(reversed(L))) # Reversal build-in with a result(iterator)
'''Last-in-first-out(LIFO) stack structure'''
L = []
L.append(1) # Push onto stack
L.append(2)
print(L)
L.pop() # Pop off stack
print(L)
# remove, insert, count & index methods
L = ['spam', 'eggs', 'ham']
print(L.index('eggs')) # Index of an object (search/find)
L.insert(1, 'toast') # Insert at position
print(L)
L.remove('eggs') # Delete by value
print(L)
print(L.pop(1)) # Delete by position
print(L)
print(L.count('spam')) # Number of occurences
# Other list operations
L = ['spam', 'eggs', 'ham', 'toast']
print(L)
del L[0] # Delete one item
print(L)
del L[1:] # Delete an entire section
print(L)
|
"Object Destruction: __del__"
'''
__init__ constructor is called whenever an instance is generated
the destructor method __del__, is run automatically when an instance’s space is being reclaimed
(i.e., at “garbage collection” time):
'''
class Life:
def __init__(self, name='unknown'):
print('Hello ' + name)
self.name = name
def live(self):
print(self.name)
def __del__(self):
print('Goodbye ' + self.name)
if __name__ == "__main__":
brian = Life('Brian')
brian.live()
brian = 'loretta'
"""
Here, when brian is assigned a string, we lose the last reference to the Life instance
and so trigger its destructor method. This works, and it may be useful for implementing
some cleanup activities, such as terminating a server connection. However, destructors
are not as commonly used in Python as in some OOP languages
"""
|
"Example: Default Behavior"
'''
Because the control flow through a program is easier to capture in Python than in
English, let’s run some examples that further illustrate exception basics in the context
of larger code samples in files.
'''
def gobad(x, y):
    # Deliberately unguarded division: y == 0 raises ZeroDivisionError.
    return x / y
def gosouth(x):
    # Calls gobad with divisor 0 to demonstrate the default error report.
    print(gobad(x, 0))
gosouth(1)  # Triggers the exception; Python prints the stack trace and exits
"""
When ran this in a shell window with Python 3.X. The message consists of a stack trace
(“Traceback”) and the name of and details about the exception that was raised. The
stack trace lists all lines active when the exception occurred, from oldest to newest.
"""
"""
Because Python detects and reports all errors at runtime by raising exceptions, exceptions
are intimately bound up with the ideas of error handling and debugging in general.
"""
|
from firstClass import FirstClass
class SecondClass(FirstClass): # Inherits setdata
"""
SecondClass defines the display method to print with a different format. By defining
an attribute with the same name as an attribute in FirstClass, SecondClass effectively
replaces the display attribute in its superclass.
"""
def display(self):
"""
Recall that inheritance searches proceed upward from instances to subclasses to superclasses,
stopping at the first appearance of the attribute name that it finds. In this
case, since the display name in SecondClass will be found before the one in First
Class, we say that SecondClass overrides FirstClass’s display. Sometimes we call this
act of replacing attributes by redefining them lower in the tree overloading.
"""
print('Current value = "%s"' % self.data)
# Make an instance that inherists the setdata method in FirstClass verbatim
'''
>>> z = SecondClass()
>>> z.setdata(42) # Finds setdata in FirstClass
>>> z.display() # Finds overridden method in SecondClass
Current value = "42"
'''
|
"A First Look at User-Defined Function Decorators"
'''
Because the spam function is run through the tracer decorator, when the original
spam name is called it actually triggers the __call__ method in the class. This method
counts and logs the call, and then dispatches it to the original wrapped function.
'''
class tracer:
    """Decorator class: counts and logs every call to the wrapped function."""
    def __init__(self, func):
        # Keep the wrapped callable and start the call counter at zero
        self.func = func
        self.calls = 0
    def __call__(self, *args):
        # Record this invocation, log it, then delegate to the original
        self.calls = self.calls + 1
        print('call %s to %s' % (self.calls, self.func.__name__))
        return self.func(*args)
"The net effect is to add a layer of logic to the original spam function."
@tracer # Same as spam = tracer(spam)
def spam(a, b, c): # Wrap spam in a decorator object
return a + b + c
print(spam(1, 2, 3)) # Really calls the tracer wrapper object
print(spam('a', 'b', 'c')) # Invokes __call__ in class
|
def intersect(*args):
    """Return items of args[0] (first-seen order, no duplicates) that
    appear in every other argument."""
    res = []
    first, rest = args[0], args[1:]
    for item in first:
        if item in res:                     # duplicate: already collected
            continue
        if all(item in seq for seq in rest):
            res.append(item)
    return res
def union(*args):
    """Return items appearing in any argument, first-seen order, no
    duplicates (order-preserving union)."""
    res = []
    for item in (x for seq in args for x in seq):
        if item not in res:
            res.append(item)        # first sighting: keep it
    return res
s1, s2, s3 = "SPAM", "SCAM", "SLAM"
print(intersect(s1, s2), union(s1, s2)) # Two operands
print(intersect([1, 2, 3], (1, 4))) # Mixed types
print(intersect(s1, s2, s3)) # Three operands
print(union(s1, s2, s3))
def tester(func, items, trace=True):
for i in range(len(items)):
items = items[1:] + items[:1]
if trace: print(items)
print(sorted(func(*items)))
tester(intersect, ('a', 'abcdefg', 'abdst', 'albmcnd'))
tester(union, ('a', 'abcdefg', 'abdst', 'albmcnd'), False)
tester(intersect, ('ba', 'abcdefg', 'abdst', 'albmcnd'), False)
# duplicates won’t appear in either intersection or union results
print(
intersect([1, 2, 1, 3], (1, 1, 4)),
union([1, 2, 1, 3], (1, 1, 4)),
)
tester(intersect, ('ababa', 'abcdefga', 'aaaab'), False)
|
"Computed Attributes"
# properties do much more—computing the value of an attribute
# dynamically when fetched
class PropSquare:
    # X is a computed attribute: reading it returns the stored value
    # squared; assigning it stores the raw value.
    def __init__(self, start):
        self.value = start          # raw state backing the X property
    def getX(self): # On attr fetch: square the stored value every time
        return self.value ** 2
    def setX(self, value): # On attr assign: store the raw value unchanged
        self.value = value
    X = property(getX, setX) # No deleter or docstring configured
P = PropSquare(3) # Two instances of class with property
Q = PropSquare(32) # Each has different state information
print(P.X) # 3 ** 2
P.X = 4
print(P.X) # 4 ** 2
print(Q.X) # 32 ** 2(1024)
'''
Notice that we’ve made two different instances—because property methods automatically
receive a self argument, they have access to the state information stored in instances.
In our case, this means the fetch computes the square of the subject instance’s
own data.
'''
|
# use this technique to skip items as we go:
S = 'abcdefghijk'
print(
list(range(0, len(S), 2))
)
for i in range(0, len(S), 2): print(S[i], end=' ')
print()
# slice with a stride of 2
for c in S[::2]: print(c, end=' ')
|
# update Person object on database
"prints the database and gives a raise to one of our stored objects each time"
import shelve
db = shelve.open('persondb') # Reopen shelve with same filename
for key in sorted(db): # Iterate to display database objects
print(key, '\t=>', db[key]) # Prints with custom format
jenny = db['Jenny Writes'] # Index by key to fetch
jenny.giveRaise(.10) # Update in memory using class's method
db['Jenny Writes'] = jenny # Assign to key to update in shelve
db.close() # Close after making changes
|
"Coding Exceptions Classes"
class General(Exception):
    """Root of this demo exception hierarchy."""
    pass

class Specific1(General):
    """First specialization of General."""
    pass

class Specific2(General):
    """Second specialization of General."""
    pass

def raiser0():
    raise General()

def raiser1():
    raise Specific1()

def raiser2():
    raise Specific2()
for func in (raiser0, raiser1, raiser2):
try:
func()
except General as X: # X us the raised instance
print('caught: %s' % X.__class__) # Same as sys.exc_info()[0]
'''
Because __class__ can be used like this to determine the specific type of exception
raised, sys.exc_info is more useful for empty except clauses that do not otherwise have
a way to access the instance or its class.
Furthermore, more realistic programs usually
should not have to care about which specific exception was raised at all—by calling
methods of the exception class instance generically, we automatically dispatch to behavior
tailored for the exception raised.
'''
|
from streams import Processor
class Uppercase(Processor): # Converter hook for the streams.Processor pipeline
    def converter(self, data):
        # Uppercase each chunk/line handed over by the superclass driver
        return data.upper()
if __name__ == "__main__":
import sys
obj = Uppercase(open('trispam.txt'), sys.stdout)
obj.process(); print()
class HTMLize:
    """Writer object: prints each line wrapped in <PRE> tags.

    Used via composition — anything with a write() method can serve as a
    Processor's output stream, so HTML formatting combines with whatever
    conversion (e.g. uppercasing) the processor class provides, even
    though the core Processor logic knows nothing about either step.
    """
    def write(self, line):
        # Strip the trailing newline before wrapping, then print
        print('<PRE>%s</PRE>' % line.rstrip())
print(Uppercase(open('trispam.txt'), HTMLize()).process())
|
"Python 3.X Exception Chaining: raise from"
'''
Exceptions can sometimes be triggered in response to other exceptions—both deliberately
and by new program errors.
Python 3.X allows raise statements to have an optional from clause:
raise newexception from otherexception
'''
try:
1/0
except Exception as E:
raise TypeError('Bad') from E # Explicitly chained exceptions
...
"""
When an exception is raised implicitly by a program error inside an exception handler,
a similar procedure is followed automatically: the previous exception is attached to the
new exception’s __context__ attribute and is again displayed in the standard error
message if the exception goes uncaught:
"""
try:
1/0
except:
badname # Implictly chained exceptions
...
"""
In both cases, because the original exception objects thus attached to new exception
objects may themselves have attached causes, the causality chain can be arbitrary
long, and is displayed in full in error messages. That is, error messages might give more
than two exceptions. The net effect in both explicit and implicit contexts is to allow
programmers to know all exceptions involved, when one exception triggers another:
"""
try:
try:
raise IndexError()
except Exception as E:
raise TypeError() from E
except Exception as E:
raise SyntaxError() from E
'''
Code like the following would similarly display three exceptions, though implicitly
triggered here:
'''
try:
try:
1 / 0
except:
badname
except:
open('nonesuch')
|
'''
filter is an iterable
in 3.X that generates its results on request, a generator expression with an if clause is
operationally equivalent
'''
# generator seems marginally simpler than the filter here
line = 'aa bbb c'
print(
''.join(x for x in line.split() if len(x) > 1) # Generator with 'if
)
print(
''.join(filter(lambda x: len(x) > 1, line.split())) # Similar to filter
)
"there is always a statement-based equivalent to a generator expression"
print(
''.join(x.upper() for x in line.split() if len(x) > 1)
)
# Statement equivalent?
"the statement form isn’t quite the same"
"cannot produce items one at a time"
res = ''
for x in line.split():
if len(x) > 1:
res += x.upper() # this is also a join
print(res)
'''
The true equivalent to a generator expression would be a generator
function with a yield
'''
|
"Reduce function lives in functools module"
# Accepts aan iterable to process, but it's bot an iterable itself
# - it returns a single result.
from functools import reduce # Import in 3.X
# reduce passes the current sum or product. respectively
print(
reduce((lambda x, y: x + y), [1, 2, 3, 4])
)
print(
reduce((lambda x, y: x * y), [1, 2, 3, 4])
)
# for loop equivalent of first case
L = [1,2,3,4]
res = L[0]
for x in L[1:]:
res = res + x
print(res)
"Custom own version of reduce"
def myreduce(function, sequence):
    """Fold `sequence` left-to-right with the two-argument `function`.

    Improvements over the original: works on any iterable (not just
    indexable sequences), no longer shadows the builtin `next`, and
    raises TypeError on an empty input like functools.reduce (the
    original raised IndexError).

    Raises:
        TypeError: if `sequence` is empty.
    """
    items = iter(sequence)
    try:
        tally = next(items)
    except StopIteration:
        raise TypeError('myreduce() of empty sequence with no initial value') from None
    for item in items:
        tally = function(tally, item)
    return tally
print(
myreduce((lambda x, y: x + y), [1,2,3,4,5])
)
print(
myreduce((lambda x, y: x * y), [1, 2, 3, 4, 5])
)
'''
The built-in reduce also allows an optional third argument placed before the items in
the sequence to serve as a default result when the sequence is empty
'''
'''
operator module, which provides functions that correspond to builtin
expressions and so comes in handy for some uses of functional tools
'''
import operator, functools
print(
functools.reduce(operator.add, [2, 4, 6]) # function-based+
)
print(
functools.reduce((lambda x, y: x + y), [2, 4, 6])
)
|
"""\
JSON存儲文件
"""
import json
str='''[{"name":"bob","age":12},
{"name":"zjb","age":91}]'''
json.loads(str)
with open("data.json",'r') as a:
str=a.read()
jsonData=json.loads(str)
print(jsonData[0])
|
#!/usr/bin/env python
# coding: utf-8
# ## Task 11
# In[ ]:
# In[5]:
number1 = int(input('Enter 1st number: '))
number2 = int(input('Enter 2nd number: '))
ans = int(input('press 1 for addition, 2 for subtraction, 3 for multiplication, 4 for division: '))
def addition(number1, number2):
    """Print the sum of the two numbers."""
    total = number1 + number2
    print('Addition is :', total)

def substraction(number1, number2):
    """Print the difference of the two numbers."""
    total = number1 - number2
    print('Substraction is :', total)

def multiplication(number1, number2):
    """Print the product of the two numbers."""
    total = number1 * number2
    print('multiplication is :', total)

def division(number1, number2):
    """Print the quotient of the two numbers (true division)."""
    total = number1 / number2
    print('Division is: ', total)
# Dispatch on the menu choice read above (1-4); anything else is rejected
if ans == 1:
    addition(number1,number2)
elif ans == 2:
    substraction(number1,number2)
elif ans == 3:
    multiplication(number1,number2)
elif ans == 4:
    division(number1,number2)
else:
    print("Error press only 1,2,3,4")
# In[ ]:
|
string = "How can mirrors be real if our eyes aren't real"
def to_jaden_case(string):
    """Return the sentence with every whitespace-separated word
    capitalized (first letter upper, rest lower)."""
    return " ".join(word.capitalize() for word in string.split())
print(to_jaden_case(string))
|
'''
Given an n x n snail_map, return the snail_map elements arranged from outermost elements to the middle element, traveling clockwise.
snail_map = [[1,2,3],
[4,5,6],
[7,8,9]]
snail(snail_map) #=> [1,2,3,6,9,8,7,4,5]
For better understanding, please follow the numbers of the next snail_map consecutively:
snail_map = [[1,2,3],
[8,9,4],
[7,6,5]]
snail(snail_map) #=> [1,2,3,4,5,6,7,8,9]
NOTE: The idea is not sort the elements from the lowest value to the highest; the idea is to traverse the 2-d snail_map in a clockwise snailshell pattern.
NOTE 2: The 0x0 (empty matrix) is represented as en empty snail_map inside an snail_map [[]].
'''
def snail(snail_map):
    """Return the elements of square matrix `snail_map` in clockwise
    spiral order, outermost ring first.

    Improvements over the original recursive version: the caller's
    matrix is left unmodified (the original popped rows/items from it),
    and the traversal is a simple peel-and-rotate loop. The empty
    matrix [[]] yields [].
    """
    # Shallow row copies so the input is never mutated
    grid = [list(row) for row in snail_map]
    result = []
    while grid and grid[0]:
        # Peel the top row, then rotate the remainder counter-clockwise
        # so the next ring edge becomes the new top row.
        result.extend(grid.pop(0))
        grid = [list(row) for row in zip(*grid)][::-1]
    return result
# snail_map = [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ]
snail_map = [
[1,2,3,4],
[12,13,14,5],
[11,16,15,6],
[10,9,8,7]
]
# snail_map = [
# [1]
# ]
# snail_map = [
# []
# ]
print(snail(snail_map))
|
'''
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in.
Note: If the number is a multiple of both 3 and 5, only count it once. Also, if a number is negative, return 0(for languages that do have them)
'''
def solution(number):
    """Return the sum of all multiples of 3 or 5 strictly below `number`.

    Numbers divisible by both 3 and 5 count once; for number <= 0 the
    range is empty so the result is 0.

    Fixes over the original: removes the stray debug print of every
    candidate, and drops the O(n) `not in` membership scans (the `or`
    condition already prevents double-counting).
    """
    return sum(n for n in range(1, number) if n % 3 == 0 or n % 5 == 0)
solution(10)
|
# Draw a figure ("8" per the original Russian comment) with turtle
# graphics: two straight segments each followed by a 240-degree arc.
import turtle
import math
sqrt_3 = math.sqrt(3)
rad = sqrt_3 * 100/3    # arc radius derived from the segment geometry
#print (rad)
turtle.left(120)
turtle.forward(200)
turtle.circle(-rad,240)   # clockwise arc (negative radius)
turtle.forward(200)
turtle.circle(rad,240)    # counter-clockwise arc
|
# Check for type-0 (unrestricted) grammar productions.
def type0(p, lhs):
    """Return 0 when `lhs` contains any nonterminal (A, B, or S),
    else 9. `p` (the production index) is unused but kept for
    interface compatibility."""
    for symbol in lhs:
        if symbol in ('A', 'B', 'S'):
            return 0
    return 9
#function below is to check for type-1
def type1(p,lhs,rhs,l,r):
    # Count occurrences of the start symbol S on each side.
    j=0
    k=0
    for q in range(len(lhs)):
        if(lhs[q]=="S"):
            j=j+1
    for t in range(len(rhs)):
        if(rhs[t]=="S"):
            k=k+1
    if((j==0)and(k!=0)):
        # S only on the RHS: stays unrestricted (type-0).
        return 0
    if((lhs=="S")and(rhs=="l")): #this will be applicable only for the first production
        return 1
    if(l<=r):
        # Non-contracting production: at least context-sensitive (type-1).
        return 1
    # NOTE(review): when l > r and no earlier case matched, control falls
    # off the end and returns None; callers then print "No type".
    # Confirm this implicit-None path is intended.
def type2(l):
    """Return 2 (context-free) when the LHS length is 1, else 1."""
    return 2 if l == 1 else 1
def type3_LL(p, rhs, lhs):
    """Left-linear check: return 3 when `rhs` contains no nonterminal
    (A, B, or S), else 2. `p` and `lhs` are unused but kept for
    interface compatibility."""
    nonterminals = sum(1 for symbol in rhs if symbol in ('A', 'B', 'S'))
    return 3 if nonterminals == 0 else 2
def type3_RL(p, rhs):
    """Right-linear check for `rhs` (called when rhs starts with a
    terminal). Returns 3 when rhs contains exactly one nonterminal
    (A, B, S) and it is the final symbol, else 2.

    Fixes over the original:
    - `rhs[len(rhs)]` raised IndexError whenever exactly one nonterminal
      was present; the last symbol is `rhs[-1]`.
    - The `or`-chain of `!=` tests was tautologically true (mis-applied
      De Morgan); the right-linear form requires the last symbol to BE a
      nonterminal.

    `p` is unused but kept for interface compatibility.
    NOTE(review): a purely-terminal rhs (zero nonterminals) still falls
    into the `return 2` branch, as in the original — confirm intent.
    """
    nonterminals = sum(1 for symbol in rhs if symbol in ('A', 'B', 'S'))
    if nonterminals == 1 and rhs[-1] in ('A', 'B', 'S'):
        return 3
    return 2
s=int (input("Enter the Number of production functions : ")) #input number of production functions
symb=input("Enter start symbol: ")
pfunc=[]   # flat list: [lhs0, rhs0, lhs1, rhs1, ...]
for i in range(s):
    pfunc.append(input("Enter LHS of "+str(i)+"th/st production function : "))
    pfunc.append(input("Enter RHS of "+str(i)+"th/st production function : "))
    # Each production occupies two slots, so index with 2*i / 2*i+1.
    # (The original read pfunc[i]/pfunc[i+1], which examines the wrong
    # production for every i >= 1.)
    lhs = pfunc[2*i]
    rhs = pfunc[2*i+1]
    l_len=len(lhs)
    r_len=len(rhs)
    # Walk down the Chomsky hierarchy, keeping the most restrictive type
    # that still matches this production. (Renamed from `min`/`type`,
    # which shadowed builtins.)
    grammar_type = type0(i, lhs)
    if grammar_type == 0:
        grammar_type = type1(i, lhs, rhs, l_len, r_len)
    if grammar_type == 1:
        grammar_type = type2(l_len)
    if grammar_type == 2:
        if rhs[0] in ("A", "B", "S"):
            grammar_type = type3_LL(i, rhs, lhs)
        elif rhs[0] in ("a", "b", "l"):
            grammar_type = type3_RL(i, rhs)
            # (The original `continue` here skipped the result printout
            # for every right-linear production — removed.)
    if grammar_type in (0, 1, 2, 3):
        print("Type is :"+str(grammar_type))
    else:
        print("No type")
|
# Sum the integers in the inclusive range between the two inputs
# (accepted in either order), skipping multiples of 13.
soma=0
x=int(input())
y=int(input())
if x < y:
    for i in range (x,y+1):
        if i % 13 != 0:
            soma += i
else:
    for i in range (y,x+1):
        if i % 13 != 0:
            soma += i
print(soma)
|
for i in range(1,10,2):
print("I="+str(i),"J=7")
print("I="+str(i),"J=6")
print("I="+str(i),"J=5")
|
# Print the numbers strictly between the two inputs (either order)
# whose remainder mod 5 is 2 or 3.
x=int(input())
y=int(input())
if x < y:
    for i in range (x+1,y):
        if i % 5 == 2 or i % 5 == 3:
            print(i)
else:
    for i in range (y+1,x):
        if i % 5 == 2 or i % 5 == 3:
            print(i)
|
# Tally Inter x Gremio ("grenal") match results entered by the user,
# then report totals and the overall winner.
novoGrenal=1
somaGrenais=0   # total matches recorded
somaInter=0     # Inter wins
somaGremio=0    # Gremio wins
empate=0        # draws
while novoGrenal == 1:
    Inter,Gremio=map(int,input().split(" "))
    if Inter > Gremio:
        somaInter += 1
    elif Gremio > Inter:
        somaGremio += 1
    elif Inter == Gremio:
        empate += 1
    somaGrenais += 1
    print("Novo grenal (1-sim 2-nao)")
    novoGrenal=int(input())
    if novoGrenal == 2:
        break
print(somaGrenais,"grenais")
print("Inter:"+(str(somaInter)))
print("Gremio:"+(str(somaGremio)))
print("Empates:"+(str(empate)))
if somaInter > somaGremio:
    print("Inter venceu mais")
elif somaGremio > somaInter:
    print("Gremio venceu mais")
else:
    print("Nao houve vencedor")
|
# Read n and a line of integers; report the smallest of the first n
# values and its (first) position.
n=int(input())
line=list(map(int,input().split()))
# The original used 0 as a "not yet set" sentinel, which breaks on
# inputs containing 0 (any later value would overwrite the minimum).
valores = line[:n]
if valores:
    menorValor = min(valores)
    posicao = valores.index(menorValor)   # first occurrence, as before
else:
    menorValor, posicao = 0, 0            # original printed 0/0 for n == 0
print("Menor valor:",menorValor)
print("Posicao:",posicao)
|
# Interactive four-function calculator; "qqq" exits the loop.
print('Type "qqq" for exit')
while True:
    try:
        # funk reads `sign`, `a`, `b` from the enclosing scope at call time
        def funk():
            if sign == "+":
                return a + b
            elif sign == "-":
                return a - b
            elif sign == "*":
                return a * b
            elif sign == "/":
                return a / b
        sign = input("Chose operation:(+, -, *, /)")
        if sign == "qqq":
            break
        if sign not in ("+", "-", "*", "/"):
            print("Дурак?")
            continue
        a = float(input("a ="))
        b = float(input("b ="))
        print(funk())
    except ZeroDivisionError as ex:
        print("На ноль делить нельзя")   # message: division by zero
    except ValueError as e:
        print("Нужно ввести число")      # message: a number is required
|
def sustituye_it(lista_pal, pal_busc, pal_reemp):
    """ lista, string, string -> lista
    OBJ: Walk lista_pal, replace every occurrence of pal_busc with
    pal_reemp, and return the resulting word list.

    The original body ignored all three parameters (it overwrote them
    with hard-coded test data) and printed a string instead of
    returning a list, contradicting its own docstring; this version
    implements the documented contract. The input list is not mutated.
    """
    return [pal_reemp if palabra == pal_busc else palabra
            for palabra in lista_pal]
|
# Subprogram that decides whether a number lies within given natural
# limits, which are passed as arguments.
def numero_limite(num, lim_inf, lim_sup):
    """Parameters: int, int, int -> boolean
    OBJ: Report whether num lies in [lim_inf, lim_sup] (inclusive)
    PRE: lim_inf is less than lim_sup"""
    return lim_inf <= num <= lim_sup
# Demo call: 0 is outside [15, 50], so the else branch runs.
if (numero_limite(0, 15, 50)):
    print("Dentro limites")
else:
    print("Fuera limites")
|
# In-memory client registry keyed by DNI, driven by a numeric text menu.
clientes = {}
opcion = ''
while opcion != '6':
    if opcion == '1':
        # Add (or overwrite) a client record
        dni = input('Introduce DNI del cliente: ')
        nombre = input('Introduce el nombre del cliente: ')
        direccion = input('Introduce la dirección del cliente: ')
        fecha = input('Introduce la fecha de nacimiento del cliente: ')
        tipo = input('Introduce el tipo de sus ultimas 5 compras: ')
        media = input('Introduce la cantidad media mensual del gasto: ')
        cliente = {'nombre': nombre, 'dirección': direccion,
                   'fecha': fecha, 'tipo': tipo, 'media': media}
        clientes[dni] = cliente
    if opcion == '2':
        # Delete a client by DNI
        dni = input('Introduce DNI del cliente: ')
        if dni in clientes:
            del clientes[dni]
        else:
            print('No existe el cliente con el dni', dni)
    if opcion == '3':
        # Show all fields of one client
        dni = input('Introduce DNI del cliente: ')
        if dni in clientes:
            print('DNI:', dni)
            for key, value in clientes[dni].items():
                print(key.title() + ':', value)
        else:
            print('No existe el cliente con el dni', dni)
    if opcion == '4':
        # List every client (DNI and name)
        print('Lista de clientes')
        for key, value in clientes.items():
            print(key, value['nombre'])
    if opcion == '5':
        print('Ordenados')
        # NOTE(review): this branch only filters out clients whose
        # 'media' is empty; despite the menu label it does not sort —
        # confirm intended behavior.
        for key, value in clientes.items():
            if value['media']:
                print(key, value['nombre'])
    opcion = input('Menú de opciones\n(1) Añadir cliente\n(2) Eliminar cliente\n(3) Mostrar cliente\n(4) Listar clientes\n(5) Listar clientes ordenados\n(6) Terminar\nElige una opción:')
|
#!/usr/bin/env python3
"""Module to store implementation of the normal distribution"""
π = 3.1415926536
e = 2.7182818285
def erf(x, n=4):
    """Compute the Maclaurin approximation of erf up the fifth term.

    Terms are (-1)^k * x^(2k+1) / (k! * (2k+1)) for k = 0..4, scaled by
    2/sqrt(pi) via the module-level constant π defined above. The `n`
    parameter is unused; the series length is fixed at five terms.
    """
    return 2/π**0.5*sum([x, -1/3*x**3, 1/10*x**5, -1/42*x**7, 1/216*x**9])
class Normal:
    """Class to represent the normal distribution"""
    def __init__(self, data=None, mean=0., stddev=1.):
        """Initialize normal distribution using data or with mean and stddev
        if data is missing.

        Raises:
            ValueError: if stddev <= 0 (when no data), or data has < 2 values
            TypeError: if data is given but is not a list
        """
        self.mean = float(mean)
        self.stddev = float(stddev)
        if data is None and self.stddev <= 0:
            raise ValueError('stddev must be a positive value')
        if data is not None:
            if type(data) != list:
                raise TypeError('data must be a list')
            if len(data) < 2:
                raise ValueError('data must contain multiple values')
            # Sample mean and *population* standard deviation (divides by
            # len(data), not len(data) - 1)
            self.mean = sum(data)/len(data)
            self.stddev = (sum((self.mean - x)**2 for x in data)/len(data))**.5
    def z_score(self, x):
        """Calculate the z-score of x"""
        return (x - self.mean)/self.stddev
    def x_value(self, z):
        """Calculate the value that x would take with a z-score of z"""
        return z*self.stddev + self.mean
    def pdf(self, x):
        """Calculate the pdf at x (uses the module constants π and e)"""
        return 1/(self.stddev * (2*π)**0.5) * e**(-1/2 * self.z_score(x)**2)
    def cdf(self, x):
        """Calculate the cdf at x via the five-term erf approximation
        defined above (accuracy degrades for |z| far from 0)"""
        return 0.5*(1 + erf(self.z_score(x)/2**0.5))
|
#!/usr/bin/env python3
"""Demonstrate use of element-wise operations on numpy arrays"""
def np_elementwise(mat1, mat2):
    """Perform element-wise addition, subtraction, multiplication, and
    division on mat1 and mat2; return the four results as a tuple."""
    total = mat1 + mat2
    difference = mat1 - mat2
    product = mat1 * mat2
    quotient = mat1 / mat2
    return total, difference, product, quotient
|
import numpy as np
import matplotlib.pyplot as plt
rng = np.random.RandomState(37)
x = rng.rand(200)
y = 3*x**7 + x + rng.randn(200)
y3 = 3*x**7 + x
plt.scatter(x, y)
plt.scatter(x, y3)
def F(x, y, a, c):
    """Cost function: mean of squared residuals of y against the model
    -a*x**7 + c*x, divided by 2.

    The model array is computed once before the loop; the original
    rebuilt -a*x**7 inside every iteration (O(n^2) work overall).
    """
    pred = -a*x**7 + c*x
    e = 0.
    for i in range(len(y)):
        e = e + (y[i] - pred[i])**2
    return e/(2*(len(y)))
F(x, y, 3, 1)
def derivative(x, y, a, c):
    """Gradient of the cost in F with respect to the parameters (a, c),
    averaged over the samples. Returns the pair (dF/da, dF/dc)."""
    residual = y - (-a*x**7 + c*x)
    grad_a = np.sum(residual * (-x**7)) / len(y)
    grad_c = np.sum(residual * x) / len(y)
    return grad_a, grad_c
print(derivative(x, y, 3, 1))
def gradient_descent(x, y, a, c):
    """Run 100000 fixed-step gradient-descent updates on (a, c) and
    return the final parameter pair."""
    for i in range(100000):
        df = derivative(x, y, a, c)
        # NOTE(review): abs() on the a-gradient means a only ever
        # decreases, unlike the plain signed step used for c — confirm
        # this is intentional and not a typo.
        a -= 0.00001*(abs(df[0]))
        c -= 0.00001*df[1]
    return a, c
solution = gradient_descent(x, y, 3.5, 1.4)
y2 = solution[0]*(x**7) + solution[1]*x
plt.scatter(x, y2)
|
import math

def function(x):
    """f(x) = e**(-x) - x, whose root is sought."""
    return math.exp(-x) - x

def derivative(x):
    """f'(x) = -e**(-x) - 1."""
    return -(math.exp(-x)) - 1

def roots_nr(guess, error):
    """Newton-Raphson root search on `function`.

    Iterates until the percentage change between successive estimates
    is at most `error` in magnitude. The original compared the *signed*
    change, so any overshoot (negative change) ended the loop
    prematurely; the stopping rule needs the absolute value. With
    error=0 the loop runs until successive estimates are identical.
    """
    ec = 100
    while abs(ec) > error:
        xn = guess - (function(guess)/derivative(guess))
        ec = (guess - xn)/xn * 100
        guess = xn
    return xn
roots_nr(1, 0)
|
import socket # We need our socket functions
# This is the client function, used for sending the mbr info to the server
class Client:
    """TCP client that ships a chunk of MBR data to a local server."""
    # Server endpoint: localhost:5000
    host = '127.0.0.1'
    port = 5000
    def __init__(self):
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Creating our socket
        except socket.error as err:
            print('Socket failed with error: ' + str(err))
            # NOTE(review): returning a non-None value from __init__
            # raises TypeError in Python — confirm intended handling.
            return 1
    # Method for sending chunk of data to host
    def send_data(self, data):
        """Connect to host:port and send `data`; returns 1 on error."""
        try:
            self.sock.connect((Client.host, Client.port)) # Connect to host:port (127.0.0.1:5000)
            self.sock.send(data) # Send our data
        except socket.error as err:
            print('Error sending sector data with error: ' + str(err))
            return 1
# The main function
def main():
    """Read the first 512 bytes of ./block.dd, slice the partition-table
    region, and transmit it via a Client; returns 1 if the file is missing."""
    mbr = bytearray() # Declare the MBR variable, open and read the MBR into the variable
    try:
        binary = open('./block.dd', 'rb')
    except IOError as err:
        print('File not found: ' + str(err))
        return 1
    mbr = binary.read(512)
    # Collect our 15 byte sector from the MBR
    # NOTE(review): 0x1BE..0x1CD is 15 bytes; a full MBR partition entry
    # is 16 bytes (0x1BE..0x1CE) — confirm the intended length.
    sector = bytearray(mbr[0x1BE:0x1CD])
    # Initialize the Client object and send the sector collected
    client = Client()
    print("[+] Sending sector data")
    client.send_data(sector)
    print("[+] Data sent successfully")
    # Once the data is sent, close the socket connection
    client.sock.close()
if __name__ == '__main__':
    main()
|
# Celsius-to-Fahrenheit converter (prompt and output text in Chinese).
tempc = input('請輸入攝氏C,我來幫你轉成華氏F唷')
tempc = float(tempc)
tempf = tempc * 9 / 5 + 32
tempf = float(tempf)   # already a float; redundant cast kept from the original
print('轉成華氏後是',tempf,'度唷!')
|
def answers(numbers):
    """Return the length of the cycle eventually entered when repeatedly
    following i -> numbers[i] starting from pirate 0.

    Uses a dict of first-visit step indices for O(1) membership tests;
    the original scanned a growing list, O(n) per step (O(n^2) total).
    """
    first_seen = {}              # pirate -> step index of its first visit
    i = 0
    while i not in first_seen:
        first_seen[i] = len(first_seen)
        i = numbers[i]           # move to the next pirate
    # Cycle length = pirates visited minus the index where the repeat begins
    return len(first_seen) - first_seen[i]
|
import math
def even(x):
    """Round x to a neighbouring even integer: the ceiling when the
    ceiling is even, otherwise the floor."""
    upper = math.ceil(x)
    return upper if upper % 2 == 0 else math.floor(x)
|
import datetime
def fact():
    '''Return {'current_hour': HH} with the current hour as a
    zero-padded two-digit string.'''
    try:
        current_hour = datetime.datetime.now().strftime("%H")
    except (IOError, OSError):
        # Defensive fallback kept from the original; strftime is not
        # actually expected to raise these.
        current_hour = ""
    return {'current_hour': current_hour}
if __name__ == '__main__':
print(fact())
|
import datetime
def fact():
    '''Return {'current_day_name': name} with the current weekday name.'''
    try:
        dayname = datetime.datetime.now().strftime("%A")
    except (IOError, OSError):
        # Defensive fallback kept from the original; strftime is not
        # actually expected to raise these.
        dayname = ""
    return {'current_day_name': dayname}
if __name__ == '__main__':
print(fact())
|
# Bubble sort with a trace print after every swap.
a = [6,4,1]
swap = 0          # number of passes performed
swap1 = 1         # swaps made in the current pass (nonzero => keep going)
while swap1 != 0:
    swap1 = 0
    for i in range (1,len(a),1):
        if a[i-1] > a[i]:
            # Swap the adjacent out-of-order pair
            temp = a[i]
            a[i] = a[i-1]
            a[i-1] = temp
            swap1 += 1
            print(swap1 ,a)
    swap += 1
# NOTE(review): `swap` counts passes (including the final no-swap pass),
# not individual swaps, despite the message below.
print(f"Array is sorted in {swap} swaps")
print("First Element:",a[0])
print("Last Element:",a[-1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.