code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if isinstance(el, NavigableString): return type(el)(el) copy = Tag(None, el.builder, el.name, el.namespace, el.nsprefix) # work around bug where there is no builder set # https://bugs.launchpad.net/beautifulsoup/+bug/1307471 copy.attrs = dict(el.attrs) for attr in ('can_be_empty_element', 'hidden'): setattr(copy, attr, getattr(el, attr)) for child in el.contents: copy.append(clone_bs4_elem(child)) return copy
def clone_bs4_elem(el)
Clone a bs4 tag before modifying it. Code from `http://stackoverflow.com/questions/23057631/clone-element-with-beautifulsoup`
3.585169
3.543571
1.011739
self._session.get(Zhihu_URL) r = self._session.get(self._get_captcha_url()) return r.content
def get_captcha(self)
Fetch the captcha data. :return: captcha image data :rtype: bytes
6.398108
6.285144
1.017973
data = {'email': email, 'password': password, 'remember_me': 'true'} if captcha is not None: data['captcha'] = captcha r = self._session.post(Login_URL, data=data) j = r.json() code = int(j['r']) message = j['msg'] cookies_str = json.dumps(self._session.cookies.get_dict()) \ if code == 0 else '' return code, message, cookies_str
def login(self, email, password, captcha=None)
Log in to Zhihu. :param str email: email address :param str password: password :param str captcha: captcha text; defaults to None, meaning no captcha is submitted :return: a 3-tuple of (0) int success flag, 0 for success and 1 for failure, (1) str failure reason, empty string on success, (2) str cookies string, empty string on failure :rtype: (int, str, str)
2.73551
2.49153
1.097924
if os.path.isfile(cookies): with open(cookies) as f: cookies = f.read() cookies_dict = json.loads(cookies) self._session.cookies.update(cookies_dict)
def login_with_cookies(self, cookies)
Log in to Zhihu using a cookies file or a cookies string. :param str cookies: either a file name, in which case the file content is used as the cookies string, or the cookies string itself :return: nothing :rtype: None
2.633849
2.432363
1.082836
print('====== zhihu login =====') email = input('email: ') if use_getpass: password = getpass.getpass('password: ') else: password = input("password: ") if need_captcha: captcha_data = self.get_captcha() with open('captcha.gif', 'wb') as f: f.write(captcha_data) print('please check captcha.gif for captcha') captcha = input('captcha: ') os.remove('captcha.gif') else: captcha = None print('====== logging.... =====') code, msg, cookies = self.login(email, password, captcha) if code == 0: print('login successfully') else: print('login failed, reason: {0}'.format(msg)) return cookies
def login_in_terminal(self, need_captcha=False, use_getpass=True)
Log in to Zhihu interactively from the terminal, without using cookies. :param bool need_captcha: whether to prompt for a captcha; set to True if login fails :param bool use_getpass: whether to read the password in secure mode, defaults to True; if the password cannot be entered properly in some Windows IDEs, try setting this to False :return: the cookies string on success :rtype: str
2.578979
2.606441
0.989464
cookies_str = self.login_in_terminal(need_captcha, use_getpass) if cookies_str: with open(file, 'w') as f: f.write(cookies_str) print('cookies file created.') else: print('can\'t create cookies.')
def create_cookies(self, file, need_captcha=False, use_getpass=True)
Run the login flow in the terminal and save the cookies to a file for later use. :param str file: file name :param bool need_captcha: whether to use a captcha during login, defaults to False :param bool use_getpass: whether to read the password in secure mode, defaults to True; if the password cannot be entered properly in some Windows IDEs, try setting this to False :return:
2.943454
3.336029
0.882323
from random import choice if https: self.proxies = [{'http': p, 'https': p} for p in proxies] else: self.proxies = [{'http': p} for p in proxies] def get_with_random_proxy(url, **kwargs): proxy = choice(self.proxies) kwargs['proxies'] = proxy if auth: kwargs['auth'] = auth return self._session.original_get(url, **kwargs) def post_with_random_proxy(url, *args, **kwargs): proxy = choice(self.proxies) kwargs['proxies'] = proxy if auth: kwargs['auth'] = auth return self._session.original_post(url, *args, **kwargs) self._session.original_get = self._session.get self._session.get = get_with_random_proxy self._session.original_post = self._session.post self._session.post = post_with_random_proxy
def set_proxy_pool(self, proxies, auth=None, https=True)
Set up a proxy pool. :param proxies: list of proxies, e.g. ``["ip1:port1", "ip2:port2"]`` :param auth: credentials if the proxies require authentication, for example: .. code-block:: python from requests.auth import HTTPProxyAuth auth = HTTPProxyAuth('laike9m', '123') :param https: defaults to True; pass False to skip setting an https proxy :Note: each GET/POST request picks a proxy from the list at random
1.797325
1.935954
0.928393
self.proxies = None self._session.get = self._session.original_get self._session.post = self._session.original_post del self._session.original_get del self._session.original_post
def remove_proxy_pool(self)
Remove the proxy pool.
3.279792
3.082333
1.064061
from .me import Me headers = dict(Default_Header) headers['Host'] = 'zhuanlan.zhihu.com' res = self._session.get(Get_Me_Info_Url, headers=headers) json_data = res.json() url = json_data['profileUrl'] name = json_data['name'] motto = json_data['bio'] photo = json_data['avatar']['template'].format( id=json_data['avatar']['id'], size='r') return Me(url, name, motto, photo, session=self._session)
def me(self)
Get the Me instance associated with the current cookies. :return: the Me object for these cookies :rtype: Me
3.873699
3.742579
1.035035
self._make_soup() parent_topic_tag = self.soup.find('div', class_='parent-topic') if parent_topic_tag is None: yield [] else: for topic_tag in parent_topic_tag.find_all('a'): yield Topic(Zhihu_URL + topic_tag['href'], topic_tag.text.strip(), session=self._session)
def parents(self)
Get the parent topics of this topic. Note: since no topic with many parent topics was found for testing, this method may misbehave in some cases; feedback is welcome. :return: the parent topics of this topic, as a generator :rtype: Topic.Iterable
3.78043
3.249895
1.163247
self._make_soup() child_topic_tag = self.soup.find('div', class_='child-topic') if child_topic_tag is None: return [] elif '共有' not in child_topic_tag.contents[-2].text: for topic_tag in child_topic_tag.div.find_all('a'): yield Topic(Zhihu_URL + topic_tag['href'], topic_tag.text.strip(), session=self._session) else: flag = 'load' child = '' data = {'_xsrf': self.xsrf} params = { 'parent': self.id } while flag == 'load': params['child'] = child res = self._session.post(Topic_Get_Children_Url, params=params, data=data) j = map(lambda x: x[0], res.json()['msg'][1]) *topics, last = j for topic in topics: yield Topic(Zhihu_URL + '/topic/' + topic[2], topic[1], session=self._session) flag = last[0] child = last[2] if flag == 'topic': yield Topic(Zhihu_URL + '/topic/' + last[2], last[1], session=self._session)
def children(self)
Get the child topics of this topic. :return: the child topics, as a generator :rtype: Topic.Iterable
3.438081
3.311765
1.038142
from .author import Author, ANONYMOUS self._make_soup() gotten_data_num = 20 data = { '_xsrf': self.xsrf, 'start': '', 'offset': 0 } while gotten_data_num == 20: res = self._session.post( Topic_Get_More_Follower_Url.format(self.id), data=data) j = res.json()['msg'] gotten_data_num = j[0] data['offset'] += gotten_data_num soup = BeautifulSoup(j[1]) divs = soup.find_all('div', class_='zm-person-item') for div in divs: h2 = div.h2 url = Zhihu_URL + h2.a['href'] name = h2.a.text motto = h2.parent.div.text.strip() try: yield Author(url, name, motto, session=self._session) except ValueError: # invalid url yield ANONYMOUS data['start'] = int(re_get_number.match(divs[-1]['id']).group(1))
def followers(self)
Get the followers of this topic. :return: topic followers, as a generator :rtype: Author.Iterable
4.226947
4.118792
1.026259
img = self.soup.find('a', id='zh-avartar-edit-form').img['src'] return img.replace('_m', '_r')
def photo_url(self)
Get the URL of the topic's avatar image. :return: topic avatar URL :rtype: str
13.296858
14.145775
0.939988
from .author import Author, ANONYMOUS self._make_soup() t = self.soup.find('div', id='zh-topic-top-answerer') if t is None: return for d in t.find_all('div', class_='zm-topic-side-person-item-content'): url = Zhihu_URL + d.a['href'] name = d.a.text motto = d.find('span', class_='bio')['title'] try: yield Author(url, name, motto, session=self._session) except ValueError: # invalid url yield ANONYMOUS
def top_authors(self)
Get the top answerers of this topic. :return: the top answerers under this topic, usually five or none at all, as a generator :rtype: Author.Iterable
5.251022
4.71797
1.112983
from .question import Question from .answer import Answer from .author import Author, ANONYMOUS top_answers_url = Topic_Top_Answers_Url.format(self.id) params = {'page': 1} while True: # stop after 50 pages if params['page'] > 50: return res = self._session.get(top_answers_url, params=params) params['page'] += 1 soup = BeautifulSoup(res.content) # fewer than 50 pages: error page reached, stop if soup.find('div', class_='error') is not None: return questions = soup.find_all('a', class_='question_link') answers = soup.find_all('a', class_='answer-date-link') authors = soup.find_all('div', class_='zm-item-answer-author-info') upvotes = soup.find_all('a', class_='zm-item-vote-count') for ans, up, q, au in zip(answers, upvotes, questions, authors): answer_url = Zhihu_URL + ans['href'] question_url = Zhihu_URL + q['href'] question_title = q.text.strip() upvote = up.text if upvote.isdigit(): upvote = int(upvote) else: upvote = None question = Question(question_url, question_title, session=self._session) if au.a is None: author = ANONYMOUS else: author_url = Zhihu_URL + au.a['href'] author_name = au.a.text author_motto = au.strong['title'] if au.strong else '' author = Author(author_url, author_name, author_motto, session=self._session) yield Answer(answer_url, question, author, upvote, session=self._session)
def top_answers(self)
Get the highlighted (top) answers under this topic. :return: top answers under the topic, as a generator :rtype: Answer.Iterable
2.611468
2.510453
1.040238
from .question import Question question_url = Topic_Questions_Url.format(self.id) params = {'page': 1} older_time_stamp = int(time.time()) * 1000 while True: res = self._session.get(question_url, params=params) soup = BeautifulSoup(res.content) if soup.find('div', class_='error') is not None: return questions = soup.find_all('div', class_='question-item') questions = list(filter( lambda x: int(x.h2.span['data-timestamp']) < older_time_stamp, questions)) for qu_div in questions: url = Zhihu_URL + qu_div.h2.a['href'] title = qu_div.h2.a.text.strip() creation_time = datetime.fromtimestamp( int(qu_div.h2.span['data-timestamp']) // 1000) yield Question(url, title, creation_time=creation_time, session=self._session) older_time_stamp = int(questions[-1].h2.span['data-timestamp']) params['page'] += 1
def questions(self)
Get all questions under this topic (in descending time order). :return: all questions under the topic, as a generator :rtype: Question.Iterable
2.757412
2.614891
1.054504
from .question import Question question_url = Topic_Unanswered_Question_Url.format(self.id) params = {'page': 1} while True: res = self._session.get(question_url, params=params) soup = BeautifulSoup(res.content) if soup.find('div', class_='error') is not None: return questions = soup.find_all('div', class_='question-item') for qu_div in questions: url = Zhihu_URL + qu_div.h2.a['href'] title = qu_div.h2.a.text.strip() yield Question(url, title, session=self._session) params['page'] += 1
def unanswered_questions(self)
Get the unanswered ("waiting for answers") questions under this topic. What counts as a "waiting for answers" question: https://www.zhihu.com/question/40470324 :return: unanswered questions under the topic, as a generator :rtype: Question.Iterable
3.041194
2.788214
1.090732
from .question import Question from .answer import Answer from .author import Author, ANONYMOUS newest_url = Topic_Newest_Url.format(self.id) params = {'start': 0, '_xsrf': self.xsrf} res = self._session.get(newest_url) soup = BeautifulSoup(res.content) while True: divs = soup.find_all('div', class_='folding') # return immediately if the topic has no answers if len(divs) == 0: return last_score = divs[-1]['data-score'] for div in divs: q = div.find('a', class_="question_link") question_url = Zhihu_URL + q['href'] question_title = q.text.strip() question = Question(question_url, question_title, session=self._session) ans = div.find('a', class_='answer-date-link') answer_url = Zhihu_URL + ans['href'] upvote = div.find('a', class_='zm-item-vote-count').text if upvote.isdigit(): upvote = int(upvote) else: upvote = None au = div.find('div', class_='zm-item-answer-author-info') if au.a is None: author = ANONYMOUS else: author_url = Zhihu_URL + au.a['href'] author_name = au.a.text author_motto = au.strong['title'] if au.strong else '' author = Author(author_url, author_name, author_motto, session=self._session) yield Answer(answer_url, question, author, upvote, session=self._session) params['offset'] = last_score res = self._session.post(newest_url, data=params) gotten_feed_num = res.json()['msg'][0] # return if no more content was fetched if gotten_feed_num == 0: return soup = BeautifulSoup(res.json()['msg'][1])
def answers(self)
Get all answers under this topic (in descending time order). :return: all answers under the topic, as a generator :rtype: Answer.Iterable
3.050071
2.851848
1.069507
from .question import Question hot_questions_url = Topic_Hot_Questions_Url.format(self.id) params = {'start': 0, '_xsrf': self.xsrf} res = self._session.get(hot_questions_url) soup = BeautifulSoup(res.content) while True: questions_duplicate = soup.find_all('a', class_='question_link') # return immediately if the topic has no questions if len(questions_duplicate) == 0: return # remove duplicate questions questions = list(set(questions_duplicate)) questions.sort(key=self._get_score, reverse=True) last_score = soup.find_all( 'div', class_='feed-item')[-1]['data-score'] for q in questions: question_url = Zhihu_URL + q['href'] question_title = q.text.strip() question = Question(question_url, question_title, session=self._session) yield question params['offset'] = last_score res = self._session.post(hot_questions_url, data=params) gotten_feed_num = res.json()['msg'][0] # return if no more questions were fetched if gotten_feed_num == 0: return soup = BeautifulSoup(res.json()['msg'][1])
def hot_questions(self)
Get the hot questions under this topic. :return: questions from the topic's hot feed, as a generator ordered by popularity :rtype: Question.Iterable
3.664018
3.478378
1.05337
from .question import Question from .author import Author from .answer import Answer hot_questions_url = Topic_Hot_Questions_Url.format(self.id) params = {'start': 0, '_xsrf': self.xsrf} res = self._session.get(hot_questions_url) soup = BeautifulSoup(res.content) while True: answers_div = soup.find_all('div', class_='feed-item') last_score = answers_div[-1]['data-score'] for div in answers_div: # a missing text area means the answer has been censored if not div.textarea: continue question_url = Zhihu_URL + div.h2.a['href'] question_title = div.h2.a.text.strip() question = Question(question_url, question_title, session=self._session) author_link = div.find('a', class_='author-link') if not author_link: author_url = None author_name = '匿名用户' author_motto = '' else: author_url = Zhihu_URL + author_link['href'] author_name = author_link.text author_motto_span = div.find('span', class_='bio') author_motto = author_motto_span['title'] \ if author_motto_span else '' author = Author(author_url, author_name, author_motto, session=self._session) body = div.find('div', class_='zm-item-rich-text') answer_url = Zhihu_URL + body['data-entry-url'] upvote_num = int(div.find( 'div', class_='zm-item-vote-info')['data-votecount']) yield Answer(answer_url, question, author, upvote_num, session=self._session) params['offset'] = last_score res = self._session.post(hot_questions_url, data=params) gotten_feed_num = res.json()['msg'][0] # return if no more answers were fetched if gotten_feed_num == 0: return soup = BeautifulSoup(res.json()['msg'][1])
def hot_answers(self)
Get the hot answers under this topic. :return: answers from the topic's hot feed, as a generator ordered by popularity :rtype: Answer.Iterable
2.964578
2.892556
1.024899
return re.match(r'^.*/([^/]+)/$', self.url).group(1) \ if self.url is not None else ''
def id(self)
Get the user id, i.e. the last part of the profile URL. :return: user id :rtype: str
6.509048
6.439113
1.010861
div = self.soup.find('div', class_='zm-profile-header-op-btns') if div is not None: return div.button['data-id'] else: ga = self.soup.find('script', attrs={'data-name': 'ga_vars'}) return json.loads(ga.text)['user_hash']
def hash_id(self)
Get the author's internal hash id (ignore it if you don't need it). :return: user hash id :rtype: str
5.628383
4.881176
1.153079
if self.url is None: return '匿名用户' if self.soup is not None: return self.soup.find('div', class_='title-section').span.text else: assert self.card is not None return self.card.find('span', class_='name').text
def name(self)
Get the user's name. :return: user name :rtype: str
4.487101
4.102652
1.093708
if self.url is None: return '' else: if self.soup is not None: bar = self.soup.find( 'div', class_='title-section') if len(bar.contents) < 4: return '' else: return bar.contents[3].text else: assert self.card is not None motto = self.card.find('div', class_='tagline') return motto.text if motto is not None else ''
def motto(self)
Get the user's self-introduction (for historical reasons this property is still called motto). :return: user self-introduction :rtype: str
3.457884
3.44955
1.002416
if self.url is not None: if self.soup is not None: img = self.soup.find('img', class_='Avatar Avatar--l')['src'] return img.replace('_l', '_r') else: assert (self.card is not None) return PROTOCOL + self.card.img['src'].replace('_xs', '_r') else: return 'http://pic1.zhimg.com/da8e974dc_r.jpg'
def photo_url(self)
Get the URL of the user's avatar image. :return: user avatar URL :rtype: str
6.954608
6.543668
1.0628
if self.url is None: return 0 else: number = int(self.soup.find( 'div', class_='zm-profile-side-following').a.strong.text) return number
def followee_num(self)
Get the number of people this user follows. :return: number of followees :rtype: int
5.83357
5.037823
1.157955
if self.url is None: return 0 else: number = int(self.soup.find( 'div', class_='zm-profile-side-following zg-clear').find_all( 'a')[1].strong.text) return number
def follower_num(self)
Get the number of followers, i.e. how many people follow this user. :return: number of followers :rtype: int
5.478661
4.761305
1.150664
if self.url is None: return 0 else: number = int(self.soup.find( 'span', class_='zm-profile-header-user-agree').strong.text) return number
def upvote_num(self)
Get the number of upvotes received. :return: number of upvotes received :rtype: int
6.477684
5.545155
1.16817
if self.url is None: return None else: tmp = self.soup.find( 'a', class_='zm-profile-header-user-weibo') return tmp['href'] if tmp is not None else 'unknown'
def weibo_url(self)
Get the user's Weibo link. :return: Weibo URL, or 'unknown' if there is none :rtype: str
5.379034
4.318594
1.245552
if self.url is not None: tag = self.soup.find('div', class_='zm-profile-side-topics') if tag is not None: return int(re_get_number.match( tag.parent.strong.text).group(1)) return 0
def followed_topic_num(self)
Get the number of topics the user follows. :return: number of followed topics :rtype: int
5.43146
5.259185
1.032757
from .question import Question if self.url is None or self.question_num == 0: return for page_index in range(1, (self.question_num - 1) // 20 + 2): html = self._session.get( self.url + 'asks?page=' + str(page_index)).text soup = BeautifulSoup(html) question_links = soup.find_all('a', class_='question_link') question_datas = soup.find_all( 'div', class_='zm-profile-section-main') for link, data in zip(question_links, question_datas): url = Zhihu_URL + link['href'] title = link.text.strip() answer_num = int( re_get_number.match(data.div.contents[4]).group(1)) follower_num = int( re_get_number.match(data.div.contents[6]).group(1)) q = Question(url, title, follower_num, answer_num, session=self._session) yield q
def questions(self)
Get all questions asked by the user. :return: all of the user's questions, as a generator :rtype: Question.Iterable
2.860006
2.693595
1.06178
from .question import Question from .answer import Answer if self.url is None or self.answer_num == 0: return for page_index in range(1, (self.answer_num - 1) // 20 + 2): html = self._session.get( self.url + 'answers?page=' + str(page_index)).text soup = BeautifulSoup(html) questions = soup.find_all('a', class_='question_link') upvotes = soup.find_all('a', class_='zm-item-vote-count') for q, upvote in zip(questions, upvotes): answer_url = Zhihu_URL + q['href'] question_url = Zhihu_URL + re_a2q.match(q['href']).group(1) question_title = q.text upvote_num = upvote.text if upvote_num.isdigit(): upvote_num = int(upvote_num) else: upvote_num = None question = Question(question_url, question_title, session=self._session) yield Answer(answer_url, question, self, upvote_num, session=self._session)
def answers(self)
Get all answers written by the user. :return: all of the user's answers, as a generator :rtype: Answer.Iterable
2.573105
2.489663
1.033515
from .collection import Collection if self.url is None or self.collection_num == 0: return else: collection_num = self.collection_num for page_index in range(1, (collection_num - 1) // 20 + 2): html = self._session.get( self.url + 'collections?page=' + str(page_index)).text soup = BeautifulSoup(html) collections_names = soup.find_all( 'a', class_='zm-profile-fav-item-title') collection_follower_nums = soup.find_all( 'div', class_='zm-profile-fav-bio') for c, f in zip(collections_names, collection_follower_nums): c_url = Zhihu_URL + c['href'] c_name = c.text c_fn = int(re_get_number.match(f.contents[2]).group(1)) yield Collection(c_url, self, c_name, c_fn, session=self._session)
def collections(self)
Get the user's collections. :return: user collections, as a generator :rtype: Collection.Iterable
3.40947
3.213354
1.061032
from .column import Column if self.url is None or self.post_num == 0: return soup = BeautifulSoup(self._session.get(self.url + 'posts').text) column_list = soup.find('div', class_='column-list') column_tags = column_list.find_all('div', class_='item') for column_tag in column_tags: name = column_tag['title'] url = column_tag['data-href'] numbers = column_tag.find('span', class_='des').text.split('•') follower_num = int(re_get_number.match(numbers[0]).group(1)) if len(numbers) == 1: post_num = 0 else: post_num = int( re_get_number.match(numbers[1]).group(1)) yield Column(url, name, follower_num, post_num, session=self._session)
def columns(self)
Get the user's columns. :return: user columns, as a generator :rtype: Column.Iterable
3.091011
2.813909
1.098476
from .column import Column if self.url is None: return if self.followed_column_num > 0: tag = self.soup.find('div', class_='zm-profile-side-columns') if tag is not None: for a in tag.find_all('a'): yield Column(a['href'], a.img['alt'], session=self._session) if self.followed_column_num > 7: offset = 7 gotten_data_num = 20 while gotten_data_num == 20: params = { 'hash_id': self.hash_id, 'limit': 20, 'offset': offset } data = { 'method': 'next', '_xsrf': self.xsrf, 'params': json.dumps(params) } j = self._session.post(Author_Get_More_Follow_Column_URL, data=data).json() gotten_data_num = len(j['msg']) offset += gotten_data_num for msg in map(BeautifulSoup, j['msg']): name = msg.strong.text url = msg.a['href'] post_num = int(re_get_number.match( msg.span.text).group(1)) yield Column(url, name, post_num=post_num, session=self._session)
def followed_columns(self)
Get the columns the user follows. :return: followed columns, as a generator :rtype: Column.Iterable
3.51045
3.323222
1.05634
from .topic import Topic if self.url is None: return if self.followed_topic_num > 0: tag = self.soup.find('div', class_='zm-profile-side-topics') if tag is not None: for a in tag.find_all('a'): yield Topic(Zhihu_URL + a['href'], a.img['alt'], session=self._session) if self.followed_topic_num > 7: offset = 7 gotten_data_num = 20 while gotten_data_num == 20: data = {'start': 0, 'offset': offset, '_xsrf': self.xsrf} j = self._session.post( Author_Get_More_Follow_Topic_URL.format(self.id), data=data).json() gotten_data_num = j['msg'][0] offset += gotten_data_num topic_item = BeautifulSoup(j['msg'][1]).find_all( 'div', class_='zm-profile-section-item') for div in topic_item: name = div.strong.text url = Zhihu_URL + div.a['href'] yield Topic(url, name, session=self._session)
def followed_topics(self)
Get the topics the user follows. :return: followed topics, as a generator :rtype: Topic.Iterable
3.48986
3.383555
1.031418
from .activity import Activity if self.url is None: return gotten_feed_num = 20 start = '0' api_url = self.url + 'activities' while gotten_feed_num == 20: data = {'_xsrf': self.xsrf, 'start': start} res = self._session.post(api_url, data=data) gotten_feed_num = res.json()['msg'][0] soup = BeautifulSoup(res.json()['msg'][1]) acts = soup.find_all( 'div', class_='zm-profile-section-item zm-item clearfix') start = acts[-1]['data-time'] if len(acts) > 0 else 0 for act in acts: # --- ignore Round Table temporarily --- if act.attrs['data-type-detail'] == "member_follow_roundtable": continue # --- --- --- --- -- --- --- --- --- --- yield Activity(act, self._session, self)
def activities(self)
Get the user's recent activities. :return: recent activities, as a generator; see :class:`.Activity` for details :rtype: Activity.Iterable
5.08177
4.883243
1.040655
self._make_soup() act = self.soup.find( 'div', class_='zm-profile-section-item zm-item clearfix') return int(act['data-time']) if act is not None else -1
def last_activity_time(self)
Get the time of the user's last activity. :return: time of the last activity, as a unix timestamp :rtype: int
5.891836
5.975665
0.985972
return self.upvote_num + self.thank_num + \ self.question_num + self.answer_num == 0
def is_zero_user(self)
Return whether the user is a "zero user" (actually four zeros: 0 upvotes, 0 thanks, 0 questions, 0 answers). :return: whether the user is a zero user :rtype: bool
8.407869
5.015746
1.676295
f.mcache = {} f.mrefresh_keyword = refresh_keyword return decorator.decorator(_memoize, f)
def memoize(f, refresh_keyword='mrefresh')
Memoize decorator. The refresh keyword is the keyword used to bypass the cache (in the function call).
6.847397
10.178571
0.672727
# handle string input if type(arg) == str: arg = arg.strip() # parse csv as tickers and create children if ',' in arg: arg = arg.split(',') arg = [x.strip() for x in arg] # assume single string - create single item list else: arg = [arg] return arg
def parse_arg(arg)
Parses arguments for convenience. Argument can be a csv list ('a,b,c'), a string, a list, a tuple. Returns a list.
5.877699
5.80276
1.012914
pattern = re.compile('[\W_]+') res = pattern.sub('', ticker.split(' ')[0]) return res.lower()
def clean_ticker(ticker)
Cleans a ticker for easier use throughout MoneyTree. Splits by space and only keeps the first bit. Also removes any characters that are not letters. Returns as lowercase. >>> clean_ticker('^VIX') 'vix' >>> clean_ticker('SPX Index') 'spx'
4.554669
5.039972
0.903709
if val < src[0]: return dst[0] if val > src[1]: return dst[1] return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
def scale(val, src, dst)
Scale value from src range to dst range. If value outside bounds, it is clipped and set to the low or high bound of dst. Ex: scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0 scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
1.620307
1.798956
0.900693
if isinstance(item, pd.Series): return item.map(lambda x: format(x, format_str)) elif isinstance(item, pd.DataFrame): return item.applymap(lambda x: format(x, format_str))
def as_format(item, format_str='.2f')
Map a format string over a pandas object.
1.793384
1.613469
1.111508
return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start
def to_price_index(returns, start=100)
Returns a price index given a series of returns. Args: * returns: Expects a return series * start (number): Starting level Assumes arithmetic returns. Formula is: cumprod (1+r)
4.052061
5.911827
0.685416
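A small sketch of to_price_index, assuming pandas is installed and the function is importable; the values are just the cumulative product implied by the formula above.
import pandas as pd
rets = pd.Series([0.01, -0.02, 0.03])
print(to_price_index(rets))   # 101.0, 98.98, 101.9494 (start defaults to 100)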
if isinstance(prices, pd.Series): return PerformanceStats(prices) elif isinstance(prices, pd.DataFrame): return GroupStats(*[prices[x] for x in prices.columns]) else: raise NotImplementedError('Unsupported type')
def calc_stats(prices)
Calculates performance stats of a given object. If object is Series, a PerformanceStats object is returned. If object is DataFrame, a GroupStats object is returned. Args: * prices (Series, DataFrame): Set of prices
4.591638
3.131911
1.466082
# make a copy so that we don't modify original data drawdown = prices.copy() # Fill NaN's with previous values drawdown = drawdown.fillna(method='ffill') # Ignore problems with NaN's in the beginning drawdown[np.isnan(drawdown)] = -np.Inf # Rolling maximum roll_max = np.maximum.accumulate(drawdown) drawdown = drawdown / roll_max - 1. return drawdown
def to_drawdown_series(prices)
Calculates the `drawdown <https://www.investopedia.com/terms/d/drawdown.asp>`_ series. This returns a series representing a drawdown. When the price is at all time highs, the drawdown is 0. However, when prices are below high water marks, the drawdown series = current / hwm - 1 The max drawdown can be obtained by simply calling .min() on the result (since the drawdown series is negative) Method ignores all gaps of NaN's in the price series. Args: * prices (Series or DataFrame): Series of prices.
3.922082
4.353952
0.90081
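A quick sketch of to_drawdown_series, assuming pandas is installed and the function is importable; the drawdown is current price over running maximum, minus one.
import pandas as pd
prices = pd.Series([100.0, 110.0, 99.0, 105.0])
print(to_drawdown_series(prices))   # 0.0, 0.0, -0.10, -0.0454...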
is_zero = drawdown == 0 # find start dates (first day where dd is non-zero after a zero) start = ~is_zero & is_zero.shift(1) start = list(start[start == True].index) # NOQA # find end dates (first day where dd is 0 after non-zero) end = is_zero & (~is_zero).shift(1) end = list(end[end == True].index) # NOQA if len(start) == 0: return None # drawdown has no end (end period in dd) if len(end) == 0: end.append(drawdown.index[-1]) # if the first drawdown start is larger than the first drawdown end it # means the drawdown series begins in a drawdown and therefore we must add # the first index to the start series if start[0] > end[0]: start.insert(0, drawdown.index[0]) # if the last start is greater than the end then we must add the last index # to the end series since the drawdown series must finish with a drawdown if start[-1] > end[-1]: end.append(drawdown.index[-1]) result = pd.DataFrame( columns=('Start', 'End', 'Length', 'drawdown'), index=range(0, len(start)) ) for i in range(0, len(start)): dd = drawdown[start[i]:end[i]].min() if index_type is pd.DatetimeIndex: result.iloc[i] = (start[i], end[i], (end[i] - start[i]).days, dd) else: result.iloc[i] = (start[i], end[i], (end[i] - start[i]), dd) return result
def drawdown_details(drawdown, index_type=pd.DatetimeIndex)
Returns a data frame with start, end, days (duration) and drawdown for each drawdown in a drawdown series. .. note:: days are actual calendar days, not trading days Args: * drawdown (pandas.Series): A drawdown Series (can be obtained w/ drawdown(prices). Returns: * pandas.DataFrame -- A data frame with the following columns: start, end, days, drawdown.
2.847473
2.888379
0.985838
start = prices.index[0] end = prices.index[-1] return (prices.iloc[-1] / prices.iloc[0]) ** (1 / year_frac(start, end)) - 1
def calc_cagr(prices)
Calculates the `CAGR (compound annual growth rate) <https://www.investopedia.com/terms/c/cagr.asp>`_ for a given price series. Args: * prices (pandas.Series): A Series of prices. Returns: * float -- cagr.
3.117695
3.762838
0.828549
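A hedged sketch of calc_cagr, assuming pandas is installed, the function is importable, and year_frac is the module's own year-fraction helper.
import pandas as pd
idx = pd.to_datetime(['2010-01-01', '2012-01-01'])
prices = pd.Series([100.0, 200.0], index=idx)
print(calc_cagr(prices))   # roughly 0.414, i.e. 2 ** (1/2) - 1 over the ~2-year span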
diff_rets = returns - benchmark_returns diff_std = np.std(diff_rets, ddof=1) if np.isnan(diff_std) or diff_std == 0: return 0.0 return np.divide(diff_rets.mean(), diff_std)
def calc_information_ratio(returns, benchmark_returns)
Calculates the `Information ratio <https://www.investopedia.com/terms/i/informationratio.asp>`_ (or `from Wikipedia <http://en.wikipedia.org/wiki/Information_ratio>`_).
2.69944
2.735784
0.986715
return t.cdf(returns.calc_information_ratio(other_returns), len(returns) - 1)
def calc_prob_mom(returns, other_returns)
`Probabilistic momentum <http://cssanalytics.wordpress.com/2014/01/28/are-simple-momentum-strategies-too-dumb-introducing-probabilistic-momentum/>`_ (see `momentum investing <https://www.investopedia.com/terms/m/momentum_investing.asp>`_) Basically the "probability or confidence that one asset is going to outperform the other". Source: http://cssanalytics.wordpress.com/2014/01/28/are-simple-momentum-strategies-too-dumb-introducing-probabilistic-momentum/ # NOQA
11.296884
16.522074
0.683745
dfs = [] for s in series: if isinstance(s, pd.DataFrame): dfs.append(s) elif isinstance(s, pd.Series): tmpdf = pd.DataFrame({s.name: s}) dfs.append(tmpdf) else: raise NotImplementedError('Unsupported merge type') return pd.concat(dfs, axis=1)
def merge(*series)
Merge Series and/or DataFrames together. Returns a DataFrame.
2.27849
2.277547
1.000414
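A minimal usage sketch for merge, assuming pandas is installed and the function is importable.
import pandas as pd
a = pd.Series([1, 2], name='a')
b = pd.Series([3, 4], name='b')
print(merge(a, b))   # DataFrame with columns 'a' and 'b', aligned on the index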
names = set(df.columns) for n in names: if len(df[n].shape) > 1: # get subset of df w/ colname n sub = df[n] # make unique colnames sub.columns = ['%s-%s' % (n, x) for x in range(sub.shape[1])] # get colname w/ max # of data keep = sub.count().idxmax() # drop all columns of name n from original df del df[n] # update original df w/ longest col with name n df[n] = sub[keep] return df
def drop_duplicate_cols(df)
Removes duplicate columns from a dataframe and keeps column w/ longest history
4.568271
4.237298
1.078109
return series.asfreq_actual('M', method=method, how=how)
def to_monthly(series, method='ffill', how='end')
Convenience method that wraps asfreq_actual with 'M' param (method='ffill', how='end').
8.054525
3.514014
2.292116
orig = series is_series = False if isinstance(series, pd.Series): is_series = True name = series.name if series.name else 'data' orig = pd.DataFrame({name: series}) # add date column t = pd.concat([orig, pd.DataFrame({'dt': orig.index.values}, index=orig.index.values)], axis=1) # fetch dates dts = t.asfreq(freq=freq, method=method, how=how, normalize=normalize)['dt'] res = orig.loc[dts.values] if is_series: return res[name] else: return res
def asfreq_actual(series, freq, method='ffill', how='end', normalize=False)
Similar to pandas' asfreq but keeps the actual dates. For example, if last data point in Jan is on the 29th, that date will be used instead of the 31st.
3.324636
3.291755
1.009989
# calc vols vol = np.divide(1., np.std(returns, ddof=1)) vol[np.isinf(vol)] = np.NaN volsum = vol.sum() return np.divide(vol, volsum)
def calc_inv_vol_weights(returns)
Calculates weights proportional to inverse volatility of each column. Returns weights that are inversely proportional to the column's volatility resulting in a set of portfolio weights where each position has the same level of volatility. Note, that assets with returns all equal to NaN or 0 are excluded from the portfolio (their weight is set to NaN). Returns: Series {col_name: weight}
4.130804
5.667283
0.728886
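A small sketch of calc_inv_vol_weights, assuming pandas and numpy are installed and the function is importable; column 'y' is five times as volatile as 'x', so 'x' receives the larger weight.
import pandas as pd
rets = pd.DataFrame({'x': [0.01, -0.01, 0.02], 'y': [0.05, -0.05, 0.10]})
print(calc_inv_vol_weights(rets))   # weights sum to 1; 'x' gets roughly 0.83, 'y' roughly 0.17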
def fitness(weights, exp_rets, covar, rf): # portfolio mean mean = sum(exp_rets * weights) # portfolio var var = np.dot(np.dot(weights, covar), weights) # utility - i.e. sharpe ratio util = (mean - rf) / np.sqrt(var) # negative because we want to maximize and optimizer # minimizes metric return -util n = len(returns.columns) # expected return defaults to mean return by default exp_rets = returns.mean() # calc covariance matrix if covar_method == 'ledoit-wolf': covar = sklearn.covariance.ledoit_wolf(returns)[0] elif covar_method == 'standard': covar = returns.cov() else: raise NotImplementedError('covar_method not implemented') weights = np.ones([n]) / n bounds = [weight_bounds for i in range(n)] # sum of weights must be equal to 1 constraints = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.}) optimized = minimize(fitness, weights, (exp_rets, covar, rf), method='SLSQP', constraints=constraints, bounds=bounds, options=options) # check if success if not optimized.success: raise Exception(optimized.message) # return weight vector return pd.Series({returns.columns[i]: optimized.x[i] for i in range(n)})
def calc_mean_var_weights(returns, weight_bounds=(0., 1.), rf=0., covar_method='ledoit-wolf', options=None)
Calculates the mean-variance weights given a DataFrame of returns. Args: * returns (DataFrame): Returns for multiple securities. * weight_bounds ((low, high)): Weight limits for optimization. * rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation * covar_method (str): Covariance matrix estimation method. Currently supported: - `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_ - standard * options (dict): options for minimizing, e.g. {'maxiter': 10000 } Returns: Series {col_name: weight}
3.043145
2.928672
1.039087
def fitness(weights, covar): # total risk contributions # trc = weights*np.matmul(covar,weights)/np.sqrt(np.matmul(weights.T,np.matmul(covar,weights))) # instead of using the true definition for trc we will use the optimization on page 5 trc = weights * np.matmul(covar, weights) n = len(trc) # sum of squared differences of total risk contributions sse = 0. for i in range(n): for j in range(n): #switched from squared deviations to absolute deviations to avoid numerical instability sse += np.abs(trc[i] - trc[j]) # minimizes metric return sse #nonnegative bounds = [(0,None) for i in range(len(x0))] # sum of weights must be equal to 1 constraints = ( { 'type': 'eq', 'fun': lambda W: sum(W) - 1. } ) options = { 'maxiter':maximum_iterations } optimized = minimize( fitness, x0, (cov), method='SLSQP', constraints=constraints, bounds=bounds, options=options, tol=tolerance ) # check if success if not optimized.success: raise Exception(optimized.message) # return weight vector return optimized.x
def _erc_weights_slsqp(x0, cov, b, maximum_iterations, tolerance)
Calculates the equal risk contribution / risk parity weights given a DataFrame of returns. Args: * x0 (np.array): Starting asset weights. * cov (np.array): covariance matrix. * b (np.array): Risk target weights. By definition target total risk contributions are all equal which makes this redundant. * maximum_iterations (int): Maximum iterations in iterative solutions. * tolerance (float): Tolerance level in iterative solutions. Returns: np.array {weight} You can read more about ERC at http://thierry-roncalli.com/download/erc.pdf
4.487516
4.181559
1.073168
n = len(x0) x = x0.copy() var = np.diagonal(cov) ctr = cov.dot(x) sigma_x = np.sqrt(x.T.dot(ctr)) for iteration in range(maximum_iterations): for i in range(n): alpha = var[i] beta = ctr[i] - x[i] * alpha gamma = -b[i] * sigma_x x_tilde = (-beta + np.sqrt( beta * beta - 4 * alpha * gamma)) / (2 * alpha) x_i = x[i] ctr = ctr - cov[i] * x_i + cov[i] * x_tilde sigma_x = sigma_x * sigma_x - 2 * x_i * cov[i].dot( x) + x_i * x_i * var[i] x[i] = x_tilde sigma_x = np.sqrt(sigma_x + 2 * x_tilde * cov[i].dot( x) - x_tilde * x_tilde * var[i]) # check convergence if np.power((x - x0) / x.sum(), 2).sum() < tolerance: return x / x.sum() x0 = x.copy() # no solution found raise ValueError('No solution found after {0} iterations.'.format( maximum_iterations))
def _erc_weights_ccd(x0, cov, b, maximum_iterations, tolerance)
Calculates the equal risk contribution / risk parity weights given a DataFrame of returns. Args: * x0 (np.array): Starting asset weights. * cov (np.array): covariance matrix. * b (np.array): Risk target weights. * maximum_iterations (int): Maximum iterations in iterative solutions. * tolerance (float): Tolerance level in iterative solutions. Returns: np.array {weight} Reference: Griveau-Billion, Theophile and Richard, Jean-Charles and Roncalli, Thierry, A Fast Algorithm for Computing High-Dimensional Risk Parity Portfolios (2013). Available at SSRN: https://ssrn.com/abstract=2325255
2.859864
2.833553
1.009286
n = len(returns.columns) # calc covariance matrix if covar_method == 'ledoit-wolf': covar = sklearn.covariance.ledoit_wolf(returns)[0] elif covar_method == 'standard': covar = returns.cov().values else: raise NotImplementedError('covar_method not implemented') # initial weights (default to inverse vol) if initial_weights is None: inv_vol = 1. / np.sqrt(np.diagonal(covar)) initial_weights = inv_vol / inv_vol.sum() # default to equal risk weight if risk_weights is None: risk_weights = np.ones(n) / n # calc risk parity weights matrix if risk_parity_method == 'ccd': # cyclical coordinate descent implementation erc_weights = _erc_weights_ccd( initial_weights, covar, risk_weights, maximum_iterations, tolerance ) elif risk_parity_method == 'slsqp': #scipys slsqp optimizer erc_weights = _erc_weights_slsqp( initial_weights, covar, risk_weights, maximum_iterations, tolerance ) else: raise NotImplementedError('risk_parity_method not implemented') # return erc weights vector return pd.Series(erc_weights, index=returns.columns, name='erc')
def calc_erc_weights(returns, initial_weights=None, risk_weights=None, covar_method='ledoit-wolf', risk_parity_method='ccd', maximum_iterations=100, tolerance=1E-8)
Calculates the equal risk contribution / risk parity weights given a DataFrame of returns. Args: * returns (DataFrame): Returns for multiple securities. * initial_weights (list): Starting asset weights [default inverse vol]. * risk_weights (list): Risk target weights [default equal weight]. * covar_method (str): Covariance matrix estimation method. Currently supported: - `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_ [default] - standard * risk_parity_method (str): Risk parity estimation method. Currently supported: - ccd (cyclical coordinate descent)[default] * maximum_iterations (int): Maximum iterations in iterative solutions. * tolerance (float): Tolerance level in iterative solutions. Returns: Series {col_name: weight}
2.379554
2.266916
1.049688
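A hedged sketch of calc_erc_weights on random data, assuming numpy, pandas and scikit-learn (for the default ledoit-wolf covariance) are installed and the function is importable; exact weights depend on the random draws.
import numpy as np, pandas as pd
rets = pd.DataFrame(np.random.randn(250, 3) / 100, columns=['a', 'b', 'c'])
w = calc_erc_weights(rets)   # Series indexed by column name, weights sum to ~1
print(w.round(3))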
x = pd.to_datetime('2010-01-01') delta = x - (x - offset) # convert to 'trading days' - rough guestimate days = delta.days * 0.69 if period == 'd': req = days * perc_required elif period == 'm': req = (days / 20) * perc_required elif period == 'y': req = (days / 252) * perc_required else: raise NotImplementedError( 'period not supported. Supported periods are d, m, y') return req
def get_num_days_required(offset, period='d', perc_required=0.90)
Estimates the number of days required to assume that data is OK. Helper function used to determine if there are enough "good" data days over a given period. Args: * offset (DateOffset): Offset (lookback) period. * period (str): Period string. * perc_required (float): percentage of the expected number of days that must be present.
3.814085
4.047316
0.942374
# calculate correlation corr = returns.corr() # calculate dissimilarity matrix diss = 1 - corr # scale down to 2 dimensions using MDS # (multi-dimensional scaling) using the # dissimilarity matrix mds = sklearn.manifold.MDS(dissimilarity='precomputed') xy = mds.fit_transform(diss) def routine(k): # fit KMeans km = sklearn.cluster.KMeans(n_clusters=k) km_fit = km.fit(xy) labels = km_fit.labels_ centers = km_fit.cluster_centers_ # get {ticker: label} mappings mappings = dict(zip(returns.columns, labels)) # print % of var explained totss = 0 withinss = 0 # column average fot totss avg = np.array([np.mean(xy[:, 0]), np.mean(xy[:, 1])]) for idx, lbl in enumerate(labels): withinss += sum((xy[idx] - centers[lbl]) ** 2) totss += sum((xy[idx] - avg) ** 2) pvar_expl = 1.0 - withinss / totss return mappings, pvar_expl, labels if n: result = routine(n) else: n = len(returns.columns) n1 = int(np.ceil(n * 0.6666666666)) for i in range(2, n1 + 1): result = routine(i) if result[1] > 0.9: break if plot: fig, ax = plt.subplots() ax.scatter(xy[:, 0], xy[:, 1], c=result[2], s=90) for i, txt in enumerate(returns.columns): ax.annotate(txt, (xy[i, 0], xy[i, 1]), size=14) # sanitize return value tmp = result[0] # map as such {cluster: [list of tickers], cluster2: [...]} inv_map = {} for k, v in iteritems(tmp): inv_map[v] = inv_map.get(v, []) inv_map[v].append(k) return inv_map
def calc_clusters(returns, n=None, plot=False)
Calculates the clusters based on k-means clustering. Args: * returns (pd.DataFrame): DataFrame of returns * n (int): Specify # of clusters. If None, this will be automatically determined * plot (bool): Show plot? Returns: * dict with structure: {cluster# : [col names]}
3.335124
3.288474
1.014186
# cluster index (name) i = 0 # correlation matrix corr = returns.corr() # remaining securities to cluster remain = list(corr.index.copy()) n = len(remain) res = {} while n > 0: # if only one left then create cluster and finish if n == 1: i += 1 res[i] = remain n = 0 # if not then we have some work to do else: # filter down correlation matrix to current remain cur_corr = corr[remain].loc[remain] # get mean correlations, ordered mc = cur_corr.mean().sort_values() # get lowest and highest mean correlation low = mc.index[0] high = mc.index[-1] # case if corr(high,low) > threshold if corr[high][low] > threshold: i += 1 # new cluster for high and low res[i] = [low, high] remain.remove(low) remain.remove(high) rmv = [] for x in remain: avg_corr = (corr[x][high] + corr[x][low]) / 2.0 if avg_corr > threshold: res[i].append(x) rmv.append(x) [remain.remove(x) for x in rmv] n = len(remain) # otherwise we are creating two clusters - one for high # and one for low else: # add cluster with HC i += 1 res[i] = [high] remain.remove(high) remain.remove(low) rmv = [] for x in remain: if corr[x][high] > threshold: res[i].append(x) rmv.append(x) [remain.remove(x) for x in rmv] i += 1 res[i] = [low] rmv = [] for x in remain: if corr[x][low] > threshold: res[i].append(x) rmv.append(x) [remain.remove(x) for x in rmv] n = len(remain) return res
def calc_ftca(returns, threshold=0.5)
Implementation of David Varadi's `Fast Threshold Clustering Algorithm (FTCA) <http://cssanalytics.wordpress.com/2013/11/26/fast-threshold-clustering-algorithm-ftca/>`_. http://cssanalytics.wordpress.com/2013/11/26/fast-threshold-clustering-algorithm-ftca/ # NOQA More stable than k-means for clustering purposes. If you want more clusters, use a higher threshold. Args: * returns - expects a pandas dataframe of returns where each column is the name of a given security. * threshold (float): Threshold parameter - use higher value for more clusters. Basically controls how similar (correlated) series have to be. Returns: dict of cluster name (a number) and list of securities in cluster
2.783742
2.707183
1.02828
if 1.0 / limit > len(weights): raise ValueError('invalid limit -> 1 / limit must be <= len(weights)') if isinstance(weights, dict): weights = pd.Series(weights) if np.round(weights.sum(), 1) != 1.0: raise ValueError('Expecting weights (that sum to 1) - sum is %s' % weights.sum()) res = np.round(weights.copy(), 4) to_rebalance = (res[res > limit] - limit).sum() ok = res[res < limit] ok += (ok / ok.sum()) * to_rebalance res[res > limit] = limit res[res < limit] = ok if any(x > limit for x in res): return limit_weights(res, limit=limit) return res
def limit_weights(weights, limit=0.1)
Limits weights and redistributes the excess amount proportionally. ex: - weights are {a: 0.7, b: 0.2, c: 0.1} - call with limit=0.5 - excess 0.2 in a is distributed to b and c proportionally. - result is {a: 0.5, b: 0.33, c: 0.167} Args: * weights (Series): A series describing the weights * limit (float): Maximum weight allowed
4.19982
4.170558
1.007016
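A minimal usage sketch for limit_weights, assuming pandas is installed and the function is importable; it reproduces the redistribution described in the docstring above.
import pandas as pd
w = pd.Series({'a': 0.7, 'b': 0.2, 'c': 0.1})
print(limit_weights(w, limit=0.5))   # a -> 0.5, b -> ~0.333, c -> ~0.167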
low = bounds[0] high = bounds[1] if high < low: raise ValueError('Higher bound must be greater or ' 'equal to lower bound') if n * high < total or n * low > total: raise ValueError('solution not possible with given n and bounds') w = [0] * n tgt = -float(total) for i in range(n): rn = n - i - 1 rhigh = rn * high rlow = rn * low lowb = max(-rhigh - tgt, low) highb = min(-rlow - tgt, high) rw = random.uniform(lowb, highb) w[i] = rw tgt += rw random.shuffle(w) return w
def random_weights(n, bounds=(0., 1.), total=1.0)
Generate pseudo-random weights. Returns a list of random weights that is of length n, where each weight is in the range bounds, and where the weights sum up to total. Useful for creating random portfolios when benchmarking. Args: * n (int): number of random weights * bounds ((low, high)): bounds for each weight * total (float): total sum of the weights
3.667985
3.935066
0.932128
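A quick sanity-check sketch for random_weights, assuming the function is importable; every weight stays within the bounds and the weights sum to the requested total.
w = random_weights(4, bounds=(0.0, 0.5), total=1.0)
assert abs(sum(w) - 1.0) < 1e-9 and all(0.0 <= x <= 0.5 for x in w)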
fig, ax = plt.subplots(figsize=figsize) heatmap = ax.pcolor(data, vmin=vmin, vmax=vmax, cmap=cmap) # for some reason heatmap has the y values backwards.... ax.invert_yaxis() if title is not None: plt.title(title) if show_legend: fig.colorbar(heatmap) if show_labels: vals = data.values for x in range(data.shape[0]): for y in range(data.shape[1]): plt.text(x + 0.5, y + 0.5, format(vals[y, x], label_fmt), horizontalalignment='center', verticalalignment='center', color=label_color) plt.yticks(np.arange(0.5, len(data.index), 1), data.index) plt.xticks(np.arange(0.5, len(data.columns), 1), data.columns) return plt
def plot_heatmap(data, title='Heatmap', show_legend=True, show_labels=True, label_fmt='.2f', vmin=None, vmax=None, figsize=None, label_color='w', cmap='RdBu', **kwargs)
Plot a heatmap using matplotlib's pcolor. Args: * data (DataFrame): DataFrame to plot. Usually small matrix (ex. correlation matrix). * title (string): Plot title * show_legend (bool): Show color legend * show_labels (bool): Show value labels * label_fmt (str): Label format string * vmin (float): Min value for scale * vmax (float): Max value for scale * cmap (string): Color map * kwargs: Passed to matplotlib's pcolor
1.984988
2.04528
0.970521
res = data.copy() res[:] = np.nan n = len(data) if window > n: return res for i in range(window - 1, n): res.iloc[i] = fn(data.iloc[i - window + 1:i + 1]) return res
def rollapply(data, window, fn)
Apply a function fn over a rolling window of size window. Args: * data (Series or DataFrame): Series or DataFrame * window (int): Window size * fn (function): Function to apply over the rolling window. For a series, the return value is expected to be a single number. For a DataFrame, it should return a new row. Returns: * Object of same dimensions as data
2.481694
3.237607
0.766521
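A small sketch of rollapply, assuming numpy and pandas are installed and the function is importable; the first full window ends at index 2, so the first two values stay NaN.
import numpy as np, pandas as pd
s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
print(rollapply(s, 3, np.mean))   # NaN, NaN, 2.0, 3.0, 4.0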
if isinstance(x, pd.Series): if x.count() == 0: return x notnanx = ~np.isnan(x) x[notnanx] = scipy.stats.mstats.winsorize(x[notnanx], limits=limits) return x else: return scipy.stats.mstats.winsorize(x, limits=limits)
def _winsorize_wrapper(x, limits)
Wraps scipy winsorize function to drop na's
2.458584
2.239484
1.097835
# operate on copy x = x.copy() if isinstance(x, pd.DataFrame): return x.apply(_winsorize_wrapper, axis=axis, args=(limits, )) else: return pd.Series(_winsorize_wrapper(x, limits).values, index=x.index)
def winsorize(x, axis=0, limits=0.01)
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
3.216815
3.478717
0.924713
def innerfn(x, min, max): return np.interp(x, [np.min(x), np.max(x)], [min, max]) if isinstance(x, pd.DataFrame): return x.apply(innerfn, axis=axis, args=(min, max,)) else: return pd.Series(innerfn(x, min, max), index=x.index)
def rescale(x, min=0., max=1., axis=0)
Rescale values to fit a certain range [min, max]
2.46394
2.364665
1.041983
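A minimal usage sketch for rescale, assuming pandas is installed and the function is importable; values are linearly interpolated from their own [min, max] onto [0, 1].
import pandas as pd
s = pd.Series([1.0, 2.0, 4.0])
print(rescale(s))   # 0.0, 0.333..., 1.0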
if type(rf) is float and rf != 0 and nperiods is None: raise Exception('nperiods must be set if rf != 0 and rf is not a price series') er = returns.to_excess_returns(rf, nperiods=nperiods) negative_returns = np.minimum(returns[1:], 0.) std = np.std(negative_returns, ddof=1) res = np.divide(er.mean(), std) if annualize: if nperiods is None: nperiods = 1 return res * np.sqrt(nperiods) return res
def calc_sortino_ratio(returns, rf=0., nperiods=None, annualize=True)
Calculates the `Sortino ratio <https://www.investopedia.com/terms/s/sortinoratio.asp>`_ given a series of returns (see `Sharpe vs. Sortino <https://www.investopedia.com/ask/answers/010815/what-difference-between-sharpe-ratio-and-sortino-ratio.asp>`_). Args: * returns (Series or DataFrame): Returns * rf (float, Series): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ expressed in yearly (annualized) terms or return series. * nperiods (int): Number of periods used for annualization. Must be provided if rf is non-zero and rf is not a price series
4.16068
4.155534
1.001238
if type(rf) is float and nperiods is not None: _rf = deannualize(rf, nperiods) else: _rf = rf return returns - _rf
def to_excess_returns(returns, rf, nperiods=None)
Given a series of returns, it will return the excess returns over rf. Args: * returns (Series, DataFrame): Returns * rf (float, Series): `Risk-Free rate(s) <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ expressed in annualized term or return series * nperiods (int): Optional. If provided, will convert rf to different frequency using deannualize only if rf is a float Returns: * excess_returns (Series, DataFrame): Returns - rf
5.279886
3.877302
1.361742
dd = prices.to_drawdown_series() return np.divide(np.sqrt(np.sum(np.power(dd, 2))), dd.count())
def to_ulcer_index(prices)
Converts from prices -> `Ulcer index <https://www.investopedia.com/terms/u/ulcerindex.asp>`_ See https://en.wikipedia.org/wiki/Ulcer_index Args: * prices (Series, DataFrame): Prices
6.428133
7.979998
0.805531
if type(rf) is float and rf != 0 and nperiods is None: raise Exception('nperiods must be set if rf != 0 and rf is not a price series') er = prices.to_returns().to_excess_returns(rf, nperiods=nperiods) return np.divide(er.mean(), prices.to_ulcer_index())
def to_ulcer_performance_index(prices, rf=0., nperiods=None)
Converts from prices -> `ulcer performance index <https://www.investopedia.com/terms/u/ulcerindex.asp>`_. See https://en.wikipedia.org/wiki/Ulcer_index Args: * prices (Series, DataFrame): Prices * rf (float, Series): `Risk-free rate of return <https://www.investopedia.com/terms/r/risk-freerate.asp>`_. Assumed to be expressed in yearly (annualized) terms or return series * nperiods (int): Used to deannualize rf if rf is provided (non-zero)
5.555897
6.945306
0.79995
if type(returns) is pd.Series: stats = pd.Series(index=range(num_trials)) elif type(returns) is pd.DataFrame: stats = pd.DataFrame( index=range(num_trials), columns=returns.columns ) else: raise TypeError("returns needs to be a Series or DataFrame!") n = returns.shape[0] for i in range(num_trials): random_indices = resample(returns.index, n_samples=n, random_state=seed + i) stats.loc[i] = func(returns.loc[random_indices]) return stats
def resample_returns(returns, func, seed=0, num_trials=100)
Resample the returns and calculate any statistic on every new sample. https://en.wikipedia.org/wiki/Resampling_(statistics) :param returns (Series, DataFrame): Returns :param func: Given the resampled returns calculate a statistic :param seed: Seed for random number generator :param num_trials: Number of times to resample and run the experiment :return: Series of resampled statistics
2.396432
2.415652
0.992044
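A hedged sketch of resample_returns on random data, assuming numpy, pandas and scikit-learn (which provides the resample helper used internally) are installed and the function is importable; the exact numbers depend on the random draws.
import numpy as np, pandas as pd
rets = pd.Series(np.random.randn(100) / 100)
boot = resample_returns(rets, lambda r: r.mean(), seed=0, num_trials=50)
print(boot.describe())   # distribution of the mean across bootstrap samples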
PandasObject.to_returns = to_returns PandasObject.to_log_returns = to_log_returns PandasObject.to_price_index = to_price_index PandasObject.rebase = rebase PandasObject.calc_perf_stats = calc_perf_stats PandasObject.to_drawdown_series = to_drawdown_series PandasObject.calc_max_drawdown = calc_max_drawdown PandasObject.calc_cagr = calc_cagr PandasObject.calc_total_return = calc_total_return PandasObject.as_percent = utils.as_percent PandasObject.as_format = utils.as_format PandasObject.to_monthly = to_monthly PandasObject.asfreq_actual = asfreq_actual PandasObject.drop_duplicate_cols = drop_duplicate_cols PandasObject.calc_information_ratio = calc_information_ratio PandasObject.calc_prob_mom = calc_prob_mom PandasObject.calc_risk_return_ratio = calc_risk_return_ratio PandasObject.calc_erc_weights = calc_erc_weights PandasObject.calc_inv_vol_weights = calc_inv_vol_weights PandasObject.calc_mean_var_weights = calc_mean_var_weights PandasObject.calc_clusters = calc_clusters PandasObject.calc_ftca = calc_ftca PandasObject.calc_stats = calc_stats PandasObject.plot_heatmap = plot_heatmap PandasObject.plot_corr_heatmap = plot_corr_heatmap PandasObject.rollapply = rollapply PandasObject.winsorize = winsorize PandasObject.rescale = rescale PandasObject.calc_sortino_ratio = calc_sortino_ratio PandasObject.calc_calmar_ratio = calc_calmar_ratio PandasObject.calc_sharpe = calc_sharpe PandasObject.to_excess_returns = to_excess_returns PandasObject.to_ulcer_index = to_ulcer_index PandasObject.to_ulcer_performance_index = to_ulcer_performance_index
def extend_pandas()
Extends pandas' PandasObject (Series, DataFrame) with some functions defined in this file. This facilitates common functional composition used in quant finance. Ex: prices.to_returns().dropna().calc_clusters() (where prices would be a DataFrame)
2.533129
2.436946
1.039469
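A short sketch of how extend_pandas is meant to be used, assuming pandas is installed and that extend_pandas and the helpers it references are importable from the same module.
import pandas as pd
extend_pandas()
prices = pd.Series([100.0, 102.0, 101.0], index=pd.date_range('2020-01-01', periods=3))
print(prices.to_returns().dropna())   # the functions above are now attached directly to Series/DataFrame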
self.rf = rf # Note, that we recalculate everything. self._update(self.prices)
def set_riskfree_rate(self, rf)
Set annual risk-free rate property and calculate properly annualized monthly and daily rates. Then performance stats are recalculated. Affects only this instance of the PerformanceStats. Args: * rf (float): Annual `risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_
17.087055
14.893752
1.147263
print('Stats for %s from %s - %s' % (self.name, self.start, self.end)) if type(self.rf) is float: print('Annual risk-free rate considered: %s' % (fmtp(self.rf))) print('Summary:') data = [[fmtp(self.total_return), fmtn(self.daily_sharpe), fmtp(self.cagr), fmtp(self.max_drawdown)]] print(tabulate(data, headers=['Total Return', 'Sharpe', 'CAGR', 'Max Drawdown'])) print('\nAnnualized Returns:') data = [[fmtp(self.mtd), fmtp(self.three_month), fmtp(self.six_month), fmtp(self.ytd), fmtp(self.one_year), fmtp(self.three_year), fmtp(self.five_year), fmtp(self.ten_year), fmtp(self.incep)]] print(tabulate(data, headers=['mtd', '3m', '6m', 'ytd', '1y', '3y', '5y', '10y', 'incep.'])) print('\nPeriodic:') data = [ ['sharpe', fmtn(self.daily_sharpe), fmtn(self.monthly_sharpe), fmtn(self.yearly_sharpe)], ['mean', fmtp(self.daily_mean), fmtp(self.monthly_mean), fmtp(self.yearly_mean)], ['vol', fmtp(self.daily_vol), fmtp(self.monthly_vol), fmtp(self.yearly_vol)], ['skew', fmtn(self.daily_skew), fmtn(self.monthly_skew), fmtn(self.yearly_skew)], ['kurt', fmtn(self.daily_kurt), fmtn(self.monthly_kurt), fmtn(self.yearly_kurt)], ['best', fmtp(self.best_day), fmtp(self.best_month), fmtp(self.best_year)], ['worst', fmtp(self.worst_day), fmtp(self.worst_month), fmtp(self.worst_year)]] print(tabulate(data, headers=['daily', 'monthly', 'yearly'])) print('\nDrawdowns:') data = [ [fmtp(self.max_drawdown), fmtp(self.avg_drawdown), fmtn(self.avg_drawdown_days)]] print(tabulate(data, headers=['max', 'avg', '# days'])) print('\nMisc:') data = [['avg. up month', fmtp(self.avg_up_month)], ['avg. down month', fmtp(self.avg_down_month)], ['up year %', fmtp(self.win_year_perc)], ['12m up %', fmtp(self.twelve_month_win_perc)]] print(tabulate(data))
def display(self)
Displays an overview containing descriptive stats for the Series provided.
2.135701
2.113346
1.010578
data = [['Year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
         'Sep', 'Oct', 'Nov', 'Dec', 'YTD']]
for k in self.return_table.index:
    r = self.return_table.loc[k].values
    data.append([k] + [fmtpn(x) for x in r])
print(tabulate(data, headers='firstrow'))
def display_monthly_returns(self)
Display a table containing monthly returns and ytd returns for every year in range.
3.090467
2.728934
1.132482
if title is None:
    title = self._get_default_plot_title(
        self.name, freq, 'Return Histogram')

ser = self._get_series(freq).to_returns().dropna()

plt.figure(figsize=figsize)
ax = ser.hist(bins=bins, figsize=figsize, normed=True, **kwargs)
ax.set_title(title)
plt.axvline(0, linewidth=4)
return ser.plot(kind='kde')
def plot_histogram(self, freq=None, figsize=(15, 5), title=None, bins=20, **kwargs)
Plots a histogram of returns given a return frequency.

Args:
    * freq (str): Data frequency used for display purposes.
        This will dictate the type of returns
        (daily returns, monthly, ...)
        Refer to pandas docs for valid period strings.
    * figsize ((x,y)): figure size
    * title (str): Title if default not appropriate
    * bins (int): number of bins for the histogram
    * kwargs: passed to pandas' hist method
3.69052
3.598095
1.025687
stats = self._stats()

data = []
first_row = ['Stat', self.name]
data.append(sep.join(first_row))

for stat in stats:
    k, n, f = stat

    # blank row
    if k is None:
        row = [''] * len(data[0])
        data.append(sep.join(row))
        continue
    elif k == 'rf' and not type(self.rf) == float:
        continue

    row = [n]
    raw = getattr(self, k)
    if f is None:
        row.append(raw)
    elif f == 'p':
        row.append(fmtp(raw))
    elif f == 'n':
        row.append(fmtn(raw))
    elif f == 'dt':
        row.append(raw.strftime('%Y-%m-%d'))
    else:
        raise NotImplementedError('unsupported format %s' % f)
    data.append(sep.join(row))

res = '\n'.join(data)

if path is not None:
    with open(path, 'w') as fl:
        fl.write(res)
else:
    return res
def to_csv(self, sep=',', path=None)
Returns a CSV string with appropriate formatting.
If path is not None, the string will be saved to file at path.

Args:
    * sep (char): Separator
    * path (str): If None, CSV string returned. Else file
        written to specified path.
2.923039
2.976406
0.98207
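An illustrative sketch of the two call modes (CSV string vs. file); the ticker and output path are hypothetical:

import ffn

perf = ffn.get('spy', start='2010-01-01')['spy'].calc_stats()  # placeholder ticker

# return the formatted stats as a CSV string ...
csv_str = perf.to_csv(sep=';')

# ... or write them straight to a file path
perf.to_csv(path='spy_stats.csv')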
for key in self._names:
    self[key].set_riskfree_rate(rf)

# calculate stats for entire series
self._update_stats()
def set_riskfree_rate(self, rf)
Set annual `risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_
property and calculate properly annualized monthly and daily rates.
Then performance stats are recalculated. Affects only those instances
of PerformanceStats that are children of this GroupStats object.

Args:
    * rf (float, Series): Annual risk-free rate or risk-free rate price series
9.047575
7.620552
1.18726
start = self._start if start is None else pd.to_datetime(start)
end = self._end if end is None else pd.to_datetime(end)
self._update(self._prices.loc[start:end])
def set_date_range(self, start=None, end=None)
Update date range of stats, charts, etc. If None then the original
date range is used. So to reset to the original range, just call
with no args.

Args:
    * start (date): start date
    * end (date): end date
3.194659
3.788594
0.843231
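A small sketch of re-windowing the stats in place (assumes calc_stats() has produced the stats object; tickers and dates are arbitrary examples):

import ffn

stats = ffn.get('spy,agg', start='2010-01-01').calc_stats()  # placeholder tickers

# restrict all stats/charts to a sub-period ...
stats.set_date_range(start='2015-01-01', end='2016-12-31')

# ... then reset to the original range by calling with no args
stats.set_date_range()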
data = []
first_row = ['Stat']
first_row.extend(self._names)
data.append(first_row)

stats = self._stats()

for stat in stats:
    k, n, f = stat

    # blank row
    if k is None:
        row = [''] * len(data[0])
        data.append(row)
        continue

    row = [n]
    for key in self._names:
        raw = getattr(self[key], k)
        # if rf is a series print nan
        if k == 'rf' and not type(raw) == float:
            row.append(np.nan)
        elif f is None:
            row.append(raw)
        elif f == 'p':
            row.append(fmtp(raw))
        elif f == 'n':
            row.append(fmtn(raw))
        elif f == 'dt':
            row.append(raw.strftime('%Y-%m-%d'))
        else:
            raise NotImplementedError('unsupported format %s' % f)
    data.append(row)

print(tabulate(data, headers='firstrow'))
def display(self)
Display summary stats table.
3.430615
3.326663
1.031248
return self.lookback_returns.apply( lambda x: x.map('{:,.2%}'.format), axis=1)
def display_lookback_returns(self)
Displays the current lookback returns for each series.
5.755013
4.44975
1.293334
if title is None:
    title = self._get_default_plot_title(
        freq, 'Equity Progression')

ser = self._get_series(freq).rebase()
return ser.plot(figsize=figsize, logy=logy, title=title, **kwargs)
def plot(self, freq=None, figsize=(15, 5), title=None, logy=False, **kwargs)
Helper function for plotting the series.

Args:
    * freq (str): Data frequency used for display purposes.
        Refer to pandas docs for valid freq strings.
    * figsize ((x,y)): figure size
    * title (str): Title if default not appropriate
    * logy (bool): log-scale for y axis
    * kwargs: passed to pandas' plot method
5.912335
6.259909
0.944476
if title is None:
    title = self._get_default_plot_title(
        freq, 'Return Scatter Matrix')

plt.figure()
ser = self._get_series(freq).to_returns().dropna()
pd.scatter_matrix(ser, figsize=figsize, **kwargs)
return plt.suptitle(title)
def plot_scatter_matrix(self, freq=None, title=None, figsize=(10, 10), **kwargs)
Wrapper around pandas' scatter_matrix.

Args:
    * freq (str): Data frequency used for display purposes.
        Refer to pandas docs for valid freq strings.
    * figsize ((x,y)): figure size
    * title (str): Title if default not appropriate
    * kwargs: passed to pandas' scatter_matrix method
4.892009
5.155019
0.94898
if title is None:
    title = self._get_default_plot_title(
        freq, 'Return Histogram Matrix')

plt.figure()
ser = self._get_series(freq).to_returns().dropna()
ser.hist(figsize=figsize, **kwargs)
return plt.suptitle(title)
def plot_histograms(self, freq=None, title=None, figsize=(10, 10), **kwargs)
Wrapper around pandas' hist.

Args:
    * freq (str): Data frequency used for display purposes.
        Refer to pandas docs for valid freq strings.
    * figsize ((x,y)): figure size
    * title (str): Title if default not appropriate
    * kwargs: passed to pandas' hist method
5.446325
5.848025
0.93131
if title is None:
    title = self._get_default_plot_title(
        freq, 'Return Correlation Matrix')

rets = self._get_series(freq).to_returns().dropna()
return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)
def plot_correlation(self, freq=None, title=None, figsize=(12, 6), **kwargs)
Utility function to plot correlations.

Args:
    * freq (str): Pandas data frequency alias string
    * title (str): Plot title
    * figsize (tuple (x,y)): figure size
    * kwargs: passed to Pandas' plot_corr_heatmap function
4.828103
4.598275
1.049981
data = []
first_row = ['Stat']
first_row.extend(self._names)
data.append(sep.join(first_row))

stats = self._stats()

for stat in stats:
    k, n, f = stat

    # blank row
    if k is None:
        row = [''] * len(data[0])
        data.append(sep.join(row))
        continue

    row = [n]
    for key in self._names:
        raw = getattr(self[key], k)
        if f is None:
            row.append(raw)
        elif f == 'p':
            row.append(fmtp(raw))
        elif f == 'n':
            row.append(fmtn(raw))
        elif f == 'dt':
            row.append(raw.strftime('%Y-%m-%d'))
        else:
            raise NotImplementedError('unsupported format %s' % f)
    data.append(sep.join(row))

res = '\n'.join(data)

if path is not None:
    with open(path, 'w') as fl:
        fl.write(res)
else:
    return res
def to_csv(self, sep=',', path=None)
Returns a CSV string with appropriate formatting.
If path is not None, the string will be saved to file at path.

Args:
    * sep (char): Separator
    * path (str): If None, CSV string returned. Else file
        written to specified path.
2.669036
2.785176
0.958301
if provider is None:
    provider = DEFAULT_PROVIDER

tickers = utils.parse_arg(tickers)

data = {}
for ticker in tickers:
    t = ticker
    f = None

    # check for field
    bits = ticker.split(ticker_field_sep, 1)
    if len(bits) == 2:
        t = bits[0]
        f = bits[1]

    # call provider - check if supports memoization
    if hasattr(provider, 'mcache'):
        data[ticker] = provider(ticker=t, field=f,
                                mrefresh=mrefresh, **kwargs)
    else:
        data[ticker] = provider(ticker=t, field=f, **kwargs)

df = pd.DataFrame(data)
# ensure same order as provided
df = df[tickers]

if existing is not None:
    df = ffn.merge(existing, df)

if common_dates:
    df = df.dropna()

if forward_fill:
    df = df.fillna(method='ffill')

if column_names:
    cnames = utils.parse_arg(column_names)
    if len(cnames) != len(df.columns):
        raise ValueError(
            'column_names must be of same length as tickers')
    df.columns = cnames
elif clean_tickers:
    df.columns = map(utils.clean_ticker, df.columns)

return df
def get(tickers, provider=None, common_dates=True, forward_fill=False, clean_tickers=True, column_names=None, ticker_field_sep=':', mrefresh=False, existing=None, **kwargs)
Helper function for retrieving data as a DataFrame.

Args:
    * tickers (list, string, csv string): Tickers to download.
    * provider (function): Provider to use for downloading data.
        By default it will be ffn.DEFAULT_PROVIDER if not provided.
    * common_dates (bool): Keep common dates only? Drop na's.
    * forward_fill (bool): forward fill values if missing. Only works
        if common_dates is False, since common_dates will remove all
        nan's, so no filling forward necessary.
    * clean_tickers (bool): Should the tickers be 'cleaned' using
        ffn.utils.clean_tickers? Basically remove non-standard
        characters (^VIX -> vix) and standardize to lower case.
    * column_names (list): List of column names if clean_tickers
        is not satisfactory.
    * ticker_field_sep (char): separator used to determine the
        ticker and field. This is in case we want to specify
        particular, non-default fields. For example, we might want:
        AAPL:Low,AAPL:High,AAPL:Close. ':' is the separator.
    * mrefresh (bool): Ignore memoization.
    * existing (DataFrame): Existing DataFrame to append returns
        to - used when we download from multiple sources
    * kwargs: passed to provider
2.570698
2.466571
1.042216
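A hedged usage sketch for the helper above, showing the ticker:field separator and explicit column naming (the tickers and the availability of a web-backed default provider are assumptions):

import ffn

# plain tickers - cleaned to lower case by default
prices = ffn.get('aapl,msft', start='2014-01-01')

# ticker:field syntax plus explicit column names
hl = ffn.get('aapl:High,aapl:Low', start='2014-01-01',
             column_names=['aapl_high', 'aapl_low'])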
if source == 'yahoo' and field is None:
    field = 'Adj Close'

tmp = _download_web(ticker, data_source=source, start=start, end=end)

if tmp is None:
    raise ValueError('failed to retrieve data for %s:%s' % (ticker, field))

if field:
    return tmp[field]
else:
    return tmp
def web(ticker, field=None, start=None, end=None, mrefresh=False, source='yahoo')
Data provider wrapper around pandas.io.data provider. Provides memoization.
3.232705
3.527082
0.916538
# set defaults if not specified
if 'index_col' not in kwargs:
    kwargs['index_col'] = 0
if 'parse_dates' not in kwargs:
    kwargs['parse_dates'] = True

# read in dataframe from csv file
df = pd.read_csv(path, **kwargs)

tf = ticker
if field != '' and field is not None:
    tf = '%s:%s' % (tf, field)

# check that required column exists
if tf not in df:
    raise ValueError('Ticker(field) not present in csv file!')

return df[tf]
def csv(ticker, path='data.csv', field='', mrefresh=False, **kwargs)
Data provider wrapper around pandas' read_csv. Provides memoization.
3.110095
3.059613
1.0165
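A sketch of wiring this provider into get() (the file name and column layout are hypothetical, and the provider is assumed to be exposed as ffn.data.csv):

import ffn

# prices.csv is assumed to have a date index column plus columns named
# 'aapl' and 'msft' (or 'aapl:Close' etc. when a field is requested)
prices = ffn.get('aapl,msft', provider=ffn.data.csv, path='prices.csv')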
# determine defaults
if limit == 0:
    limit = config.look_limit
if vrepr is None:
    vrepr = config.look_vrepr
if index_header is None:
    index_header = config.look_index_header
if style is None:
    style = config.look_style
if width is None:
    width = config.look_width

return Look(table, limit=limit, vrepr=vrepr, index_header=index_header,
            style=style, truncate=truncate, width=width)
def look(table, limit=0, vrepr=None, index_header=None, style=None, truncate=None, width=None)
Format a portion of the table as text for inspection in an interactive
session. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['b', 2]]
    >>> etl.look(table1)
    +-----+-----+
    | foo | bar |
    +=====+=====+
    | 'a' | 1   |
    +-----+-----+
    | 'b' | 2   |
    +-----+-----+

    >>> # alternative formatting styles
    ... etl.look(table1, style='simple')
    ===  ===
    foo  bar
    ===  ===
    'a'  1
    'b'  2
    ===  ===

    >>> etl.look(table1, style='minimal')
    foo  bar
    'a'  1
    'b'  2

    >>> # any irregularities in the length of header and/or data
    ... # rows will appear as blank cells
    ... table2 = [['foo', 'bar'],
    ...           ['a'],
    ...           ['b', 2, True]]
    >>> etl.look(table2)
    +-----+-----+------+
    | foo | bar |      |
    +=====+=====+======+
    | 'a' |     |      |
    +-----+-----+------+
    | 'b' | 2   | True |
    +-----+-----+------+

Three alternative presentation styles are available: 'grid', 'simple' and
'minimal', where 'grid' is the default. A different style can be specified
using the `style` keyword argument. The default style can also be changed
by setting ``petl.config.look_style``.
1.795305
1.954374
0.918609
kwargs['vrepr'] = str
return look(table, limit=limit, **kwargs)
def lookstr(table, limit=0, **kwargs)
Like :func:`petl.util.vis.look` but use str() rather than repr() for data values.
8.259012
6.367799
1.296996
# determine defaults
if limit == 0:
    limit = config.see_limit
if vrepr is None:
    vrepr = config.see_vrepr
if index_header is None:
    index_header = config.see_index_header

return See(table, limit=limit, vrepr=vrepr, index_header=index_header)
def see(table, limit=0, vrepr=None, index_header=None)
Format a portion of a table as text in a column-oriented layout for
inspection in an interactive session. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> etl.see(table)
    foo: 'a', 'b'
    bar: 1, 2

Useful for tables with a larger number of fields.
2.292089
2.639462
0.868392
from IPython.core.display import display_html
html = _display_html(table, limit=limit, vrepr=vrepr,
                     index_header=index_header, caption=caption,
                     tr_style=tr_style, td_styles=td_styles,
                     encoding=encoding, truncate=truncate,
                     epilogue=epilogue)
display_html(html, raw=True)
def display(table, limit=0, vrepr=None, index_header=None, caption=None, tr_style=None, td_styles=None, encoding=None, truncate=None, epilogue=None)
Display a table inline within an IPython notebook.
1.670899
1.686556
0.990717
return XLSXView(filename, sheet=sheet, range_string=range_string, row_offset=row_offset, column_offset=column_offset, **kwargs)
def fromxlsx(filename, sheet=None, range_string=None, row_offset=0, column_offset=0, **kwargs)
Extract a table from a sheet in an Excel .xlsx file. N.B., the sheet name is case sensitive. The `sheet` argument can be omitted, in which case the first sheet in the workbook is used by default. The `range_string` argument can be used to provide a range string specifying a range of cells to extract. The `row_offset` and `column_offset` arguments can be used to specify offsets. Any other keyword arguments are passed through to :func:`openpyxl.load_workbook()`.
2.326663
2.931928
0.793561
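A minimal sketch of reading a sheet with petl (the file name, sheet name and cell range are placeholders):

import petl as etl

tbl = etl.fromxlsx('example.xlsx', sheet='Sheet1', range_string='A1:C10')
print(etl.look(tbl))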
import openpyxl
if encoding is None:
    encoding = locale.getpreferredencoding()
wb = openpyxl.Workbook(write_only=True)
ws = wb.create_sheet(title=sheet)
for row in tbl:
    ws.append(row)
wb.save(filename)
def toxlsx(tbl, filename, sheet=None, encoding=None)
Write a table to a new Excel .xlsx file.
2.093469
2.09941
0.997171
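A matching write-side sketch for toxlsx above (assumes openpyxl is installed; the output file name is a placeholder):

import petl as etl

table = [['foo', 'bar'],
         ['a', 1],
         ['b', 2]]
etl.toxlsx(table, 'example_out.xlsx', sheet='data')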
# convenience for working with sqlite3
if isinstance(dbo, string_types):
    import sqlite3
    dbo = sqlite3.connect(dbo)

return DbView(dbo, query, *args, **kwargs)
def fromdb(dbo, query, *args, **kwargs)
Provides access to data from any DB-API 2.0 connection via a given query.
E.g., using :mod:`sqlite3`::

    >>> import petl as etl
    >>> import sqlite3
    >>> connection = sqlite3.connect('example.db')
    >>> table = etl.fromdb(connection, 'SELECT * FROM example')

E.g., using :mod:`psycopg2` (assuming you've installed it first)::

    >>> import petl as etl
    >>> import psycopg2
    >>> connection = psycopg2.connect('dbname=example user=postgres')
    >>> table = etl.fromdb(connection, 'SELECT * FROM example')

E.g., using :mod:`pymysql` (assuming you've installed it first)::

    >>> import petl as etl
    >>> import pymysql
    >>> connection = pymysql.connect(password='moonpie', database='thangs')
    >>> table = etl.fromdb(connection, 'SELECT * FROM example')

The `dbo` argument may also be a function that creates a cursor. N.B.,
each call to the function should return a new cursor. E.g.::

    >>> import petl as etl
    >>> import psycopg2
    >>> connection = psycopg2.connect('dbname=example user=postgres')
    >>> mkcursor = lambda: connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    >>> table = etl.fromdb(mkcursor, 'SELECT * FROM example')

The parameter `dbo` may also be an SQLAlchemy engine, session or
connection object.

The parameter `dbo` may also be a string, in which case it is interpreted
as the name of a file containing an :mod:`sqlite3` database.

Note that the default behaviour of most database servers and clients is
for the entire result set for each query to be sent from the server to
the client. If your query returns a large result set this can result in
significant memory usage at the client. Some databases support
server-side cursors which provide a means for client libraries to fetch
result sets incrementally, reducing memory usage at the client.

To use a server-side cursor with a PostgreSQL database, e.g.::

    >>> import petl as etl
    >>> import psycopg2
    >>> connection = psycopg2.connect('dbname=example user=postgres')
    >>> table = etl.fromdb(lambda: connection.cursor(name='arbitrary'),
    ...                    'SELECT * FROM example')

For more information on server-side cursors see the following links:

    * http://initd.org/psycopg/docs/usage.html#server-side-cursors
    * http://mysql-python.sourceforge.net/MySQLdb.html#using-and-extending
5.250022
7.897763
0.664748