| file_name (string, 4-140 chars) | prefix (string, 0-39k chars) | suffix (string, 0-36.1k chars) | middle (string, 0-29.4k chars) | fim_type (4 classes) |
---|---|---|---|---|
amap.go | package amap
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/go-resty/resty"
)
// PoiResult is the response envelope returned by the AMap place/polygon search API.
type PoiResult struct {
Count string `json:"count"`
Info string `json:"info"`
Infocode string `json:"infocode"`
Pois []Poi `json:"pois"`
Status string `json:"status"`
Suggestion struct {
Cities []interface{} `json:"cities"`
Keywords []interface{} `json:"keywords"`
} `json:"suggestion"`
}
// Poi is a single point of interest inside a PoiResult.
type Poi struct {
Adcode string `json:"adcode"`
Address string `json:"address"`
Adname string `json:"adname"`
Alias string `json:"alias"`
BizExt struct {
Cost string `json:"cost"`
Rating string `json:"rating"`
} `json:"biz_ext"`
BizType string `json:"biz_type"`
BusinessArea string `json:"business_area"`
Children []interface{} `json:"children"`
Citycode string `json:"citycode"`
Cityname string `json:"cityname"`
DiscountNum string `json:"discount_num"`
Distance string `json:"distance"`
Email string `json:"email"`
EntrLocation string `json:"entr_location"`
Event []interface{} `json:"event"`
ExitLocation []interface{} `json:"exit_location"`
Gridcode string `json:"gridcode"`
GroupbuyNum string `json:"groupbuy_num"`
ID string `json:"id"`
Importance []interface{} `json:"importance"`
IndoorData struct {
Cmsid []interface{} `json:"cmsid"`
Cpid []interface{} `json:"cpid"`
Floor []interface{} `json:"floor"`
Truefloor []interface{} `json:"truefloor"`
} `json:"indoor_data"`
IndoorMap string `json:"indoor_map"`
Location string `json:"location"`
Match string `json:"match"`
Name string `json:"name"`
NaviPoiid string `json:"navi_poiid"`
Pcode string `json:"pcode"`
Photos []struct {
Title []interface{} `json:"title"`
URL string `json:"url"`
} `json:"photos"`
Pname string `json:"pname"`
Poiweight []interface{} `json:"poiweight"`
Postcode []interface{} `json:"postcode"`
Recommend string `json:"recommend"`
Shopid []interface{} `json:"shopid"`
Shopinfo string `json:"shopinfo"`
Tag []interface{} `json:"tag"`
Tel string `json:"tel"`
Timestamp []interface{} `json:"timestamp"`
Type string `json:"type"`
Typecode string `json:"typecode"`
Website []interface{} `json:"website"`
}
func (p Poi) String() string {
return fmt.Sprintln(spaceD(p.ID), spaceD(p.Name), spaceD(p.Type), spaceD(p.Typecode), spaceD(p.Address), spaceD(p.Cityname), spaceD(p.Adname), spaceD(p.Location), spaceD(p.Alias))
}
func spaceD(s string) string {
return strings.Join(strings.Fields(s), "")
}
// Point is a longitude/latitude coordinate pair.
type Point struct {
Lng float64
Lat float64
}
// Rectangle is an axis-aligned search area defined by its top-left (PointLT) and bottom-right (PointRB) corners.
type Rectangle struct {
PointLT Point
PointRB Point
}
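// check reports whether PointLT really is the top-left corner and PointRB the
// bottom-right corner (PointLT west of and north of PointRB).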
func (r Rectangle) check() bool {
return r.PointLT.Lng < r.PointRB.Lng && r.PointLT.Lat > r.PointRB.Lat
}
func (r Rectangle) polygon() string {
return fmt.Sprintf("%f,%f|%f,%f", r.PointLT.Lng, r.PointLT.Lat, r.PointRB.Lng, r.PointRB.Lat)
}
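// quadtree splits the rectangle into four equal quadrants (NW, NE, SW, SE),
// rounding the new midpoints to 6 decimal places. It is used to subdivide
// areas whose POI count exceeds the per-polygon limit of the search API.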
func (r Rectangle) quadtree() []Rectangle {
halflng, halflat := math.Abs(r.PointRB.Lng-r.PointLT.Lng)/2, math.Abs(r.PointLT.Lat-r.PointRB.Lat)/2
return []Rectangle{
{r.PointLT, Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}},
{Point{round(r.PointLT.Lng + halflng), r.PointLT.Lat}, Point{r.PointRB.Lng, round(r.PointLT.Lat - halflat)}},
{Point{r.PointLT.Lng, round(r.PointLT.Lat - halflat)}, Point{round(r.PointLT.Lng + halflng), r.PointRB.Lat}},
{Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}, r.PointRB}}
}
type minRec struct {
Rec Rectangle
Types string
Count int
Err error
}
type minRecPage struct {
Rec Rectangle
Types string
Page string
}
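// round truncates a coordinate to 6 decimal places, which is plenty of
// precision for the polygon parameter sent to the API.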
func round(f float64) float64 {
n10 := math.Pow10(6)
return math.Trunc(f*n10) / n10
}
var gaoDePolygonURL = "https://restapi.amap.com/v3/place/polygon"
var gaoDeDetailURL = "https://www.amap.com/detail/get/detail"
var key = "aaa8abdaf05433e3702eae99964cc8c6"
// var key = "935c7385f239000f98ade53bbbc002e7"
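// cutRec recursively subdivides rec with quadtree until every sub-rectangle
// holds at most 800 POIs of the given types (the practical ceiling for one
// polygon query), returning the leaf rectangles together with their counts.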
func cutRec(rec Rectangle, types string) (recCutresult []minRec) {
count, err := recCount(rec, types)
if err != nil {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count <= 800 && count > 0 {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count > 800 {
// fmt.Println("cuting:", rec, types, count, err)
rec4s := rec.quadtree()
for _, rec4 := range rec4s {
recCutresult = append(recCutresult, cutRec(rec4, types)...)
}
}
return
}
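// recCount issues a one-result query (offset=1) purely to read the total
// "count" field for the rectangle and type filter.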
func recCount(rec Rectangle, types string) (count int, err error) {
para := map[string]string{
"types": types,
"offset": "1",
"polygon": rec.polygon(),
}
poiResult1, err := recRequest(para)
if err != nil {
return
}
count, err = strconv.Atoi(poiResult1.Count)
if err != nil {
return
}
return
}
func minRecPagePois(minRecPage minRecPage) (pois []Poi, err error) {
para := map[string]string{
"types": minRecPage.Types,
"offset": "20",
"polygon": minRecPage.Rec.polygon(),
"page": minRecPage.Page,
}
result, err := recRequest(para)
if err != nil {
return
}
pois = result.Pois
return
}
func minRecPagesPois(minRecPages []minRecPage) (pois []Poi) {
for _, minRecPage := range minRecPages {
pagePois, err := minRecPagePois(minRecPage)
if err == nil {
pois = append(pois, pagePois...)
} else {
fmt.Println(minRecPages, err)
}
}
return
}
func minRecPages(mRec minRec) (minRecPages []minRecPage) {
for page := int(math.Ceil(float64(mRec.Count) / 20)); page > 0; page-- {
minRecPages = append(minRecPages, minRecPage{mRec.Rec, mRec.Types, strconv.Itoa(page)})
}
return
}
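// The function below flattens a slice of minRec leaves into the complete
// list of per-page requests (one minRecPage per 20-result page).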
func | (mRecs []minRec) (mrp []minRecPage) {
for _, mRec := range mRecs {
mrp = append(mrp, minRecPages(mRec)...)
}
return
}
func recTypePages(rec Rectangle, types string) (mrp []minRecPage) {
cutrec := cutRec(rec, types)
mrp = minRecsPages(cutrec)
return
}
// RecTypePois returns all POIs of the given type codes inside the rectangle, subdividing the area as needed to stay under the API's result limit.
func RecTypePois(rec Rectangle, types string) (pois []Poi) {
pages := recTypePages(rec, types)
pois = minRecPagesPois(pages)
return
}
func recRequest(para map[string]string) (result PoiResult, err error) {
para["key"] = key
resp, err := resty.
SetTimeout(10 * time.Second).
SetRetryCount(5).
SetRetryWaitTime(10 * time.Second).
SetRetryMaxWaitTime(65 * time.Second).
R().
SetQueryParams(para).
Get(gaoDePolygonURL)
if err != nil {
return
}
err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" || result.Infocode != "10000" {
err = fmt.Errorf("amap request failed: status=%s infocode=%s info=%s", result.Status, result.Infocode, result.Info)
return
}
return
}
// Detail is the response returned by the AMap place detail endpoint.
type Detail struct {
Status string `json:"status"`
Data struct {
Base struct {
PoiTag string `json:"poi_tag"`
Code string `json:"code"`
ImportanceVipFlag int `json:"importance_vip_flag"`
CityAdcode string `json:"city_adcode"`
Telephone string `json:"telephone"`
NewType string `json:"new_type"`
CityName string `json:"city_name"`
NewKeytype string `json:"new_keytype"`
Checked string `json:"checked"`
Title string `json:"title"`
CreFlag int `json:"cre_flag"`
StdTTag0V string `json:"std_t_tag_0_v"`
NaviGeometry string `json:"navi_geometry"`
Classify string `json:"classify"`
Business string `json:"business"`
ShopInfo struct {
Claim int `json:"claim"`
} `json:"shop_info"`
PoiTagHasTTag int `json:"poi_tag_has_t_tag"`
Pixelx string `json:"pixelx"`
Pixely string `json:"pixely"`
Geodata struct {
Aoi []struct {
Name string `json:"name"`
Mainpoi string `json:"mainpoi"`
Area float64 `json:"area"`
} `json:"aoi"`
} `json:"geodata"`
Poiid string `json:"poiid"`
Distance int `json:"distance"`
Name string `json:"name"`
StdVTag0V string `json:"std_v_tag_0_v"`
EndPoiExtension string `json:"end_poi_extension"`
Y string `json:"y"`
X string `json:"x"`
Address string `json:"address"`
Bcs string `json:"bcs"`
Tag string `json:"tag"`
} `json:"base"`
Spec struct {
MiningShape struct {
Aoiid string `json:"aoiid"`
Center string `json:"center"`
Level int `json:"level"`
SpType string `json:"sp_type"`
Area string `json:"area"`
Shape string `json:"shape"`
Type int `json:"type"`
} `json:"mining_shape"`
SpPic []interface{} `json:"sp_pic"`
} `json:"spec"`
Residential struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"residential"`
Deep struct {
BuildingTypes string `json:"building_types"`
SrcTypeMix string `json:"src_type_mix"`
SrcID string `json:"src_id"`
IsCommunity int `json:"is_community"`
Business string `json:"business"`
Price string `json:"price"`
HaveSchDistrict int `json:"have_sch_district"`
PropertyFee string `json:"property_fee"`
AreaTotal string `json:"area_total"`
PropertyCompany string `json:"property_company"`
VolumeRate float64 `json:"volume_rate"`
GreenRate string `json:"green_rate"`
SrcType string `json:"src_type"`
Intro string `json:"intro"`
HxpicInfo []interface{} `json:"hxpic_info"`
Developer string `json:"developer"`
} `json:"deep"`
Rti struct {
ReviewEntrance int `json:"review_entrance"`
ReviewSummary string `json:"review_summary"`
ReviewCount int `json:"review_count"`
HasDiscountFlag int `json:"has_discount_flag"`
ReviewLabels []interface{} `json:"review_labels"`
} `json:"rti"`
Review struct {
Comment []struct {
AosTagScore float64 `json:"aos_tag_score"`
Recommend string `json:"recommend"`
HighQuality int `json:"high_quality"`
Labels []interface{} `json:"labels"`
ReviewID string `json:"review_id"`
AuthorProfileurl string `json:"author_profileurl"`
ReviewWeburl string `json:"review_weburl"`
ReviewWapurl string `json:"review_wapurl"`
Review string `json:"review"`
Author string `json:"author"`
GoldNum int `json:"gold_num"`
QualityFlag int `json:"quality_flag"`
GoldType string `json:"gold_type"`
Score int `json:"score"`
LikeNum string `json:"like_num"`
ReviewAppurl struct {
IosAppurl string `json:"ios_appurl"`
AndroidAppurl string `json:"android_appurl"`
} `json:"review_appurl"`
Time string `json:"time"`
SrcName string `json:"src_name"`
SrcType string `json:"src_type"`
AuthorID int `json:"author_id"`
} `json:"comment"`
} `json:"review"`
SrcInfo []interface{} `json:"src_info"`
ShareURL string `json:"share_url"`
} `json:"data"`
}
func requestDetail(id string) (result Detail, err error) {
resp, err := resty.
R().
SetQueryParams(map[string]string{"id": id}).
Get(gaoDeDetailURL)
if err != nil {
return
}
err = json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" {
err = fmt.Errorf("detail request for id %s failed: status=%s", id, result.Status)
return
}
return
}
func requestDetails(ids []string) (result []Detail) {
for _, id := range ids {
r, err1 := requestDetail(id)
if err1 == nil {
result = append(result, r)
}
}
return
}
func printResult(id string, ch chan string) {
r, err := requestDetail(id)
if err == nil {
fmt.Println(id, r.Data.Spec.MiningShape.Shape, "type:"+strconv.Itoa(r.Data.Spec.MiningShape.Type), "sptype:"+r.Data.Spec.MiningShape.SpType)
} else if r.Status == "6" {
fmt.Println(id, "err:toofast")
time.Sleep(10 * time.Second)
} else if r.Status == "8" {
fmt.Println(id, "err:notfounddetail")
} else {
fmt.Println(id, "err"+r.Status)
time.Sleep(10 * time.Second)
}
<-ch
}
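// Example usage (a minimal sketch; the rectangle roughly covering central
// Beijing and the POI type code "120000" are illustrative assumptions, not
// values taken from this package):
//
//	rec := Rectangle{
//		PointLT: Point{Lng: 116.30, Lat: 39.99},
//		PointRB: Point{Lng: 116.50, Lat: 39.85},
//	}
//	if rec.check() {
//		for _, poi := range RecTypePois(rec, "120000") {
//			fmt.Print(poi)
//		}
//	}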
| minRecsPages | identifier_name |
models.py | from django.db import models
import uuid
from on.activities.base import Goal, Activity
from on.user import UserInfo, UserTicket, UserRecord, UserSettlement
import django.utils.timezone as timezone
from django.conf import settings
import os
import pytz
import math
from datetime import timedelta, datetime
class RunningGoalManager(models.Manager):
# Create a new goal
def create_goal(self, user_id, runningtype, guaranty, down_payment, activate_deposit, coefficient, mode, goal_day,
distance, average, nosign,extra_earn,reality_price, deserve_price, down_num):
running_type = 0 if runningtype == "FREE" else 1
if settings.DEBUG:
start_time = timezone.now()
else:
# An activity created today can only be joined from the next day, so the next day is used as the start date
start_time = timezone.now() # + timedelta(days=1)
# start_time = datetime.strptime("2018-01-01 00:00:01", "%Y-%m-%d %H:%M:%S")
kilos_day, goal_distance, left_distance = None, None, None
if running_type:
kilos_day = distance
else:
actual_day_map = {
7: 6,
14: 12,
21: 18,
30: 25,
61: 50
}
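# Maps the chosen goal length in days to the number of days the user is
# actually expected to run; kilos_day below caps a single day's distance
# at twice the average spread over those running days.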
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# Look up an existing unpaid (PENDING) goal with the same start time
goal = self.filter(user_id=user_id).filter(start_time=start_time).filter(status="PENDING")
# If one exists, delete it
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# Create the no-sign-in tickets attached to this goal
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# Delete a goal
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# Delete every punch record belonging to this goal
goal.punch.all().delete()
# Delete the goal itself
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# Target distance
goal_distance = models.FloatField(null=True)
# Daily target distance; in free mode kilos_day is the per-day upper limit
kilos_day = models.FloatField(null=True)
# Remaining distance; only meaningful in free mode
left_distance = models.FloatField(null=True)
# Amount the user actually pays
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# Amount the user should pay
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# Number of deductions needed to use up the down payment
down_num = models.IntegerField(default=1, null=False)
# Average amount deducted each time
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# Activity deposit
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# Accumulated distance; only used in free mode
add_distance = models.FloatField(default=0,null=True)
# Extra earnings from the activity
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# Daily mode
if self.goal_type==1:
# If there is no prior missed-day record, forfeit the deposit (guaranty)
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# Free mode
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# Create a new punch record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# For a daily-mode punch, the distance is forced to the daily target
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
record = self.create(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# In free mode, update the remaining distance
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param daydelta: number of days to offset from today
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print(day)
# 今天的日期加上
today = timezone.now().date() + timedelta(daydelta)
print(today,"这个时间加上一个时间段")
# 明天
end = today + timedelta(1)
print(end,"today加上一天,表示的是那一天的一整天的时间段")
return self.filter(record_time__range=(today, end))
# 第一天是否存在打卡记录
# A user likes a punch record
def praise_punch(self, user_id, punch_id):
try:
praise = RunningPunchPraise(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.praise += 1
record.save()
except Exception:
pass
# A user reports a punch record
def report_punch(self, user_id, punch_id):
try:
praise = RunningPunchReport(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.report += 1
record.save()
except Exception:
pass
# Whether the user has already liked this punch record
def exist_praise_punch(self, user_id, punch_id):
record = RunningPunchPraise.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
# Whether the user has already reported this punch record
def exist_report_punch(self, user_id, punch_id):
record = RunningPunchReport.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
class RunningPunchRecord(models.Model):
""" Model for running task record
To save user's actual running distance per day
"""
# 主键ID,标识打卡记录
punch_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# 外键ID,标识对应目标
goal = models.ForeignKey(RunningGoal, related_name="punch", on_delete=models.PROTECT)
# Time when user creates the record
record_time = models.DateTimeField(null=False)
# 截图的引用地址
# voucher_ref = models.CharField(max_length=256, null=False)
voucher_ref = models.TextField(null=False)
# 截图的存储地址
voucher_store = models.TextField(null=False)
# 跑步距离
distance = models.FloatField(default=0)
# 被赞数
praise = models.IntegerField(default=0)
# 被举报数
report = models.IntegerField(default=0)
# 保存的一段话
document = models.TextField(default=" ", null=True)
# 重新打卡
reload = models.IntegerField(default=0, null=True)
# 指定一个Manager
objects = RunningPunchRecordManager()
# 点赞
class RunningPunchPraise(models.Model):
# 点赞的人
user_id = models.IntegerField()
# punch id
punch_id = models.UUIDField()
class Meta:
unique_together = ("punch_id", "user_id")
# 举报
class RunningPunchReport(models.Model):
# 举报的人
user_id = models.IntegerField(null=False, default=0)
# punch id
punch_id = models.UUIDField(null=False, default=uuid.uuid4)
class Meta:
unique_together = ("punch_id", "user_id")
| identifier_body |
||
models.py | from django.db import models
import uuid
from on.activities.base import Goal, Activity
from on.user import UserInfo, UserTicket, UserRecord, UserSettlement
import django.utils.timezone as timezone
from django.conf import settings
import os
import pytz
import math
from datetime import timedelta, datetime
class RunningGoalManager(models.Manager):
# 创建一个新的goal
def create_goal(self, user_id, runningtype, guaranty, down_payment, activate_deposit, coefficient, mode, goal_day,
distance, average, nosign,extra_earn,reality_price, deserve_price, down_num):
running_type = 0 if runningtype == "FREE" else 1
if settings.DEBUG:
start_time = timezone.now()
else:
# 当天创建活动只有后一天才能参加,所以以后一天为开始日期
start_time = timezone.now() # + timedelta(days=1)
# start_time = datetime.strptime("2018-01-01 00:00:01", "%Y-%m-%d %H:%M:%S")
kilos_day, goal_distance, left_distance = None, None, None
if running_type:
kilos_day = distance
else:
actual_day_map = {
7: 6,
14: 12,
21: 18,
30: 25,
61: 50
}
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id).filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
| te(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print(day)
# 今天的日期加上
today = timezone.now().date() + timedelta(daydelta)
print(today,"这个时间加上一个时间段")
# 明天
end = today + timedelta(1)
print(end,"today加上一天,表示的是那一天的一整天的时间段")
return self.filter(record_time__range=(today, end))
# 第一天是否存在打卡记录
# user对某punch点赞
def praise_punch(self, user_id, punch_id):
try:
praise = RunningPunchPraise(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.praise += 1
record.save()
except Exception:
pass
# user对某punch举报
def report_punch(self, user_id, punch_id):
try:
praise = RunningPunchReport(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.report += 1
record.save()
except Exception:
pass
# 是否存在某user对某punch的点赞
def exist_praise_punch(self, user_id, punch_id):
record = RunningPunchPraise.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
# Whether the user has already reported this punch record
def exist_report_punch(self, user_id, punch_id):
record = RunningPunchReport.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
class RunningPunchRecord(models.Model):
""" Model for running task record
To save user's actual running distance per day
"""
# 主键ID,标识打卡记录
punch_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# 外键ID,标识对应目标
goal = models.ForeignKey(RunningGoal, related_name="punch", on_delete=models.PROTECT)
# Time when user creates the record
record_time = models.DateTimeField(null=False)
# 截图的引用地址
# voucher_ref = models.CharField(max_length=256, null=False)
voucher_ref = models.TextField(null=False)
# 截图的存储地址
voucher_store = models.TextField(null=False)
# 跑步距离
distance = models.FloatField(default=0)
# 被赞数
praise = models.IntegerField(default=0)
# 被举报数
report = models.IntegerField(default=0)
# 保存的一段话
document = models.TextField(default=" ", null=True)
# 重新打卡
reload = models.IntegerField(default=0, null=True)
# 指定一个Manager
objects = RunningPunchRecordManager()
# 点赞
class RunningPunchPraise(models.Model):
# 点赞的人
user_id = models.IntegerField()
# punch id
punch_id = models.UUIDField()
class Meta:
unique_together = ("punch_id", "user_id")
# 举报
class RunningPunchReport(models.Model):
# 举报的人
user_id = models.IntegerField(null=False, default=0)
# punch id
punch_id = models.UUIDField(null=False, default=uuid.uuid4)
class Meta:
unique_together = ("punch_id", "user_id")
| record = self.crea | identifier_name |
models.py | from django.db import models
import uuid
from on.activities.base import Goal, Activity
from on.user import UserInfo, UserTicket, UserRecord, UserSettlement
import django.utils.timezone as timezone
from django.conf import settings
import os
import pytz
import math
from datetime import timedelta, datetime
class RunningGoalManager(models.Manager):
# 创建一个新的goal
def create_goal(self, user_id, runningtype, guaranty, down_payment, activate_deposit, coefficient, mode, goal_day,
distance, average, nosign,extra_earn,reality_price, deserve_price, down_num):
running_type = 0 if runningtype == "FREE" else 1
if settings.DEBUG:
start_time = timezone.now()
else:
# 当天创建活动只有后一天才能参加,所以以后一天为开始日期
start_time = timezone.now() # + timedelta(days=1)
# start_time = datetime.strptime("2018-01-01 00:00:01", "%Y-%m-%d %H:%M:%S")
kilos_day, goal_distance, left_distance = None, None, None
if running_type:
kilos_day = distance
else:
actual_day_map = {
7: 6,
14: 12,
| filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
record = self.create(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print(day)
# 今天的日期加上
today = timezone.now().date() + timedelta(daydelta)
print(today,"这个时间加上一个时间段")
# 明天
end = today + timedelta(1)
print(end,"today加上一天,表示的是那一天的一整天的时间段")
return self.filter(record_time__range=(today, end))
# 第一天是否存在打卡记录
# user对某punch点赞
def praise_punch(self, user_id, punch_id):
try:
praise = RunningPunchPraise(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.praise += 1
record.save()
except Exception:
pass
# user对某punch举报
def report_punch(self, user_id, punch_id):
try:
praise = RunningPunchReport(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.report += 1
record.save()
except Exception:
pass
# 是否存在某user对某punch的点赞
def exist_praise_punch(self, user_id, punch_id):
record = RunningPunchPraise.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
# Whether the user has already reported this punch record
def exist_report_punch(self, user_id, punch_id):
record = RunningPunchReport.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
class RunningPunchRecord(models.Model):
""" Model for running task record
To save user's actual running distance per day
"""
# 主键ID,标识打卡记录
punch_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# 外键ID,标识对应目标
goal = models.ForeignKey(RunningGoal, related_name="punch", on_delete=models.PROTECT)
# Time when user creates the record
record_time = models.DateTimeField(null=False)
# 截图的引用地址
# voucher_ref = models.CharField(max_length=256, null=False)
voucher_ref = models.TextField(null=False)
# 截图的存储地址
voucher_store = models.TextField(null=False)
# 跑步距离
distance = models.FloatField(default=0)
# 被赞数
praise = models.IntegerField(default=0)
# 被举报数
report = models.IntegerField(default=0)
# 保存的一段话
document = models.TextField(default=" ", null=True)
# 重新打卡
reload = models.IntegerField(default=0, null=True)
# 指定一个Manager
objects = RunningPunchRecordManager()
# 点赞
class RunningPunchPraise(models.Model):
# 点赞的人
user_id = models.IntegerField()
# punch id
punch_id = models.UUIDField()
class Meta:
unique_together = ("punch_id", "user_id")
# 举报
class RunningPunchReport(models.Model):
# 举报的人
user_id = models.IntegerField(null=False, default=0)
# punch id
punch_id = models.UUIDField(null=False, default=uuid.uuid4)
class Meta:
unique_together = ("punch_id", "user_id")
| 21: 18,
30: 25,
61: 50
}
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id). | conditional_block |
models.py | from django.db import models
import uuid
from on.activities.base import Goal, Activity
from on.user import UserInfo, UserTicket, UserRecord, UserSettlement
import django.utils.timezone as timezone
from django.conf import settings
import os
import pytz
import math
from datetime import timedelta, datetime
class RunningGoalManager(models.Manager):
# 创建一个新的goal
def create_goal(self, user_id, runningtype, guaranty, down_payment, activate_deposit, coefficient, mode, goal_day,
distance, average, nosign,extra_earn,reality_price, deserve_price, down_num):
running_type = 0 if runningtype == "FREE" else 1 | start_time = timezone.now() # + timedelta(days=1)
# start_time = datetime.strptime("2018-01-01 00:00:01", "%Y-%m-%d %H:%M:%S")
kilos_day, goal_distance, left_distance = None, None, None
if running_type:
kilos_day = distance
else:
actual_day_map = {
7: 6,
14: 12,
21: 18,
30: 25,
61: 50
}
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id).filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
record = self.create(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print(day)
# 今天的日期加上
today = timezone.now().date() + timedelta(daydelta)
print(today,"这个时间加上一个时间段")
# 明天
end = today + timedelta(1)
print(end,"today加上一天,表示的是那一天的一整天的时间段")
return self.filter(record_time__range=(today, end))
# 第一天是否存在打卡记录
# user对某punch点赞
def praise_punch(self, user_id, punch_id):
try:
praise = RunningPunchPraise(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.praise += 1
record.save()
except Exception:
pass
# user对某punch举报
def report_punch(self, user_id, punch_id):
try:
praise = RunningPunchReport(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.report += 1
record.save()
except Exception:
pass
# 是否存在某user对某punch的点赞
def exist_praise_punch(self, user_id, punch_id):
record = RunningPunchPraise.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
# Whether the user has already reported this punch record
def exist_report_punch(self, user_id, punch_id):
record = RunningPunchReport.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
class RunningPunchRecord(models.Model):
""" Model for running task record
To save user's actual running distance per day
"""
# 主键ID,标识打卡记录
punch_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# 外键ID,标识对应目标
goal = models.ForeignKey(RunningGoal, related_name="punch", on_delete=models.PROTECT)
# Time when user creates the record
record_time = models.DateTimeField(null=False)
# 截图的引用地址
# voucher_ref = models.CharField(max_length=256, null=False)
voucher_ref = models.TextField(null=False)
# 截图的存储地址
voucher_store = models.TextField(null=False)
# 跑步距离
distance = models.FloatField(default=0)
# 被赞数
praise = models.IntegerField(default=0)
# 被举报数
report = models.IntegerField(default=0)
# 保存的一段话
document = models.TextField(default=" ", null=True)
# 重新打卡
reload = models.IntegerField(default=0, null=True)
# 指定一个Manager
objects = RunningPunchRecordManager()
# 点赞
class RunningPunchPraise(models.Model):
# 点赞的人
user_id = models.IntegerField()
# punch id
punch_id = models.UUIDField()
class Meta:
unique_together = ("punch_id", "user_id")
# 举报
class RunningPunchReport(models.Model):
# 举报的人
user_id = models.IntegerField(null=False, default=0)
# punch id
punch_id = models.UUIDField(null=False, default=uuid.uuid4)
class Meta:
unique_together = ("punch_id", "user_id") | if settings.DEBUG:
start_time = timezone.now()
else:
# 当天创建活动只有后一天才能参加,所以以后一天为开始日期 | random_line_split |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
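/* common() evaluates the shared large-|x| asymptotic form used by j1f and y1f:
   sqrt(2/(pi*x)) * (pone(x)*cos(x1) - qone(x)*sin(x1)) with x1 = x - 3*pi/4;
   the (sin(x) - cos(x)) and -(sin(x) + cos(x)) terms below are those
   phase-shifted cos/sin values scaled by sqrt(2). */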
fn | (ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
let sign: bool;
ix = x.to_bits();
sign = (ix >> 31) != 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
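/* For small |x| (2**-25 <= x < 2), y1f is evaluated from the rational
   approximation x*(U0/V0) plus the logarithmic term TPI*(j1(x)*ln(x) - 1/x). */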
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
return -1.0 / 0.0;
}
if (ix >> 31) != 0 {
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansions of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
* S = 1 + ps0*s^2 + ... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935e+01, /* 0x42b61c2a */
4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
fn ponef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 5];
let z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &PR8;
q = &PS8;
} else if ix >= 0x409173eb {
p = &PR5;
q = &PS5;
} else if ix >= 0x4036d917 {
p = &PR3;
q = &PS3;
} else
/*ix >= 0x40000000*/
{
p = &PR2;
q = &PS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
return 1.0 + r / s;
}
/* For x >= 8, the asymptotic expansions of qone is
* 3/8 s - 105/1024 s^3 - ..., where s = 1/x.
* We approximate pone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
* S = 1 + qs1*s^2 + ... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
}
#[test]
fn test_y1f_2002() {
//allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
}
| common | identifier_name |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
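/* common() evaluates the shared large-|x| asymptotic form
* sqrt(2/(pi*x)) * (p1(x)*cos(x0) - q1(x)*sin(x0)), x0 = x - 3*pi/4,
* used by j1f (y1 = false) and, with sin(x) negated, by y1f (y1 = true);
* the trig identities below avoid computing x0 directly.
*/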
fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
let sign: bool;
ix = x.to_bits();
sign = (ix >> 31) != 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
/* y1(±0) = -inf */
return -1.0 / 0.0;
}
if (ix >> 31) != 0 {
/* y1(x) is NaN for x < 0 */
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
/* y1(+inf) = 0, y1(NaN) = NaN; 1/x covers both */
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansion of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
* S = 1 + ps0*s^2 + ... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
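/* for x in [4.5454,2.8570]=1/[0.22001,0.3499] (interval between the PR5 and PR2 ranges above/below) */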
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935e+01, /* 0x42b61c2a */
4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
fn ponef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 5];
let z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &PR8;
q = &PS8;
} else if ix >= 0x409173eb {
p = &PR5;
q = &PS5;
} else if ix >= 0x4036d917 {
p = &PR3;
q = &PS3;
} else
/*ix >= 0x40000000*/
{
p = &PR2;
q = &PS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
return 1.0 + r / s;
}
/* For x >= 8, the asymptotic expansion of qone is
* 3/8 s - 105/1024 s^3 - ..., where s = 1/x.
* We approximate qone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
* S = 1 + qs1*s^2 + ... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
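/* for x in [4.5454,2.8570]=1/[0.22001,0.3499] (interval between the QR5 and QR2 ranges above/below) */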
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() |
#[test]
fn test_y1f_2002() {
// allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
}
| {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
} | identifier_body |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
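/* common() evaluates the shared large-|x| asymptotic form
* sqrt(2/(pi*x)) * (p1(x)*cos(x0) - q1(x)*sin(x0)), x0 = x - 3*pi/4,
* used by j1f (y1 = false) and, with sin(x) negated, by y1f (y1 = true);
* the trig identities below avoid computing x0 directly.
*/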
fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32; | ix = x.to_bits();
sign = (ix >> 31) != 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
/* y1(±0) = -inf */
return -1.0 / 0.0;
}
if (ix >> 31) != 0 {
/* y1(x) is NaN for x < 0 */
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
/* y1(+inf) = 0, y1(NaN) = NaN; 1/x covers both */
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansion of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
* S = 1 + ps0*s^2 + ... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
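/* for x in [4.5454,2.8570]=1/[0.22001,0.3499] (interval between the PR5 and PR2 ranges above/below) */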
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935e+01, /* 0x42b61c2a */
4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
fn ponef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 5];
let z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &PR8;
q = &PS8;
} else if ix >= 0x409173eb {
p = &PR5;
q = &PS5;
} else if ix >= 0x4036d917 {
p = &PR3;
q = &PS3;
} else
/*ix >= 0x40000000*/
{
p = &PR2;
q = &PS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
return 1.0 + r / s;
}
/* For x >= 8, the asymptotic expansion of qone is
* 3/8 s - 105/1024 s^3 - ..., where s = 1/x.
* We approximate qone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
* S = 1 + qs1*s^2 + ... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
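/* for x in [4.5454,2.8570]=1/[0.22001,0.3499] (interval between the QR5 and QR2 ranges above/below) */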
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
}
#[test]
fn test_y1f_2002() {
// allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
} | let sign: bool;
| random_line_split |
postgres.go | package postgres
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/Masterminds/squirrel"
"github.com/go-kit/log/level"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver
_ "github.com/golang-migrate/migrate/v4/source/file" // Import the postgres migrations driver
"github.com/lib/pq"
_ "github.com/lib/pq" // Import the postgres sql driver
"github.com/pkg/errors"
)
const (
// TODO: These are a legacy from when configs was more general. Update the
// schema so this isn't needed.
entityType = "org"
subsystem = "cortex"
// timeout waiting for database connection to be established
dbTimeout = 5 * time.Minute
)
var (
allConfigs = squirrel.Eq{
"owner_type": entityType,
"subsystem": subsystem,
}
)
// DB is a postgres db, for dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
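// retrying with exponential backoff (1s, 2s, 4s, ...) until dbTimeout elapses.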
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error |
// GetAllConfigs gets all of the configurations.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest rules config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current rules config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
newCfg := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, newCfg)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all rules configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the rules configs that have changed since a given config.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets deleted_at for a configuration
// by adding a single new row with deleted_at set;
// like SetConfig, it is actually an insert.
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
}
// RestoreConfig restores configuration.
func (d DB) RestoreConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config)
}
// Transaction runs the given function in a postgres transaction. If fn returns
// an error the txn will be rolled back.
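// Nested calls reuse the enclosing transaction instead of opening a new one.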
func (d DB) Transaction(f func(DB) error) error {
if _, ok := d.dbProxy.(*sql.Tx); ok {
// Already in a nested transaction
return f(d)
}
tx, err := d.dbProxy.(*sql.DB).Begin()
if err != nil {
return err
}
err = f(DB{
dbProxy: tx,
StatementBuilderType: statementBuilder(tx),
})
if err != nil {
// Rollback error is ignored as we already have one in progress
if err2 := tx.Rollback(); err2 != nil {
level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
}
return err
}
return tx.Commit()
}
// Close finishes using the db
func (d DB) Close() error {
if db, ok := d.dbProxy.(interface {
Close() error
}); ok {
return db.Close()
}
return nil
}
| {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
} | identifier_body |
postgres.go | package postgres
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/Masterminds/squirrel"
"github.com/go-kit/log/level"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver
_ "github.com/golang-migrate/migrate/v4/source/file" // Import the postgres migrations driver
"github.com/lib/pq"
_ "github.com/lib/pq" // Import the postgres sql driver
"github.com/pkg/errors"
)
const (
// TODO: These are a legacy from when configs was more general. Update the
// schema so this isn't needed.
entityType = "org"
subsystem = "cortex"
// timeout waiting for database connection to be established
dbTimeout = 5 * time.Minute
)
var (
allConfigs = squirrel.Eq{
"owner_type": entityType,
"subsystem": subsystem,
}
)
// DB is a postgres db, for dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
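// retrying with exponential backoff (1s, 2s, 4s, ...) until dbTimeout elapses.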
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the configurations.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest rules config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current rules config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
newCfg := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, newCfg)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all rules configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the rules configs that have changed since a given config.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets deleted_at for a configuration
// by adding a single new row with deleted_at set;
// like SetConfig, it is actually an insert.
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil { |
// RestoreConfig restores configuration.
func (d DB) RestoreConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config)
}
// Transaction runs the given function in a postgres transaction. If fn returns
// an error the txn will be rolled back.
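// Nested calls reuse the enclosing transaction instead of opening a new one.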
func (d DB) Transaction(f func(DB) error) error {
if _, ok := d.dbProxy.(*sql.Tx); ok {
// Already in a nested transaction
return f(d)
}
tx, err := d.dbProxy.(*sql.DB).Begin()
if err != nil {
return err
}
err = f(DB{
dbProxy: tx,
StatementBuilderType: statementBuilder(tx),
})
if err != nil {
// Rollback error is ignored as we already have one in progress
if err2 := tx.Rollback(); err2 != nil {
level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
}
return err
}
return tx.Commit()
}
// Close finishes using the db
func (d DB) Close() error {
if db, ok := d.dbProxy.(interface {
Close() error
}); ok {
return db.Close()
}
return nil
} | return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
} | random_line_split |
postgres.go | package postgres
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/Masterminds/squirrel"
"github.com/go-kit/log/level"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver
_ "github.com/golang-migrate/migrate/v4/source/file" // Import the postgres migrations driver
"github.com/lib/pq"
_ "github.com/lib/pq" // Import the postgres sql driver
"github.com/pkg/errors"
)
const (
// TODO: These are a legacy from when configs was more general. Update the
// schema so this isn't needed.
entityType = "org"
subsystem = "cortex"
// timeout waiting for database connection to be established
dbTimeout = 5 * time.Minute
)
var (
allConfigs = squirrel.Eq{
"owner_type": entityType,
"subsystem": subsystem,
}
)
// DB is a postgres db, for dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
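// retrying with exponential backoff (1s, 2s, 4s, ...) until dbTimeout elapses.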
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the configurations.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest rules config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current rules config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
newCfg := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, newCfg)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil |
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all alertmanager configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the alertmanager configs that have changed since a given config.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets deleted_at for a configuration
// by adding a single new row with deleted_at set;
// like SetConfig, it is actually an insert.
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
}
// RestoreConfig restores configuration.
func (d DB) RestoreConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config)
}
// Transaction runs the given function in a postgres transaction. If fn returns
// an error the txn will be rolled back.
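// Nested calls reuse the enclosing transaction instead of opening a new one.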
func (d DB) Transaction(f func(DB) error) error {
if _, ok := d.dbProxy.(*sql.Tx); ok {
// Already in a nested transaction
return f(d)
}
tx, err := d.dbProxy.(*sql.DB).Begin()
if err != nil {
return err
}
err = f(DB{
dbProxy: tx,
StatementBuilderType: statementBuilder(tx),
})
if err != nil {
// Rollback error is ignored as we already have one in progress
if err2 := tx.Rollback(); err2 != nil {
level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
}
return err
}
return tx.Commit()
}
// Close finishes using the db
func (d DB) Close() error {
if db, ok := d.dbProxy.(interface {
Close() error
}); ok {
return db.Close()
}
return nil
}
| {
return nil, err
} | conditional_block |
postgres.go | package postgres
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/Masterminds/squirrel"
"github.com/go-kit/log/level"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver
_ "github.com/golang-migrate/migrate/v4/source/file" // Import the postgres migrations driver
"github.com/lib/pq"
_ "github.com/lib/pq" // Import the postgres sql driver
"github.com/pkg/errors"
)
const (
// TODO: These are a legacy from when configs was more general. Update the
// schema so this isn't needed.
entityType = "org"
subsystem = "cortex"
// timeout waiting for database connection to be established
dbTimeout = 5 * time.Minute
)
var (
allConfigs = squirrel.Eq{
"owner_type": entityType,
"subsystem": subsystem,
}
)
// DB is a postgres db, for dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
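// retrying with exponential backoff (1s, 2s, 4s, ...) until dbTimeout elapses.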
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the configurations.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest rules config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current rules config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
newCfg := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, newCfg)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all rules configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the rules configs that have changed since a given config.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets deleted_at for a configuration
// by adding a single new row with deleted_at set,
// in the same way that SetConfig actually performs an insert.
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
}
// RestoreConfig restores configuration.
func (d DB) RestoreConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
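// An empty pq.NullTime is stored as NULL, which clears the deletion marker.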
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config)
}
// Transaction runs the given function in a postgres transaction. If fn returns
// an error the txn will be rolled back.
func (d DB) | (f func(DB) error) error {
if _, ok := d.dbProxy.(*sql.Tx); ok {
// Already in a nested transaction
return f(d)
}
tx, err := d.dbProxy.(*sql.DB).Begin()
if err != nil {
return err
}
err = f(DB{
dbProxy: tx,
StatementBuilderType: statementBuilder(tx),
})
if err != nil {
// Rollback error is ignored as we already have one in progress
if err2 := tx.Rollback(); err2 != nil {
level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
}
return err
}
return tx.Commit()
}
// Close finishes using the db
func (d DB) Close() error {
if db, ok := d.dbProxy.(interface {
Close() error
}); ok {
return db.Close()
}
return nil
}
| Transaction | identifier_name |
chroot.go | package shared
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
lxd "github.com/canonical/lxd/shared"
"golang.org/x/sys/unix"
)
// ChrootMount defines mount args.
type ChrootMount struct {
Source string
Target string
FSType string
Flags uintptr
Data string
IsDir bool
}
// ActiveChroots is a map of all active chroots and their exit functions.
var ActiveChroots = make(map[string]func() error)
func setupMounts(rootfs string, mounts []ChrootMount) error {
// Create a temporary mount path
err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err)
}
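// Everything is first mounted under a temporary .distrobuilder directory;
// moveMounts later relocates each mount to its real target inside the chroot.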
for i, mount := range mounts {
// Target path
tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i))
// Create the target mountpoint
if mount.IsDir {
err := os.MkdirAll(tmpTarget, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err)
}
} else {
f, err := os.Create(tmpTarget)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err)
}
f.Close()
} | if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
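// Follow symlinks one hop at a time until the real target path is reached.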
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, a reverter, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
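// Exiting 101 from policy-rc.d tells Debian package maintainer scripts not to
// start services inside the chroot.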
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
err = unix.Chroot(".")
if err != nil {
return fmt.Errorf("Failed to chroot: %w", err)
}
err = unix.Chdir(cwd)
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
// This will kill all processes in the chroot and allow everything to be
// cleanly unmounted.
err = killChrootProcesses(rootfs)
if err != nil {
return fmt.Errorf("Failed killing chroot processes: %w", err)
}
// And now unmount the entire tree
err = unix.Unmount(rootfs, unix.MNT_DETACH)
if err != nil {
return fmt.Errorf("Failed unmounting rootfs: %w", err)
}
devPath := filepath.Join(rootfs, "dev")
// Wipe $rootfs/dev
err := os.RemoveAll(devPath)
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", devPath, err)
}
ActiveChroots[rootfs] = nil
return os.MkdirAll(devPath, 0755)
}
ActiveChroots[rootfs] = exitFunc
return exitFunc, nil
}
func populateDev() error {
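// Create a minimal set of device nodes and standard stream symlinks rather
// than bind-mounting the host /dev.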
devs := []struct {
Path string
Major uint32
Minor uint32
Mode uint32
}{
{"/dev/console", 5, 1, unix.S_IFCHR | 0640},
{"/dev/full", 1, 7, unix.S_IFCHR | 0666},
{"/dev/null", 1, 3, unix.S_IFCHR | 0666},
{"/dev/random", 1, 8, unix.S_IFCHR | 0666},
{"/dev/tty", 5, 0, unix.S_IFCHR | 0666},
{"/dev/urandom", 1, 9, unix.S_IFCHR | 0666},
{"/dev/zero", 1, 5, unix.S_IFCHR | 0666},
}
for _, d := range devs {
if lxd.PathExists(d.Path) {
continue
}
dev := unix.Mkdev(d.Major, d.Minor)
err := unix.Mknod(d.Path, d.Mode, int(dev))
if err != nil {
return fmt.Errorf("Failed to create %q: %w", d.Path, err)
}
// For some odd reason, unix.Mknod will not set the mode correctly.
// This fixes that.
err = unix.Chmod(d.Path, d.Mode)
if err != nil {
return fmt.Errorf("Failed to chmod %q: %w", d.Path, err)
}
}
symlinks := []struct {
Symlink string
Target string
}{
{"/dev/fd", "/proc/self/fd"},
{"/dev/stdin", "/proc/self/fd/0"},
{"/dev/stdout", "/proc/self/fd/1"},
{"/dev/stderr", "/proc/self/fd/2"},
}
for _, l := range symlinks {
err := os.Symlink(l.Target, l.Symlink)
if err != nil {
return fmt.Errorf("Failed to create link %q -> %q: %w", l.Symlink, l.Target, err)
}
}
return nil
} |
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data) | random_line_split |
chroot.go | package shared
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
lxd "github.com/canonical/lxd/shared"
"golang.org/x/sys/unix"
)
// ChrootMount defines mount args.
type ChrootMount struct {
Source string
Target string
FSType string
Flags uintptr
Data string
IsDir bool
}
// ActiveChroots is a map of all active chroots and their exit functions.
var ActiveChroots = make(map[string]func() error)
func setupMounts(rootfs string, mounts []ChrootMount) error {
// Create a temporary mount path
err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err)
}
for i, mount := range mounts {
// Target path
tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i))
// Create the target mountpoint
if mount.IsDir {
err := os.MkdirAll(tmpTarget, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err)
}
} else {
f, err := os.Create(tmpTarget)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err)
}
f.Close()
}
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data)
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, a reverter, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) |
func populateDev() error {
devs := []struct {
Path string
Major uint32
Minor uint32
Mode uint32
}{
{"/dev/console", 5, 1, unix.S_IFCHR | 0640},
{"/dev/full", 1, 7, unix.S_IFCHR | 0666},
{"/dev/null", 1, 3, unix.S_IFCHR | 0666},
{"/dev/random", 1, 8, unix.S_IFCHR | 0666},
{"/dev/tty", 5, 0, unix.S_IFCHR | 0666},
{"/dev/urandom", 1, 9, unix.S_IFCHR | 0666},
{"/dev/zero", 1, 5, unix.S_IFCHR | 0666},
}
for _, d := range devs {
if lxd.PathExists(d.Path) {
continue
}
dev := unix.Mkdev(d.Major, d.Minor)
err := unix.Mknod(d.Path, d.Mode, int(dev))
if err != nil {
return fmt.Errorf("Failed to create %q: %w", d.Path, err)
}
// For some odd reason, unix.Mknod will not set the mode correctly.
// This fixes that.
err = unix.Chmod(d.Path, d.Mode)
if err != nil {
return fmt.Errorf("Failed to chmod %q: %w", d.Path, err)
}
}
symlinks := []struct {
Symlink string
Target string
}{
{"/dev/fd", "/proc/self/fd"},
{"/dev/stdin", "/proc/self/fd/0"},
{"/dev/stdout", "/proc/self/fd/1"},
{"/dev/stderr", "/proc/self/fd/2"},
}
for _, l := range symlinks {
err := os.Symlink(l.Target, l.Symlink)
if err != nil {
return fmt.Errorf("Failed to create link %q -> %q: %w", l.Symlink, l.Target, err)
}
}
return nil
}
| {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
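// All of these are fresh mounts; only resolv.conf is bind-mounted from the
// host so that name resolution keeps working inside the chroot.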
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
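// The exit function undoes the whole setup: it restores the environment,
// leaves the chroot, kills leftover processes and unmounts the tree.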
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
err = unix.Chroot(".")
if err != nil {
return fmt.Errorf("Failed to chroot: %w", err)
}
err = unix.Chdir(cwd)
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
// This will kill all processes in the chroot and allow everything to be
// cleanly unmounted.
err = killChrootProcesses(rootfs)
if err != nil {
return fmt.Errorf("Failed killing chroot processes: %w", err)
}
// And now unmount the entire tree
err = unix.Unmount(rootfs, unix.MNT_DETACH)
if err != nil {
return fmt.Errorf("Failed unmounting rootfs: %w", err)
}
devPath := filepath.Join(rootfs, "dev")
// Wipe $rootfs/dev
err := os.RemoveAll(devPath)
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", devPath, err)
}
ActiveChroots[rootfs] = nil
return os.MkdirAll(devPath, 0755)
}
ActiveChroots[rootfs] = exitFunc
return exitFunc, nil
} | identifier_body |
chroot.go | package shared
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
lxd "github.com/canonical/lxd/shared"
"golang.org/x/sys/unix"
)
// ChrootMount defines mount args.
type ChrootMount struct {
Source string
Target string
FSType string
Flags uintptr
Data string
IsDir bool
}
// ActiveChroots is a map of all active chroots and their exit functions.
var ActiveChroots = make(map[string]func() error)
func | (rootfs string, mounts []ChrootMount) error {
// Create a temporary mount path
err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err)
}
for i, mount := range mounts {
// Target path
tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i))
// Create the target mountpoint
if mount.IsDir {
err := os.MkdirAll(tmpTarget, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err)
}
} else {
f, err := os.Create(tmpTarget)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err)
}
f.Close()
}
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data)
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
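// A /proc entry whose name is all digits is a PID; its root symlink tells us
// whether the process is running inside this chroot.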
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, a reverter, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
err = unix.Chroot(".")
if err != nil {
return fmt.Errorf("Failed to chroot: %w", err)
}
err = unix.Chdir(cwd)
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
// This will kill all processes in the chroot and allow everything to be
// cleanly unmounted.
err = killChrootProcesses(rootfs)
if err != nil {
return fmt.Errorf("Failed killing chroot processes: %w", err)
}
// And now unmount the entire tree
err = unix.Unmount(rootfs, unix.MNT_DETACH)
if err != nil {
return fmt.Errorf("Failed unmounting rootfs: %w", err)
}
devPath := filepath.Join(rootfs, "dev")
// Wipe $rootfs/dev
err := os.RemoveAll(devPath)
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", devPath, err)
}
ActiveChroots[rootfs] = nil
return os.MkdirAll(devPath, 0755)
}
ActiveChroots[rootfs] = exitFunc
return exitFunc, nil
}
func populateDev() error {
devs := []struct {
Path string
Major uint32
Minor uint32
Mode uint32
}{
{"/dev/console", 5, 1, unix.S_IFCHR | 0640},
{"/dev/full", 1, 7, unix.S_IFCHR | 0666},
{"/dev/null", 1, 3, unix.S_IFCHR | 0666},
{"/dev/random", 1, 8, unix.S_IFCHR | 0666},
{"/dev/tty", 5, 0, unix.S_IFCHR | 0666},
{"/dev/urandom", 1, 9, unix.S_IFCHR | 0666},
{"/dev/zero", 1, 5, unix.S_IFCHR | 0666},
}
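// Device nodes that already exist are left untouched; missing ones are created with mknod below.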
for _, d := range devs {
if lxd.PathExists(d.Path) {
continue
}
dev := unix.Mkdev(d.Major, d.Minor)
err := unix.Mknod(d.Path, d.Mode, int(dev))
if err != nil {
return fmt.Errorf("Failed to create %q: %w", d.Path, err)
}
// For some odd reason, unix.Mknod will not set the mode correctly.
// This fixes that.
err = unix.Chmod(d.Path, d.Mode)
if err != nil {
return fmt.Errorf("Failed to chmod %q: %w", d.Path, err)
}
}
symlinks := []struct {
Symlink string
Target string
}{
{"/dev/fd", "/proc/self/fd"},
{"/dev/stdin", "/proc/self/fd/0"},
{"/dev/stdout", "/proc/self/fd/1"},
{"/dev/stderr", "/proc/self/fd/2"},
}
for _, l := range symlinks {
err := os.Symlink(l.Target, l.Symlink)
if err != nil {
return fmt.Errorf("Failed to create link %q -> %q: %w", l.Symlink, l.Target, err)
}
}
return nil
}
| setupMounts | identifier_name |
chroot.go | package shared
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
lxd "github.com/canonical/lxd/shared"
"golang.org/x/sys/unix"
)
// ChrootMount defines mount args.
type ChrootMount struct {
Source string
Target string
FSType string
Flags uintptr
Data string
IsDir bool
}
// ActiveChroots is a map of all active chroots and their exit functions.
var ActiveChroots = make(map[string]func() error)
func setupMounts(rootfs string, mounts []ChrootMount) error {
// Create a temporary mount path
err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err)
}
for i, mount := range mounts {
// Target path
tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i))
// Create the target mountpoint
if mount.IsDir {
err := os.MkdirAll(tmpTarget, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err)
}
} else {
f, err := os.Create(tmpTarget)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err)
}
f.Close()
}
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data)
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, a reverter, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
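// Bind-mounting the rootfs onto itself turns it into a mount point that the
// exit function can later detach with a single MNT_DETACH unmount.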
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
err = unix.Chroot(".")
if err != nil {
return fmt.Errorf("Failed to chroot: %w", err)
}
err = unix.Chdir(cwd)
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
// This will kill all processes in the chroot and allow everything to be
// cleanly unmounted.
err = killChrootProcesses(rootfs)
if err != nil {
return fmt.Errorf("Failed killing chroot processes: %w", err)
}
// And now unmount the entire tree
err = unix.Unmount(rootfs, unix.MNT_DETACH)
if err != nil {
return fmt.Errorf("Failed unmounting rootfs: %w", err)
}
devPath := filepath.Join(rootfs, "dev")
// Wipe $rootfs/dev
err := os.RemoveAll(devPath)
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", devPath, err)
}
ActiveChroots[rootfs] = nil
return os.MkdirAll(devPath, 0755)
}
ActiveChroots[rootfs] = exitFunc
return exitFunc, nil
}
func populateDev() error {
devs := []struct {
Path string
Major uint32
Minor uint32
Mode uint32
}{
{"/dev/console", 5, 1, unix.S_IFCHR | 0640},
{"/dev/full", 1, 7, unix.S_IFCHR | 0666},
{"/dev/null", 1, 3, unix.S_IFCHR | 0666},
{"/dev/random", 1, 8, unix.S_IFCHR | 0666},
{"/dev/tty", 5, 0, unix.S_IFCHR | 0666},
{"/dev/urandom", 1, 9, unix.S_IFCHR | 0666},
{"/dev/zero", 1, 5, unix.S_IFCHR | 0666},
}
for _, d := range devs {
if lxd.PathExists(d.Path) {
continue
}
dev := unix.Mkdev(d.Major, d.Minor)
err := unix.Mknod(d.Path, d.Mode, int(dev))
if err != nil {
return fmt.Errorf("Failed to create %q: %w", d.Path, err)
}
// For some odd reason, unix.Mknod will not set the mode correctly.
// This fixes that.
err = unix.Chmod(d.Path, d.Mode)
if err != nil |
}
symlinks := []struct {
Symlink string
Target string
}{
{"/dev/fd", "/proc/self/fd"},
{"/dev/stdin", "/proc/self/fd/0"},
{"/dev/stdout", "/proc/self/fd/1"},
{"/dev/stderr", "/proc/self/fd/2"},
}
for _, l := range symlinks {
err := os.Symlink(l.Target, l.Symlink)
if err != nil {
return fmt.Errorf("Failed to create link %q -> %q: %w", l.Symlink, l.Target, err)
}
}
return nil
}
| {
return fmt.Errorf("Failed to chmod %q: %w", d.Path, err)
} | conditional_block |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
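// Static files are wrapped in web::CachedFile; judging by the name, this wrapper
// is assumed to attach caching headers (its implementation lives in the web module).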
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template |
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
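// Stores the question, sends a notification e-mail, then redirects to a confirmation page.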
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
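// The Japanese message below reads "the question body is empty".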
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
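// Answers are tweeted from a background thread, throttled to one tweet every five minutes.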
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
}
| {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
} | identifier_body |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
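// Page 0 is the newest page, so it has no "next" page.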
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
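// The reason string below is Japanese for "The question body is empty".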
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
i | onse::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
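// Background worker: receive each stored answer from the channel, tweet it, then wait five minutes before the next one.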
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
}
| mpl<'r> resp | identifier_name |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => | ,
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
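// The reason string below is Japanese for "The question body is empty".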
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
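// Background worker: receive each stored answer from the channel, tweet it, then wait five minutes before the next one.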
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
}
| {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
} | conditional_block |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
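// The reason string below is Japanese for "The question body is empty".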
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */ | #[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
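// Background worker: receive each stored answer from the channel, tweet it, then wait five minutes before the next one.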
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
} | random_line_split |
|
lib.rs | // Copyright (c) 2019, Bayu Aldi Yansyah <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Crabsformer is an easy-to-use fundamental library for scientific computing
//! with Rust, highly inspired by [NumPy].
//!
//! **Notice!** This project is in an early phase. Expect bugs and missing
//! features.
//!
//! [NumPy]: http://www.numpy.org/
//!
//! # Usage
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! crabsformer = "2019.3.17"
//! ``` | //! ```
//!
//! To get started using Crabsformer, read the quickstart tutorial below.
//!
//! # Quickstart Tutorial
//!
//! ## Prerequisites
//! Before reading this quick tutorial you should know a bit of Rust. If you
//! would like to refresh your memory, take a look at the [Rust book].
//!
//! [Rust book]: https://doc.rust-lang.org/book/
//!
//! ## The Basics
//! There are two main data structures in Crabsformer:
//!
//! 1. [`Vector<T>`] is a fixed-length list of elements of the same
//! [numeric type]. It has one attribute called [`len`] to represent the
//! total number of elements.
//! 2. [`Matrix<T>`] is a table of elements of the same [numeric type]. It has
//! one attribute called [`shape`] that represents the number of rows and
//! the number of columns.
//!
//! `Vector<T>` is pronounced as 'numeric vector' to avoid confusion with
//! Rust's vector [`Vec<T>`] data structure.
//!
//! [`Vector<T>`]: vector/struct.Vector.html
//! [`Matrix<T>`]: matrix/struct.Matrix.html
//! [`len`]: vector/struct.Vector.html#method.len
//! [`shape`]: matrix/struct.Matrix.html#method.shape
//! [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
//!
//! ### Numeric Vector Builders
//! There are several ways to create a numeric vector.
//!
//! For example, you can create a numeric vector from a Rust vector using
//! `Vector::from` static method. The type of the resulting numeric vector is
//! deduced from the type of the elements in the sequences.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vec![3, 1, 4, 1, 5];
//! let y = Vector::from(x);
//! ```
//!
//! The [`vector!`] macro is provided to make initialization of the numeric
//! vector more convenient.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![1, 10, 11, 314];
//! ```
//!
//! It can also initialize each element of a numeric vector with a given value.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![0; 5]; // vector![0, 0, 0, 0, 0]
//! ```
//!
//! To create a numeric vector of evenly spaced values, Crabsformer provides the
//! [`Vector::range`] function.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = Vector::range(0, 10, 1).unwrap();
//! assert_eq!(x, vector![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
//! ```
//!
//! To create random numeric vectors, Crabsformer provides
//! [`RandomVectorBuilder`]. It can be explicitly seeded so that the results
//! are reproducible.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut rvb = RandomVectorBuilder::new();
//! ```
//!
//! The method [`rvb.uniform`] creates a numeric vector of the given length
//! and populates it with random samples from a uniform distribution over the
//! half-open interval.
//!
//! ```
//! # use crabsformer::prelude::*;
//! # let mut rvb = RandomVectorBuilder::new();
//! let v = rvb.uniform(5, 0.0, 1.0).unwrap();
//! // Random
//! // [0.054709196, 0.86043775, 0.21187294, 0.6413728, 0.14186311]
//! ```
//!
//! See also: [Numeric Vector Builders].
//!
//! [`vector!`]: macro.vector.html
//! [`RandomVectorBuilder`]: vector/builders/struct.RandomVectorBuilder.html
//! [`rvb.uniform`]: vector/builders/struct.RandomVectorBuilder.html#method.uniform
//! [Numeric Vector Builders]: vector/builders/index.html
//! [`Vector::range`]: vector/struct.Vector.html#method.range
//!
//! ### Numeric Vector Basic Operations
//! You can perform arithmetic operations on a numeric vector. Arithmetic
//! operators on numeric vectors apply elementwise. A new numeric vector is
//! created and filled with the result.
//!
//! For example, if you add two numeric vectors, the arithmetic operator
//! works element-wise. The output will be a numeric vector of the same
//! length.
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![2, 4, 6] + vector![1, 3, 5];
//! assert_eq!(x, vector![3, 7, 11]);
//! ```
//!
//! Numeric vector subtraction and multiplication also work the same way:
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 5] - vector![1, 3, 5];
//! assert_eq!(x, vector![2, -2, 0]);
//!
//! let y = vector![5, 4, 1] * vector![2, 1, 4];
//! assert_eq!(y, vector![10, 4, 4]);
//! ```
//!
//! You can run an arithmetic operation on the numeric vector with a scalar
//! value too. For example, this code multiplies each element of the numeric
//! vector by 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4] * 2;
//! assert_eq!(x, vector![6, 2, 8]);
//! ```
//!
//! Some operations, such as `+=` and `*=`, act in place to modify an
//! existing numeric vector rather than create a new one.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut x = vector![3, 1, 4];
//!
//! x += 3;
//! assert_eq!(x, vector![6, 4, 7]);
//!
//! x -= 1;
//! assert_eq!(x, vector![5, 3, 6]);
//!
//! x *= 2;
//! assert_eq!(x, vector![10, 6, 12]);
//! ```
//!
//! If you try to add, subtract or multiply numeric vectors with a different
//! number of elements, you will get an error. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1, 5] + vector![2, 10, 9];
//! // thread 'main' panicked at 'Vector addition with invalid length: 5 != 3' src/main.rs:12:13
//! ```
//!
//! *TODO: add alternative x.add() to return Result instead of panics*
//!
//! If you would like to square the individual elements of the numeric
//! vector, or raise them even higher, use the [`power`] method. Here, each element of the
//! numeric vector is raised to the power 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//! let y = x.power(2);
//! assert_eq!(y, vector![9, 1, 16, 1]);
//! ```
//!
//! [`power`]: struct.Vector.html#method.power
//!
//! When operating with numeric vectors of different types,
//! the Rust compiler will raise an error like the following:
//!
//! ```text
//! cannot add `vector::Vector<{integer}>` to `vector::Vector<{float}>`
//! ```
//!
//! Many unary operations, such as computing the sum of all the elements in the
//! numeric vector, are implemented as methods.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4];
//! let sum = x.sum();
//! assert_eq!(sum, 8);
//! assert_eq!(*x.max(), 4);
//! assert_eq!(*x.min(), 1);
//! ```
//!
//! See also: [`power`], [`filter`], [`sum`], [`max`], [`min`].
//!
//! [`power`]: struct.Vector.html#method.power
//! [`filter`]: struct.Vector.html#method.filter
//! [`sum`]: struct.Vector.html#method.sum
//! [`max`]: struct.Vector.html#method.max
//! [`min`]: struct.Vector.html#method.min
//!
//! ### Indexing, Slicing and Iterating Numeric Vector
//! Numeric vectors can be indexed, sliced and iterated over, much like
//! Rust's vector.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//!
//! // Indexing numeric vector
//! assert_eq!(x[0], 3);
//! assert_eq!(x[2], 4);
//!
//! // Slicing numeric vector
//! x.slice(0..2); // [3, 1]
//! x.slice(2..); // [4, 1]
//! x.slice(..2); // [3, 1]
//!
//! // Iterating over element of numeric vector
//! for element in x.elements() {
//! println!("element = {:?}", element);
//! }
//! ```
//!
//! ### Matrix Builders
//! There are several ways to create a matrix too.
//!
//! For example, you can create a matrix from a Rust vector using
//! `Matrix::from` static method. The type of the resulting matrix is
//! deduced from the type of the elements in the sequences.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vec![
//! vec![3, 1, 4],
//! vec![1, 5, 9],
//! vec![0, 1, 2],
//! ];
//! let w = Matrix::from(x);
//! ```
//!
//! The number of columns must be consistent,
//! otherwise it will panic. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = vec![
//! vec![3, 1, 4],
//! vec![1, 5],
//! ];
//! let w = Matrix::from(x);
//! // thread 'main' panicked at 'Invalid matrix: the number of columns is inconsistent',
//! ```
//!
//!
//! The [`matrix!`] macro is provided to make initialization of the
//! matrix more convenient.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![
//! 3.0, 1.0, 4.0;
//! 1.0, 5.0, 9.0;
//! ];
//! ```
//!
//! It can also initialize each element of a matrix with a given shape
//! and value.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![[3, 3] => 0]; // matrix![0, 0, 0; 0, 0, 0; 0, 0, 0]
//! ```
//!
//! To create a random matrix, Crabsformer provides
//! [`RandomMatrixBuilder`]. It can be explicitly seeded so that the results
//! are reproducible.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut rmb = RandomMatrixBuilder::new();
//! ```
//!
//! The method [`rmb.uniform`] creates a matrix of the given shape and
//! populates it with random samples from a uniform distribution over the
//! half-open interval.
//!
//! ```
//! # use crabsformer::prelude::*;
//! # let mut rmb = RandomMatrixBuilder::new();
//! let v = rmb.uniform([5, 5], 0.0, 1.0).unwrap();
//! ```
//!
//! See also: [Matrix Builders].
//!
//! [`matrix!`]: macro.matrix.html
//! [`RandomMatrixBuilder`]: matrix/builders/struct.RandomMatrixBuilder.html
//! [`rmb.uniform`]: matrix/builders/struct.RandomMatrixBuilder.html#method.uniform
//! [Matrix Builders]: matrix/builders/index.html
//!
//! ### Matrix Basic Operations
//! You can perform arithmetic operations on a matrix.
//! Arithmetic operators on matrices apply elementwise.
//! A new matrix is created and filled with the result.
//! For example, if you add two matrices, the arithmetic operator
//! works element-wise. The output will be a matrix of the same
//! shape.
//!
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let w1 = matrix![
//! 2, 4, 6;
//! 3, 1, 1;
//! 4, 5, 6;
//! ];
//!
//! let w2 = matrix![
//! 1, 3, 5;
//! 3, 1, 3;
//! 1, 1, 1;
//! ];
//!
//! let w3 = w1 + w2;
//!
//! assert_eq!(w3, matrix![
//! 3, 7, 11;
//! 6, 2, 4;
//! 5, 6, 7;
//! ]);
//! ```
//!
//! Matrix subtraction and multiplication also work the same way:
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let w1 = matrix![2, 4; 3, 1] - matrix![1, 3; 3, 1];
//! assert_eq!(w1, matrix![
//! 1, 1;
//! 0, 0;
//! ]);
//!
//! let w2 = matrix![0, 1; 2, 0] - matrix![1, 1; 0, 1];
//! assert_eq!(w2, matrix![
//! -1, 0;
//! 2, -1;
//! ]);
//!
//! let w3 = matrix![0, 1; 1, 0] * matrix![1, 1; 1, 1];
//! assert_eq!(w3, matrix![
//! 0, 1;
//! 1, 0;
//! ]);
//! ```
//!
//! You can run an arithmetic operation on the matrix with
//! a scalar value too. For example, this code multiplies each element
//! of the matrix by 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![3, 1; 4, 1] * 2;
//! assert_eq!(w, matrix![6, 2; 8, 2]);
//! ```
//!
//! Some operations, such as `+=` and `*=`, act in place to modify an
//! existing matrix rather than create a new one.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut w = matrix![3, 1; 4, 1];
//!
//! w += 3;
//! assert_eq!(w, matrix![6, 4; 7, 4]);
//!
//! w -= 1;
//! assert_eq!(w, matrix![5, 3; 6, 3]);
//!
//! w *= 2;
//! assert_eq!(w, matrix![10, 6; 12, 6]);
//! ```
//!
//! If you try to add, subtract or multiply matrices with a
//! different shape, you will get an error. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = matrix![3, 1; 4, 1] + matrix![2, 10, 9; 1, 4, 7];
//! // thread 'main' panicked at 'Matrix addition with invalid shape: [2, 2] != [3, 3]' src/main.rs:12:13
//! ```
//!
//! If you would like to square the individual elements of the matrix,
//! or raise them even higher, use the [`power`][m.power] method. Here, each element
//! of the matrix is raised to the power 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w1 = matrix![3, 1; 4, 1];
//! let w2 = w1.power(2);
//! assert_eq!(w2, matrix![9, 1; 16, 1]);
//! ```
//!
//! [m.power]: struct.Matrix.html#method.power
//!
//! When operating with matrices of different types,
//! the Rust compiler will raise an error like the following:
//!
//! ```text
//! cannot add `matrix::Matrix<{integer}>` to `matrix::Matrix<{float}>`
//! ```
//!
//! ---
//! TODO(pyk): Continue quick tutorial here
//!
//! ---
//!
//! [numeric type]: https://doc.rust-lang.org/reference/types/numeric.html
//! [pyk]: https://github.com/pyk
//!
//! ## Getting help
//! Feel free to start discussion at [GitHub issues].
//!
//! [Github issues]: https://github.com/pyk/crabsformer/issues/new/choose
//!
//! ## License
//! Crabsformer is licensed under the [Apache-2.0] license.
//!
//! Unless you explicitly state otherwise, any contribution intentionally
//! submitted for inclusion in Crabsformer by you, as defined in the Apache-2.0
//! license, shall be licensed as above, without
//! any additional terms or conditions.
//!
//! [Apache-2.0]: https://github.com/pyk/crabsformer/blob/master/LICENSE
//!
pub mod matrix;
pub mod prelude;
pub mod utils;
pub mod vector; | //!
//! and this to your crate root:
//!
//! ```
//! use crabsformer::prelude::*; | random_line_split |
2.1.dl_tf_intermediate_classifications.py | #%% Binary classification On Kaggle data using Tensorflow Multi level
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gc; gc.enable()
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
exec(open(os.path.abspath('tf_CommonUtils.py')).read())
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data from https://www.kaggle.com/c/titanic/data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do the simple thing (mean imputation) so that we can focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
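#Two softmax units + sparse_categorical_crossentropy is an equivalent formulation of binary
#classification to a single sigmoid unit + binary_crossentropy (the next section uses the sigmoid form)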
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
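#Equivalent vectorized form: predictions_number = np.argmax(predictions, axis=1)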
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore a few more ways to improve classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do the simple thing (mean imputation) so that we can focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: the model accepts Tensors; this custom function converts a pandas
#DataFrame into a tf.data.Dataset of feature dicts and label values:
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
|
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#train in iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#bucketized cols: If you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
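#boundaries=[30, 40, 50, 60] give five buckets: (-inf, 30), [30, 40), [40, 50), [50, 60) and [60, +inf)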
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: When a category has many possible values, use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
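#A common rule of thumb is dimension ~ vocab_size ** 0.25; 'Sex' has only two values, so 8 is generous here and mainly for illustration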
feature_cols.append(embedding_col)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
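#A minimal sketch of a hashed feature column (illustration only, not appended to feature_cols,
#so the model below is unchanged): the category string is hashed into a fixed number of buckets,
#so no explicit vocabulary is needed. hash_bucket_size=10 is an arbitrary value for this demo.
hashed_embarked = tf.feature_column.categorical_column_with_hash_bucket(embedding_FEATURES, hash_bucket_size=10)
hashed_embarked_one_hot = tf.feature_column.indicator_column(hashed_embarked)
print(tf.keras.layers.DenseFeatures(hashed_embarked_one_hot)(first_batch).numpy())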
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique(data[crossed_FEATURES].values))
#crossed_feature = tf.feature_column.crossed_column([age_buckets, cat_vocab_crosssed],
# hash_bucket_size=10) # Max size of all combination
#crossed_feature = tf.feature_column.indicator_column(crossed_feature)
#feature_cols.append(crossed_feature)
# Model in this way
feature_cols = tf.keras.layers.DenseFeatures(feature_cols)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(feature_cols)
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))
#Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)
model.summary()
model.fit(ds_train, epochs = 100) # epochs steps_per_epoch=100,
#Keras shows about 95 steps per epoch here: ceil(755 training rows / batch size of 8)
# Evaluate on test data
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'eval')
model.evaluate(test_ds)
# loss value & metrics values: [0.13, 0.95]
#Making Predictions
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'test')
predictions = model.predict(test_ds, verbose=1)
predictions = np.ravel(predictions)
#Explain why cutoff is required and why not 0.5 always works
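#A 0.5 threshold is only a reasonable default when the classes are balanced and the predicted
#probabilities are well calibrated. Here only about 38% of the passengers survived, so the class
#prior is used instead and the effective threshold becomes 1 - cutoff (roughly 0.6). Tuning the
#threshold on a validation ROC/PR curve would be a more principled choice.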
cutoff = data[data[LABEL] == 1].shape[0]/data.shape[0]
predictions_number = tf.where(predictions >= (1-cutoff), 1, 0)
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.58
# Compare with the 'without embedding' run above: even though the number of records comes down, there is not much difference in accuracy
del(data, training_set, test_set, predictions_number, age_avg, NUM_FEATURES, bucketized_FEATURES, categorical_FEATURES, embedding_FEATURES, crossed_FEATURES, FEATURES, LABEL, batch_size, ds_train, feature_cols, bucketized_col, age_buckets, cat_vocab, cat_one_hot, first_batch, feature_layer, embedding_col, model, test_ds, cutoff); gc.collect()
## Which one to use?
## Say - you have 10 million records although you can process 1 million in memory | dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels)) | conditional_block |
2.1.dl_tf_intermediate_classifications.py | #%% Binary classification On Kaggle data using Tensorflow Multi level
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gc; gc.enable()
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
exec(open(os.path.abspath('tf_CommonUtils.py')).read())
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data from https://www.kaggle.com/c/titanic/data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do the simple thing (mean imputation) so that we can focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
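#Two softmax units + sparse_categorical_crossentropy is an equivalent formulation of binary
#classification to a single sigmoid unit + binary_crossentropy (the next section uses the sigmoid form)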
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
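#Equivalent vectorized form: predictions_number = np.argmax(predictions, axis=1)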
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do a simple imputation so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: the model accepts Tensors, so define a custom function to convert the pandas
#DataFrame and return the feature columns and label values as Tensors:
def | (features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#Turn the training data into an iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#Bucketized cols: if you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: when there are many values per category, use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - the embedding output
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique(data[crossed_FEATURES].values))
#crossed_feature = tf.feature_column.crossed_column([age_buckets, cat_vocab_crosssed],
# hash_bucket_size=10) # Max size of all combination
#crossed_feature = tf.feature_column.indicator_column(crossed_feature)
#feature_cols.append(crossed_feature)
# Wrap the feature columns in a DenseFeatures layer for the model
feature_cols = tf.keras.layers.DenseFeatures(feature_cols)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(feature_cols)
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))
#Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)
model.summary()
model.fit(ds_train, epochs = 100) # epochs steps_per_epoch=100,
#With the dataset pipeline, each epoch shows 95 steps (755 records / batch size of 8)
# Evaluate on test data
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'eval')
model.evaluate(test_ds)
# loss value & metrics values: [0.13, 0.95]
#Making Predictions
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'test')
predictions = model.predict(test_ds, verbose=1)
predictions = np.ravel(predictions)
#Explain why cutoff is required and why not 0.5 always works
cutoff = data[data[LABEL] == 1].shape[0]/data.shape[0]
predictions_number = tf.where(predictions >= (1-cutoff), 1, 0)
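#Editor's note: with imbalanced classes a fixed 0.5 threshold favours the majority class, so the
#script derives the cutoff from the observed positive rate instead. A common alternative (sketch,
#not in the original) is to pick the threshold from the precision-recall curve:
#from sklearn.metrics import precision_recall_curve
#precision, recall, thresholds = precision_recall_curve(test_set[LABEL].values, predictions)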
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.58
# Compare with the 'without embedding' run above: even though the number of records comes down, there is not much difference in accuracy
del(data, training_set, test_set, predictions_number, age_avg, NUM_FEATURES, bucketized_FEATURES, categorical_FEATURES, embedding_FEATURES, crossed_FEATURES, FEATURES, LABEL, batch_size, ds_train, feature_cols, bucketized_col, age_buckets, cat_vocab, cat_one_hot, first_batch, feature_layer, embedding_col, model, test_ds, cutoff); gc.collect()
## Which one to use?
## Say - you have 10 million records although you can process 1 million in memory | input_fn | identifier_name |
2.1.dl_tf_intermediate_classifications.py | #%% Binary classification On Kaggle data using Tensorflow Multi level
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gc; gc.enable()
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
exec(open(os.path.abspath('tf_CommonUtils.py')).read())
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data from https://www.kaggle.com/c/titanic/data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do a simple imputation so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
| training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do a simple imputation so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: the model accepts Tensors, so define a custom function to convert the pandas
#DataFrame and return the feature columns and label values as Tensors:
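#caller_source modes (editor's note): 'train' -> (features, labels) shuffled and batched;
#'eval' -> (features, labels) batched without shuffling; 'test' -> features only, batched.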
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#Turn the training data into an iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#Bucketized cols: if you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
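# Just to see - bucketized output (editor's sketch, mirroring the indicator-column check below):
#first_batch = next(iter(ds_train))[0]
#print(tf.keras.layers.DenseFeatures(age_buckets)(first_batch).numpy())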
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: when there are many values per category, use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - the embedding output
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique(data[crossed_FEATURES].values))
#crossed_feature = tf.feature_column.crossed_column([age_buckets, cat_vocab_crosssed],
# hash_bucket_size=10) # Max size of all combination
#crossed_feature = tf.feature_column.indicator_column(crossed_feature)
#feature_cols.append(crossed_feature)
# Wrap the feature columns in a DenseFeatures layer for the model
feature_cols = tf.keras.layers.DenseFeatures(feature_cols)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(feature_cols)
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))
#Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)
model.summary()
model.fit(ds_train, epochs = 100) # epochs steps_per_epoch=100,
#With the dataset pipeline, each epoch shows 95 steps (755 records / batch size of 8)
# Evaluate on test data
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'eval')
model.evaluate(test_ds)
# loss value & metrics values: [0.13, 0.95]
#Making Predictions
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'test')
predictions = model.predict(test_ds, verbose=1)
predictions = np.ravel(predictions)
#Explain why cutoff is required and why not 0.5 always works
cutoff = data[data[LABEL] == 1].shape[0]/data.shape[0]
predictions_number = tf.where(predictions >= (1-cutoff), 1, 0)
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.58
# Compare with the 'without embedding' run above: even though the number of records comes down, there is not much difference in accuracy
del(data, training_set, test_set, predictions_number, age_avg, NUM_FEATURES, bucketized_FEATURES, categorical_FEATURES, embedding_FEATURES, crossed_FEATURES, FEATURES, LABEL, batch_size, ds_train, feature_cols, bucketized_col, age_buckets, cat_vocab, cat_one_hot, first_batch, feature_layer, embedding_col, model, test_ds, cutoff); gc.collect()
## Which one to use?
## Say - you have 10 million records although you can process 1 million in memory | random_line_split |
|
2.1.dl_tf_intermediate_classifications.py | #%% Binary classification On Kaggle data using Tensorflow Multi level
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gc; gc.enable()
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
exec(open(os.path.abspath('tf_CommonUtils.py')).read())
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data from https://www.kaggle.com/c/titanic/data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do a simple imputation so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, do a simple imputation so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# Identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segregate into 85% train and 15% test
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: the model accepts Tensors, so define a custom function to convert the pandas
#DataFrame and return the feature columns and label values as Tensors:
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
|
#Turn the training data into an iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#Bucketized cols: if you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: when there are many values per category, use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - the embedding output
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique(data[crossed_FEATURES].values))
#crossed_feature = tf.feature_column.crossed_column([age_buckets, cat_vocab_crosssed],
# hash_bucket_size=10) # Max size of all combination
#crossed_feature = tf.feature_column.indicator_column(crossed_feature)
#feature_cols.append(crossed_feature)
# Wrap the feature columns in a DenseFeatures layer for the model
feature_cols = tf.keras.layers.DenseFeatures(feature_cols)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(feature_cols)
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))
#Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'], run_eagerly=True)
model.summary()
model.fit(ds_train, epochs = 100) # epochs steps_per_epoch=100,
#With the dataset pipeline, each epoch shows 95 steps (755 records / batch size of 8)
# Evaluate on test data
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'eval')
model.evaluate(test_ds)
# loss value & metrics values: [0.13, 0.95]
#Making Predictions
test_ds = input_fn(test_set[FEATURES], test_set[LABEL],custom_batch_size = batch_size, caller_source = 'test')
predictions = model.predict(test_ds, verbose=1)
predictions = np.ravel(predictions)
#Explain why cutoff is required and why not 0.5 always works
cutoff = data[data[LABEL] == 1].shape[0]/data.shape[0]
predictions_number = tf.where(predictions >= (1-cutoff), 1, 0)
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.58
# Compare with the 'without embedding' run above: even though the number of records comes down, there is not much difference in accuracy
del(data, training_set, test_set, predictions_number, age_avg, NUM_FEATURES, bucketized_FEATURES, categorical_FEATURES, embedding_FEATURES, crossed_FEATURES, FEATURES, LABEL, batch_size, ds_train, feature_cols, bucketized_col, age_buckets, cat_vocab, cat_one_hot, first_batch, feature_layer, embedding_col, model, test_ds, cutoff); gc.collect()
## Which one to use?
## Say - you have 10 million records although you can process 1 million in memory | dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset | identifier_body |
ivf_torch.py | import torch
import time
from pykeops.torch import LazyTensor, Genred, KernelSolve, default_dtype
from pykeops.torch.cluster import swap_axes as torch_swap_axes
from pykeops.torch.cluster import cluster_ranges_centroids, from_matrix
# from pykeops.torch.generic.generic_red import GenredLowlevel
def is_on_device(x):
return x.is_cuda
class torchtools:
copy = torch.clone
exp = torch.exp
log = torch.log
norm = torch.norm
swap_axes = torch_swap_axes
Genred = Genred
KernelSolve = KernelSolve
arraytype = torch.Tensor
float_types = [float]
# GenredLowlevel = GenredLowlevel
@staticmethod
def eq(x, y):
return torch.eq(x, y)
@staticmethod
def transpose(x):
return x.t()
@staticmethod
def permute(x, *args):
return x.permute(*args)
@staticmethod
def contiguous(x):
return x.contiguous()
@staticmethod
def solve(A, b):
return torch.solve(b, A)[0].contiguous()
@staticmethod
def arraysum(x, axis=None):
return x.sum() if axis is None else x.sum(dim=axis)
@staticmethod
def long(x):
return x.long()
@staticmethod
def size(x):
return x.numel()
@staticmethod
def tile(*args):
return torch.Tensor.repeat(*args)
@staticmethod
def numpy(x):
return x.detach().cpu().numpy()
@staticmethod
def view(x, s):
return x.view(s)
@staticmethod
def is_tensor(x):
return isinstance(x, torch.Tensor)
@staticmethod
def dtype(x):
if hasattr(x, "dtype"):
return x.dtype
else:
return type(x)
@staticmethod
def detect_complex(x):
if type(x) == list:
return any(type(v) == complex for v in x)
elif type(x) == torch.Tensor:
return torch.is_complex(x)
else:
return type(x) == complex
@staticmethod
def view_as_complex(x):
sh = list(x.shape)
sh[-1] //= 2
sh += [2]
x = x.view(sh)
return torch.view_as_complex(x)
@staticmethod
def view_as_real(x):
sh = list(x.shape)
sh[-1] *= 2
return torch.view_as_real(x).view(sh)
@staticmethod
def dtypename(dtype):
if dtype == torch.float32:
return "float32"
elif dtype == torch.float64: | elif dtype == int:
return int
elif dtype == list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
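# Editor's note: squared_distances below expands ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x @ y.T pairwise.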
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(
-1, x.shape[1]
)
# if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric
if approx and self.__metric == "angular":
self.__update_metric("angular_full")
x = self.tools.contiguous(x)
self.__device = device
self.__backend = backend
cl, c = self.tools.kmeans(
x,
self.__distance,
clusters,
Niter=Niter,
device=self.__device,
approx=approx,
n=n,
)
self.__c = c
cl = self.__assign(x)
ncl = self.__k_argmin(c, c, k=a)
self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)
x, x_labels = self.__sort_clusters(x, cl, store_x=True)
self.__x = x
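        # Editor's note: the lines below build a (clusters x clusters) boolean mask whose entry
        # [i, j] is True when cluster j is among the 'a' clusters closest to cluster i; it is
        # used later with from_matrix in _kneighbors to restrict the search to those blocks.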
r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)
self.__keep = self.tools.to(
self.tools.zeros([clusters, clusters], dtype=bool), self.__device
)
self.__keep[r, ncl.flatten()] = True
return self
def __assign(self, x, c=None):
if c is None:
c = self.__c
return self.__k_argmin(x, c)
def _kneighbors(self, y):
"""
Obtain the k nearest neighbors of the query dataset y
"""
if self.__x is None:
raise ValueError("Input dataset not fitted yet! Call .fit() first!")
if self.__device and self.tools.device(y) != self.__device:
raise ValueError("Input dataset and query dataset must be on same device")
if len(y.shape) != 2:
raise ValueError("Query dataset must be a 2D tensor")
if self.__x.shape[-1] != y.shape[-1]:
raise ValueError("Query and dataset must have same dimensions")
if self.__normalise:
y = y / self.tools.repeat(self.tools.norm(y, 2, -1), y.shape[1]).reshape(
-1, y.shape[1]
)
y = self.tools.contiguous(y)
y_labels = self.__assign(y)
y_ranges, _, _ = cluster_ranges_centroids(y, y_labels)
self.__y_ranges = y_ranges
y, y_labels = self.__sort_clusters(y, y_labels, store_x=False)
x_LT = self.__LazyTensor(self.tools.unsqueeze(self.__x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
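        # Editor's note: from_matrix combines the cluster ranges with the keep mask into
        # block-sparse ranges, so the KeOps reduction only runs over the selected cluster pairs.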
ranges_ij = from_matrix(y_ranges, self.__x_ranges, self.__keep)
D_ij.ranges = ranges_ij
nn = D_ij.argKmin(K=self.__k, axis=1)
return self.__unsort(nn)
def brute_force(self, x, y, k=5):
"""Performs a brute force search with KeOps
Args:
x (array): Input dataset
y (array): Query dataset
k (int): Number of nearest neighbors to obtain
"""
x_LT = self.__LazyTensor(self.tools.unsqueeze(x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
return D_ij.argKmin(K=k, axis=1)
class IVF(GenericIVF):
"""IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset."""
def __init__(self, k=5, metric="euclidean", normalise=False):
"""Initialise the IVF-Flat class.
IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset.
Args:
k (int): Number of nearest neighbours to obtain
metric (str,function): Metric to use
                Currently, "euclidean", "manhattan", "angular" and "hyperbolic" are directly supported, in addition to custom metrics
Hyperbolic metric requires the use of approx = True, during the fit() function later
Custom metrics should be in the form of a function with 2 inputs and returns their distance
For more information, refer to the tutorial
normalise (bool): Whether or not to normalise all input data to norm 1
This is used mainly for angular metric
In place of this, "angular_full" metric may be used instead
"""
from pykeops.torch import LazyTensor
self.__get_tools()
super().__init__(k=k, metric=metric, normalise=normalise, LazyTensor=LazyTensor)
def __get_tools(self):
# from pykeops.torch.utils import torchtools
self.tools = torchtools
def fit(self, x, clusters=50, a=5, Niter=15, approx=False, n=50):
"""Fits a dataset to perform the nearest neighbour search over
K-Means is performed on the dataset to obtain clusters
        Then the closest clusters to each cluster are stored for use at query time
Args:
x (torch.Tensor): Torch tensor dataset of shape N, D
Where N is the number of points and D is the number of dimensions
clusters (int): Total number of clusters to create in K-Means
a (int): Number of clusters to search over, must be less than total number of clusters created
Niter (int): Number of iterations to run in K-Means algorithm
approx (bool): Whether or not to use an approximation step in K-Means
In hyperbolic metric and custom metric, this should be set to True
This is because the optimal cluster centroid may not have a simple closed form expression
n (int): Number of iterations to optimise the cluster centroid, when approx = True
A value of around 50 is recommended
Lower values are faster while higher values give better accuracy in centroid location
"""
if type(x) != torch.Tensor:
raise ValueError("Input dataset must be a torch tensor")
return self._fit(
x, clusters=clusters, a=a, Niter=Niter, device=x.device, approx=approx, n=n
)
def kneighbors(self, y):
"""Obtains the nearest neighbors for an input dataset from the fitted dataset
Args:
y (torch.Tensor): Input dataset to search over
"""
if type(y) != torch.Tensor:
raise ValueError("Query dataset must be a torch tensor")
return self._kneighbors(y) | return "float64"
elif dtype == torch.float16:
return "float16" | random_line_split |
ivf_torch.py | import torch
import time
from pykeops.torch import LazyTensor, Genred, KernelSolve, default_dtype
from pykeops.torch.cluster import swap_axes as torch_swap_axes
from pykeops.torch.cluster import cluster_ranges_centroids, from_matrix
# from pykeops.torch.generic.generic_red import GenredLowlevel
def is_on_device(x):
return x.is_cuda
class torchtools:
copy = torch.clone
exp = torch.exp
log = torch.log
norm = torch.norm
swap_axes = torch_swap_axes
Genred = Genred
KernelSolve = KernelSolve
arraytype = torch.Tensor
float_types = [float]
# GenredLowlevel = GenredLowlevel
@staticmethod
def eq(x, y):
return torch.eq(x, y)
@staticmethod
def transpose(x):
return x.t()
@staticmethod
def permute(x, *args):
return x.permute(*args)
@staticmethod
def contiguous(x):
return x.contiguous()
@staticmethod
def solve(A, b):
return torch.solve(b, A)[0].contiguous()
@staticmethod
def arraysum(x, axis=None):
return x.sum() if axis is None else x.sum(dim=axis)
@staticmethod
def long(x):
return x.long()
@staticmethod
def size(x):
return x.numel()
@staticmethod
def tile(*args):
return torch.Tensor.repeat(*args)
@staticmethod
def numpy(x):
return x.detach().cpu().numpy()
@staticmethod
def view(x, s):
return x.view(s)
@staticmethod
def is_tensor(x):
return isinstance(x, torch.Tensor)
@staticmethod
def dtype(x):
if hasattr(x, "dtype"):
return x.dtype
else:
return type(x)
@staticmethod
def detect_complex(x):
if type(x) == list:
return any(type(v) == complex for v in x)
elif type(x) == torch.Tensor:
return torch.is_complex(x)
else:
return type(x) == complex
@staticmethod
def view_as_complex(x):
sh = list(x.shape)
sh[-1] //= 2
sh += [2]
x = x.view(sh)
return torch.view_as_complex(x)
@staticmethod
def view_as_real(x):
sh = list(x.shape)
sh[-1] *= 2
return torch.view_as_real(x).view(sh)
@staticmethod
def dtypename(dtype):
if dtype == torch.float32:
return "float32"
elif dtype == torch.float64:
return "float64"
elif dtype == torch.float16:
return "float16"
elif dtype == int:
return int
elif dtype == list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
|
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(
-1, x.shape[1]
)
# if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric
if approx and self.__metric == "angular":
self.__update_metric("angular_full")
x = self.tools.contiguous(x)
self.__device = device
self.__backend = backend
cl, c = self.tools.kmeans(
x,
self.__distance,
clusters,
Niter=Niter,
device=self.__device,
approx=approx,
n=n,
)
self.__c = c
cl = self.__assign(x)
ncl = self.__k_argmin(c, c, k=a)
self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)
x, x_labels = self.__sort_clusters(x, cl, store_x=True)
self.__x = x
r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)
self.__keep = self.tools.to(
self.tools.zeros([clusters, clusters], dtype=bool), self.__device
)
self.__keep[r, ncl.flatten()] = True
return self
def __assign(self, x, c=None):
if c is None:
c = self.__c
return self.__k_argmin(x, c)
def _kneighbors(self, y):
"""
Obtain the k nearest neighbors of the query dataset y
"""
if self.__x is None:
raise ValueError("Input dataset not fitted yet! Call .fit() first!")
if self.__device and self.tools.device(y) != self.__device:
raise ValueError("Input dataset and query dataset must be on same device")
if len(y.shape) != 2:
raise ValueError("Query dataset must be a 2D tensor")
if self.__x.shape[-1] != y.shape[-1]:
raise ValueError("Query and dataset must have same dimensions")
if self.__normalise:
y = y / self.tools.repeat(self.tools.norm(y, 2, -1), y.shape[1]).reshape(
-1, y.shape[1]
)
y = self.tools.contiguous(y)
y_labels = self.__assign(y)
y_ranges, _, _ = cluster_ranges_centroids(y, y_labels)
self.__y_ranges = y_ranges
y, y_labels = self.__sort_clusters(y, y_labels, store_x=False)
x_LT = self.__LazyTensor(self.tools.unsqueeze(self.__x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
ranges_ij = from_matrix(y_ranges, self.__x_ranges, self.__keep)
D_ij.ranges = ranges_ij
nn = D_ij.argKmin(K=self.__k, axis=1)
return self.__unsort(nn)
def brute_force(self, x, y, k=5):
"""Performs a brute force search with KeOps
Args:
x (array): Input dataset
y (array): Query dataset
k (int): Number of nearest neighbors to obtain
"""
x_LT = self.__LazyTensor(self.tools.unsqueeze(x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
return D_ij.argKmin(K=k, axis=1)
class IVF(GenericIVF):
"""IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset."""
def __init__(self, k=5, metric="euclidean", normalise=False):
"""Initialise the IVF-Flat class.
IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset.
Args:
k (int): Number of nearest neighbours to obtain
metric (str,function): Metric to use
                Currently, "euclidean", "manhattan", "angular" and "hyperbolic" are directly supported, in addition to custom metrics
Hyperbolic metric requires the use of approx = True, during the fit() function later
Custom metrics should be in the form of a function with 2 inputs and returns their distance
For more information, refer to the tutorial
normalise (bool): Whether or not to normalise all input data to norm 1
This is used mainly for angular metric
In place of this, "angular_full" metric may be used instead
"""
from pykeops.torch import LazyTensor
self.__get_tools()
super().__init__(k=k, metric=metric, normalise=normalise, LazyTensor=LazyTensor)
def __get_tools(self):
# from pykeops.torch.utils import torchtools
self.tools = torchtools
def fit(self, x, clusters=50, a=5, Niter=15, approx=False, n=50):
"""Fits a dataset to perform the nearest neighbour search over
K-Means is performed on the dataset to obtain clusters
Then the closest clusters to each cluster are stored for use at query time
Args:
x (torch.Tensor): Torch tensor dataset of shape N, D
Where N is the number of points and D is the number of dimensions
clusters (int): Total number of clusters to create in K-Means
a (int): Number of clusters to search over, must be less than total number of clusters created
Niter (int): Number of iterations to run in K-Means algorithm
approx (bool): Whether or not to use an approximation step in K-Means
For the hyperbolic metric and custom metrics, this should be set to True
This is because the optimal cluster centroid may not have a simple closed-form expression
n (int): Number of iterations to optimise the cluster centroid, when approx = True
A value of around 50 is recommended
Lower values are faster while higher values give better accuracy in centroid location
"""
if type(x) != torch.Tensor:
raise ValueError("Input dataset must be a torch tensor")
return self._fit(
x, clusters=clusters, a=a, Niter=Niter, device=x.device, approx=approx, n=n
)
def kneighbors(self, y):
"""Obtains the nearest neighbors for an input dataset from the fitted dataset
Args:
y (torch.Tensor): Input dataset to search over
"""
if type(y) != torch.Tensor:
raise ValueError("Query dataset must be a torch tensor")
return self._kneighbors(y) | d.backend = self.__backend | conditional_block |
ivf_torch.py | import torch
import time
from pykeops.torch import LazyTensor, Genred, KernelSolve, default_dtype
from pykeops.torch.cluster import swap_axes as torch_swap_axes
from pykeops.torch.cluster import cluster_ranges_centroids, from_matrix
# from pykeops.torch.generic.generic_red import GenredLowlevel
def is_on_device(x):
return x.is_cuda
class torchtools:
copy = torch.clone
exp = torch.exp
log = torch.log
norm = torch.norm
swap_axes = torch_swap_axes
Genred = Genred
KernelSolve = KernelSolve
arraytype = torch.Tensor
float_types = [float]
# GenredLowlevel = GenredLowlevel
@staticmethod
def eq(x, y):
return torch.eq(x, y)
@staticmethod
def transpose(x):
return x.t()
@staticmethod
def permute(x, *args):
return x.permute(*args)
@staticmethod
def contiguous(x):
return x.contiguous()
@staticmethod
def solve(A, b):
return torch.solve(b, A)[0].contiguous()
@staticmethod
def arraysum(x, axis=None):
return x.sum() if axis is None else x.sum(dim=axis)
@staticmethod
def long(x):
return x.long()
@staticmethod
def size(x):
return x.numel()
@staticmethod
def tile(*args):
return torch.Tensor.repeat(*args)
@staticmethod
def numpy(x):
return x.detach().cpu().numpy()
@staticmethod
def view(x, s):
return x.view(s)
@staticmethod
def is_tensor(x):
return isinstance(x, torch.Tensor)
@staticmethod
def dtype(x):
if hasattr(x, "dtype"):
return x.dtype
else:
return type(x)
@staticmethod
def detect_complex(x):
if type(x) == list:
return any(type(v) == complex for v in x)
elif type(x) == torch.Tensor:
return torch.is_complex(x)
else:
return type(x) == complex
@staticmethod
def view_as_complex(x):
sh = list(x.shape)
sh[-1] //= 2
sh += [2]
x = x.view(sh)
return torch.view_as_complex(x)
@staticmethod
def view_as_real(x):
sh = list(x.shape)
sh[-1] *= 2
return torch.view_as_real(x).view(sh)
@staticmethod
def dtypename(dtype):
if dtype == torch.float32:
return "float32"
elif dtype == torch.float64:
return "float64"
elif dtype == torch.float16:
return "float16"
elif dtype == int:
return int
elif dtype == list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
|
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
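# A small illustrative sketch (kept as comments; the array names are hypothetical) of how
# the helpers above can be called directly, e.g. to cluster random points on the CPU:
#   pts = torch.randn(10000, 3)
#   dist = torchtools.distance_function("euclidean")
#   labels, centroids = torchtools.kmeans(pts, dist, K=10, Niter=10, device="cpu")
#   gram = torch_kernel(pts[:100], pts[:100], s=0.5, kernel="gaussian")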
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(
-1, x.shape[1]
)
# if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric
if approx and self.__metric == "angular":
self.__update_metric("angular_full")
x = self.tools.contiguous(x)
self.__device = device
self.__backend = backend
cl, c = self.tools.kmeans(
x,
self.__distance,
clusters,
Niter=Niter,
device=self.__device,
approx=approx,
n=n,
)
self.__c = c
cl = self.__assign(x)
ncl = self.__k_argmin(c, c, k=a)
self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)
x, x_labels = self.__sort_clusters(x, cl, store_x=True)
self.__x = x
r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)
self.__keep = self.tools.to(
self.tools.zeros([clusters, clusters], dtype=bool), self.__device
)
self.__keep[r, ncl.flatten()] = True
return self
def __assign(self, x, c=None):
if c is None:
c = self.__c
return self.__k_argmin(x, c)
def _kneighbors(self, y):
"""
Obtain the k nearest neighbors of the query dataset y
"""
if self.__x is None:
raise ValueError("Input dataset not fitted yet! Call .fit() first!")
if self.__device and self.tools.device(y) != self.__device:
raise ValueError("Input dataset and query dataset must be on same device")
if len(y.shape) != 2:
raise ValueError("Query dataset must be a 2D tensor")
if self.__x.shape[-1] != y.shape[-1]:
raise ValueError("Query and dataset must have same dimensions")
if self.__normalise:
y = y / self.tools.repeat(self.tools.norm(y, 2, -1), y.shape[1]).reshape(
-1, y.shape[1]
)
y = self.tools.contiguous(y)
y_labels = self.__assign(y)
y_ranges, _, _ = cluster_ranges_centroids(y, y_labels)
self.__y_ranges = y_ranges
y, y_labels = self.__sort_clusters(y, y_labels, store_x=False)
x_LT = self.__LazyTensor(self.tools.unsqueeze(self.__x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
ranges_ij = from_matrix(y_ranges, self.__x_ranges, self.__keep)
D_ij.ranges = ranges_ij
nn = D_ij.argKmin(K=self.__k, axis=1)
return self.__unsort(nn)
def brute_force(self, x, y, k=5):
"""Performs a brute force search with KeOps
Args:
x (array): Input dataset
y (array): Query dataset
k (int): Number of nearest neighbors to obtain
"""
x_LT = self.__LazyTensor(self.tools.unsqueeze(x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
return D_ij.argKmin(K=k, axis=1)
class IVF(GenericIVF):
"""IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset."""
def __init__(self, k=5, metric="euclidean", normalise=False):
"""Initialise the IVF-Flat class.
IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset.
Args:
k (int): Number of nearest neighbours to obtain
metric (str,function): Metric to use
Currently, "euclidean", "manhattan", "angular" and "hyperbolic" are directly supported, apart from custom metrics
Hyperbolic metric requires the use of approx = True, during the fit() function later
Custom metrics should be in the form of a function with 2 inputs and returns their distance
For more information, refer to the tutorial
normalise (bool): Whether or not to normalise all input data to norm 1
This is used mainly for the angular metric
In place of this, "angular_full" metric may be used instead
"""
from pykeops.torch import LazyTensor
self.__get_tools()
super().__init__(k=k, metric=metric, normalise=normalise, LazyTensor=LazyTensor)
def __get_tools(self):
# from pykeops.torch.utils import torchtools
self.tools = torchtools
def fit(self, x, clusters=50, a=5, Niter=15, approx=False, n=50):
"""Fits a dataset to perform the nearest neighbour search over
K-Means is performed on the dataset to obtain clusters
Then the closest clusters to each cluster are stored for use at query time
Args:
x (torch.Tensor): Torch tensor dataset of shape N, D
Where N is the number of points and D is the number of dimensions
clusters (int): Total number of clusters to create in K-Means
a (int): Number of clusters to search over, must be less than total number of clusters created
Niter (int): Number of iterations to run in K-Means algorithm
approx (bool): Whether or not to use an approximation step in K-Means
For the hyperbolic metric and custom metrics, this should be set to True
This is because the optimal cluster centroid may not have a simple closed-form expression
n (int): Number of iterations to optimise the cluster centroid, when approx = True
A value of around 50 is recommended
Lower values are faster while higher values give better accuracy in centroid location
"""
if type(x) != torch.Tensor:
raise ValueError("Input dataset must be a torch tensor")
return self._fit(
x, clusters=clusters, a=a, Niter=Niter, device=x.device, approx=approx, n=n
)
def kneighbors(self, y):
"""Obtains the nearest neighbors for an input dataset from the fitted dataset
Args:
y (torch.Tensor): Input dataset to search over
"""
if type(y) != torch.Tensor:
raise ValueError("Query dataset must be a torch tensor")
return self._kneighbors(y) | "Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach() | identifier_body |
ivf_torch.py | import torch
import time
from pykeops.torch import LazyTensor, Genred, KernelSolve, default_dtype
from pykeops.torch.cluster import swap_axes as torch_swap_axes
from pykeops.torch.cluster import cluster_ranges_centroids, from_matrix
# from pykeops.torch.generic.generic_red import GenredLowlevel
def is_on_device(x):
return x.is_cuda
class torchtools:
copy = torch.clone
exp = torch.exp
log = torch.log
norm = torch.norm
swap_axes = torch_swap_axes
Genred = Genred
KernelSolve = KernelSolve
arraytype = torch.Tensor
float_types = [float]
# GenredLowlevel = GenredLowlevel
@staticmethod
def eq(x, y):
return torch.eq(x, y)
@staticmethod
def transpose(x):
return x.t()
@staticmethod
def permute(x, *args):
return x.permute(*args)
@staticmethod
def contiguous(x):
return x.contiguous()
@staticmethod
def solve(A, b):
return torch.solve(b, A)[0].contiguous()
@staticmethod
def arraysum(x, axis=None):
return x.sum() if axis is None else x.sum(dim=axis)
@staticmethod
def long(x):
return x.long()
@staticmethod
def size(x):
return x.numel()
@staticmethod
def tile(*args):
return torch.Tensor.repeat(*args)
@staticmethod
def numpy(x):
return x.detach().cpu().numpy()
@staticmethod
def view(x, s):
return x.view(s)
@staticmethod
def is_tensor(x):
return isinstance(x, torch.Tensor)
@staticmethod
def dtype(x):
if hasattr(x, "dtype"):
return x.dtype
else:
return type(x)
@staticmethod
def detect_complex(x):
if type(x) == list:
return any(type(v) == complex for v in x)
elif type(x) == torch.Tensor:
return torch.is_complex(x)
else:
return type(x) == complex
@staticmethod
def view_as_complex(x):
sh = list(x.shape)
sh[-1] //= 2
sh += [2]
x = x.view(sh)
return torch.view_as_complex(x)
@staticmethod
def view_as_real(x):
sh = list(x.shape)
sh[-1] *= 2
return torch.view_as_real(x).view(sh)
@staticmethod
def dtypename(dtype):
if dtype == torch.float32:
return "float32"
elif dtype == torch.float64:
return "float64"
elif dtype == torch.float16:
return "float16"
elif dtype == int:
return int
elif dtype == list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def | (self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(
-1, x.shape[1]
)
# if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric
if approx and self.__metric == "angular":
self.__update_metric("angular_full")
x = self.tools.contiguous(x)
self.__device = device
self.__backend = backend
cl, c = self.tools.kmeans(
x,
self.__distance,
clusters,
Niter=Niter,
device=self.__device,
approx=approx,
n=n,
)
self.__c = c
cl = self.__assign(x)
ncl = self.__k_argmin(c, c, k=a)
self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)
x, x_labels = self.__sort_clusters(x, cl, store_x=True)
self.__x = x
r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)
self.__keep = self.tools.to(
self.tools.zeros([clusters, clusters], dtype=bool), self.__device
)
self.__keep[r, ncl.flatten()] = True
return self
def __assign(self, x, c=None):
if c is None:
c = self.__c
return self.__k_argmin(x, c)
def _kneighbors(self, y):
"""
Obtain the k nearest neighbors of the query dataset y
"""
if self.__x is None:
raise ValueError("Input dataset not fitted yet! Call .fit() first!")
if self.__device and self.tools.device(y) != self.__device:
raise ValueError("Input dataset and query dataset must be on same device")
if len(y.shape) != 2:
raise ValueError("Query dataset must be a 2D tensor")
if self.__x.shape[-1] != y.shape[-1]:
raise ValueError("Query and dataset must have same dimensions")
if self.__normalise:
y = y / self.tools.repeat(self.tools.norm(y, 2, -1), y.shape[1]).reshape(
-1, y.shape[1]
)
y = self.tools.contiguous(y)
y_labels = self.__assign(y)
y_ranges, _, _ = cluster_ranges_centroids(y, y_labels)
self.__y_ranges = y_ranges
y, y_labels = self.__sort_clusters(y, y_labels, store_x=False)
x_LT = self.__LazyTensor(self.tools.unsqueeze(self.__x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
ranges_ij = from_matrix(y_ranges, self.__x_ranges, self.__keep)
D_ij.ranges = ranges_ij
nn = D_ij.argKmin(K=self.__k, axis=1)
return self.__unsort(nn)
def brute_force(self, x, y, k=5):
"""Performs a brute force search with KeOps
Args:
x (array): Input dataset
y (array): Query dataset
k (int): Number of nearest neighbors to obtain
"""
x_LT = self.__LazyTensor(self.tools.unsqueeze(x, 0))
y_LT = self.__LazyTensor(self.tools.unsqueeze(y, 1))
D_ij = self.__distance(y_LT, x_LT)
return D_ij.argKmin(K=k, axis=1)
class IVF(GenericIVF):
"""IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset."""
def __init__(self, k=5, metric="euclidean", normalise=False):
"""Initialise the IVF-Flat class.
IVF-Flat is a KNN approximation algorithm that first clusters the data and then performs the query search on a subset of the input dataset.
Args:
k (int): Number of nearest neighbours to obtain
metric (str,function): Metric to use
Currently, "euclidean", "manhattan", "angular" and "hyperbolic" are directly supported, apart from custom metrics
Hyperbolic metric requires the use of approx = True, during the fit() function later
Custom metrics should be in the form of a function with 2 inputs and returns their distance
For more information, refer to the tutorial
normalise (bool): Whether or not to normalise all input data to norm 1
This is used mainly for the angular metric
In place of this, "angular_full" metric may be used instead
"""
from pykeops.torch import LazyTensor
self.__get_tools()
super().__init__(k=k, metric=metric, normalise=normalise, LazyTensor=LazyTensor)
def __get_tools(self):
# from pykeops.torch.utils import torchtools
self.tools = torchtools
def fit(self, x, clusters=50, a=5, Niter=15, approx=False, n=50):
"""Fits a dataset to perform the nearest neighbour search over
K-Means is performed on the dataset to obtain clusters
Then the closest clusters to each cluster are stored for use at query time
Args:
x (torch.Tensor): Torch tensor dataset of shape N, D
Where N is the number of points and D is the number of dimensions
clusters (int): Total number of clusters to create in K-Means
a (int): Number of clusters to search over, must be less than total number of clusters created
Niter (int): Number of iterations to run in K-Means algorithm
approx (bool): Whether or not to use an approximation step in K-Means
For the hyperbolic metric and custom metrics, this should be set to True
This is because the optimal cluster centroid may not have a simple closed-form expression
n (int): Number of iterations to optimise the cluster centroid, when approx = True
A value of around 50 is recommended
Lower values are faster while higher values give better accuracy in centroid location
"""
if type(x) != torch.Tensor:
raise ValueError("Input dataset must be a torch tensor")
return self._fit(
x, clusters=clusters, a=a, Niter=Niter, device=x.device, approx=approx, n=n
)
def kneighbors(self, y):
"""Obtains the nearest neighbors for an input dataset from the fitted dataset
Args:
y (torch.Tensor): Input dataset to search over
"""
if type(y) != torch.Tensor:
raise ValueError("Query dataset must be a torch tensor")
return self._kneighbors(y) | __get_tools | identifier_name |
views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import signUpForm, EventForm, AddMemberForm, AddLocation
import requests
from datetime import datetime, date
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.views import generic
from django.utils.safestring import mark_safe
from datetime import timedelta, date
import calendar
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.core.files.storage import FileSystemStorage
from django.contrib import messages
from .models import *
from .utils import Calendar
from django.forms.models import model_to_dict
from django.contrib.auth.forms import UserCreationForm
#the tag @login_required makes it so that only logged-in users can access the view
@login_required
def indexView(request):
# the view collects information from multiple tables (events, notes) and returns it to the user
user = CustomUser.objects.get(username= request.user)
today = datetime.today()
start_time = today.replace(hour=23, minute=59)
end_time = today.replace(hour=00, minute=1)
events = Event.objects.filter(user = user)
notes = Notes.objects.filter(user=user)
events_today = []
for event in events:
#filtering events to see which are active on the day
if event.start_time <= start_time and event.end_time >= end_time:
#adding event information to a dictionary
event_today = {
'event_id' : event.id,
'start_time': event.start_time,
'end_time' : event.end_time,
'content' : event.description,
'title': event.title
}
#appending created dictionary
events_today.append(event_today)
context = {
'events_today' : events_today,
'notes' : notes
}
return render(request, "index.html", context)
#homepage view
def homeView(request):
#check if user is authenticated
if request.user.is_authenticated:
#if true render index
return redirect("manCal:index")
#else render homepage
return render(request, 'home.html')
#login view
def loginView(request):
# Get username and password from request
username = request.POST['username']
password = request.POST['password']
# Authenticate the user; if the credentials are valid this returns a user object, otherwise None
user = authenticate(request, username=username, password=password)
# If user is authenticated
if user is not None:
# Log the user in, then save username and password to the session, plus loggedin variable set to True
login(request, user)
request.session['username'] = username
request.session['password'] = password
context = {
'username': username,
'password': password,
'loggedin': True
}
response = render(request, 'index.html', context)
# Remember last login in cookie
now = datetime.utcnow()
max_age = 365 * 24 * 60 * 60 #one year
delta = now + timedelta(seconds=max_age)
format = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = datetime.strftime(delta, format)
response.set_cookie('last_login',now,expires=expires)
#return response
return redirect("/index")
else:
raise Http404('Wrong credentials')
# If logged in, session variables are cleaned up and user logged out. Otherwise redirected to login page
@login_required
def logoutView(request):
logout(request)
#after logging out, send the user back to the login page
return redirect("/login")
#registration
def signUpView(request):
#checking if method is POST
if request.method == "POST":
#getting form from request
form = signUpForm(request.POST)
#validating form
if form.is_valid():
#if valid, save and redirect to login with a message
form.save()
messages.success(request,"Registration Successful!")
return redirect("/login")
else:
#error
print('failed after validation')
else:
#clean up form
form = signUpForm()
return render(request, "signup.html", {"form": form})
#view to updated account info
@login_required
def profileView(request):
#checking request method
if request.method == 'POST':
#extracting form information from the request and storing it in local variables
user = CustomUser.objects.get(username= request.user)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
#updating existing values with the updated ones
user.first_name= first_name
user.last_name = last_name
user.email=email
#save and redirect to same page
user.save()
return redirect("manCal:profile")
context = {
}
return render(request, "profile.html", context)
# start calendar render views
#get date for starting calendar date
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return date(year, month, day=1)
return datetime.today()
#action to go prev month
def prev_month(d):
#changing the day with which the calendar is started
first = d.replace(day=1)
prev_month = first - timedelta(days=1)
#converting and formatting data for html
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
##same as prev_month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
#calendar generic list view
class CalendarView(LoginRequiredMixin, generic.ListView):
model = Event
#template to render
template_name = 'calendar.html'
#setting up context data
def get_context_data(self, **kwargs):
#superclass call
context = super().get_context_data(**kwargs)
#getting date for calendar start
d = get_date(self.request.GET.get('month', None))
user = CustomUser.objects.get(username= self.request.user)
#passing initializing variables for the calendar
cal = Calendar(d.year, d.month, user)
html_cal = cal.formatmonth(withyear=True)
#getting user notes
notes = Notes.objects.filter(user=user)
#defining new context data
context['calendar'] = mark_safe(html_cal)
context['prev_month'] = prev_month(d)
context['next_month'] = next_month(d)
context['notes'] = notes
context['user']= user
return context
#create events
@login_required
def create_event(request):
form = EventForm(request.POST or None)
#checking if the request type is post and if the form is valid
if request.POST and form.is_valid():
#getting specific inputs from the Django form and storing them in separate variables
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding data in the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating event in database using the event_id given in the url
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if location already exists
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exists using the Google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if it exists, save city in database
obj= form.save(commit=False)
obj.user = user
obj.save()
#these should really be plain geocoding params (not "weather") because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location coordinates
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if information is available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax request with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without a form submission
#get all weather locations saved by the user
cities = Locations.objects.filter(user=user)
#create empty array to store all weather data about each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lng are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
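# A condensed sketch of the two-step lookup used above (geocode the address with the Google
# API, then query the OpenWeatherMap One Call endpoint with the returned coordinates); the
# helper name is hypothetical and the URL/key variables are the ones defined in weatherView:
#   def fetch_weather(address, api_key, geocode_url, onecall_url):
#       geo = requests.get(geocode_url, params={'key': api_key, 'address': address}).json()
#       loc = geo['results'][0]['geometry']['location']
#       return requests.get(onecall_url.format(lat=loc['lat'], lon=loc['lng'])).json()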
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST, validate and save
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum of 9 members per event
if member.count() <= 9:
#save member
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceeded!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member using the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete from database
member.delete()
#return successful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delete, same process as delete member
@login_required
def | (request, note_id):
note= Notes.objects.get(id= note_id)
note.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#add file for event view
@login_required
def add_files(request):
#getting the event to which we want to add file
event_id = request.POST.get('event_id')
event = Event.objects.get(id=event_id)
#list of the files to upload; this is a list because the HTML form allows the user to select multiple files
files = request.FILES.getlist('files')
#looping through all selected files
for file in files:
fs= FileSystemStorage()
#saving the file and getting the path to it
file_path = fs.save(file.name, file)
#creating new EventFiles object
sfile= EventFiles(event = event, files = file_path)
#saving the object
sfile.save()
return redirect('manCal:event-detail', event_id = event_id,)
#create note
@login_required
def add_note(request):
#getting the user and the content of the note
if request.method == 'POST':
user = CustomUser.objects.get(username= request.user)
note = request.POST.get('note')
#creating new note
new_note = Notes.objects.create(
user = user,
note = note
)
#returning created object to Ajax request converting the model data to dictionary
return JsonResponse({'note' : model_to_dict(new_note)}, status=200)
#update note status
@login_required
def note_complited(request, note_id):
#getting note from note id
note = Notes.objects.get(id=note_id)
#changing note status
if note.complited == True:
note.complited = False
elif note.complited == False:
note.complited = True
#saving new status
note.save()
#returning to Ajax like in create note
return JsonResponse({'note' : model_to_dict(note)}, status=200)
#exercise detail view
@login_required
def healthView(request):
user = CustomUser.objects.get(username= request.user)
#get exercise details if already created
if Exercise.objects.filter(user= user).exists():
exercise = Exercise.objects.get(user= user)
context = {
'exercise' : exercise
}
#passing data to template
return render(request, 'health.html', context)
#if it does not exist, render without exercise data
return render(request, 'health.html')
#update exercise
@login_required
def addExercise(request):
if request.method == 'POST':
#get variable from post request
user = CustomUser.objects.get(username= request.user)
lunges_set = int(request.POST.get('Lunges_set'))
lunges_rep = int(request.POST.get('Lunges_rep'))
pushups_set = int(request.POST.get('Pushups_set'))
pushups_rep = int(request.POST.get('Pushups_rep'))
squats_set = int(request.POST.get('Squats_set'))
squats_rep = int(request.POST.get('Squats_rep'))
burpees_set = int(request.POST.get('Burpees_set'))
burpees_rep = int(request.POST.get('Burpees_rep'))
planks_set = int(request.POST.get('Planks_set'))
planks_rep = int(request.POST.get('Planks_rep'))
#if no previous data exists, create a new one
if not Exercise.objects.filter(user= user).exists():
Exercise.objects.create(
user= user,
Lunges_set = lunges_set,
Lunges_rep = lunges_rep,
Pushups_set = pushups_set,
Pushups_rep = pushups_rep,
Squats_set = squats_set,
Squats_rep = squats_rep,
Burpees_set = burpees_set,
Burpees_rep = burpees_rep,
Planks_set = planks_set,
Planks_rep = planks_rep
)
return redirect("manCal:health")
# if it exists, update existing data
else:
exercise = Exercise.objects.get(user= user)
exercise.Lunges_set = lunges_set
exercise.Lunges_rep = lunges_rep
exercise.Pushups_set = pushups_set
exercise.Pushups_rep = pushups_rep
exercise.Squats_set = squats_set
exercise.Squats_rep = squats_rep
exercise.Burpees_set = burpees_set
exercise.Burpees_rep = burpees_rep
exercise.Planks_set = planks_set
exercise.Planks_rep = planks_rep
exercise.save()
return redirect("manCal:health")
| note_delete | identifier_name |
views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import signUpForm, EventForm, AddMemberForm, AddLocation
import requests
from datetime import datetime, date
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.views import generic
from django.utils.safestring import mark_safe
from datetime import timedelta, date
import calendar
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.core.files.storage import FileSystemStorage
from django.contrib import messages
from .models import *
from .utils import Calendar
from django.forms.models import model_to_dict
from django.contrib.auth.forms import UserCreationForm
#the tag @login_required makes it so that only logged-in users can access the view
@login_required
def indexView(request):
# the view collects information from multiple tables (events, notes) and returns it to the user
user = CustomUser.objects.get(username= request.user)
today = datetime.today()
start_time = today.replace(hour=23, minute=59)
end_time = today.replace(hour=00, minute=1)
events = Event.objects.filter(user = user)
notes = Notes.objects.filter(user=user)
events_today = []
for event in events:
#filtering events to see which are active on the day
if event.start_time <= start_time and event.end_time >= end_time:
#adding event information to a dictionary
event_today = {
'event_id' : event.id,
'start_time': event.start_time,
'end_time' : event.end_time,
'content' : event.description,
'title': event.title
}
#appending created dictionary
events_today.append(event_today)
context = {
'events_today' : events_today,
'notes' : notes
}
return render(request, "index.html", context)
#homepage view
def homeView(request):
#check if user is authenticated
if request.user.is_authenticated:
#if true render index
return redirect("manCal:index")
#else render homepage
return render(request, 'home.html')
#login view
def loginView(request):
# Get username and password from request
username = request.POST['username']
password = request.POST['password']
# Authenticate the user; if the credentials are valid this returns a user object, otherwise None
user = authenticate(request, username=username, password=password)
# If user is authenticated
if user is not None:
# Save username and password to the session, plus loggedin variable set to True
|
else:
raise Http404('Wrong credentials')
# If logged in, session variables are cleaned up and user logged out. Otherwise redirected to login page
@login_required
def logoutView(request):
logout(request)
#after logging out, send the user back to the login page
return redirect("/login")
#registration
def signUpView(request):
#checking if method is POST
if request.method == "POST":
#getting form from request
form = signUpForm(request.POST)
#validating form
if form.is_valid():
#if valid, save and redirect to login with a message
form.save()
messages.success(request,"Registration Successful!")
return redirect("/login")
else:
#error
print('failed after validation')
else:
#clean up form
form = signUpForm()
return render(request, "signup.html", {"form": form})
#view to updated account info
@login_required
def profileView(request):
#checking request method
if request.method == 'POST':
#extracting form information from the request and storing it in local variables
user = CustomUser.objects.get(username= request.user)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
#updating existing values with the updated ones
user.first_name= first_name
user.last_name = last_name
user.email=email
#save and redirect to same page
user.save()
return redirect("manCal:profile")
context = {
}
return render(request, "profile.html", context)
# start calendar render views
#get date for starting calendar date
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return date(year, month, day=1)
return datetime.today()
#action to go prev month
def prev_month(d):
#changing the day with which the calendar is started
first = d.replace(day=1)
prev_month = first - timedelta(days=1)
#converting and formatting data for html
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
##same as prev_month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
#calendar generic list view
class CalendarView(LoginRequiredMixin, generic.ListView):
model = Event
#template to render
template_name = 'calendar.html'
#setting up context data
def get_context_data(self, **kwargs):
#superclass call
context = super().get_context_data(**kwargs)
#getting date for calendar start
d = get_date(self.request.GET.get('month', None))
user = CustomUser.objects.get(username= self.request.user)
#passing initializing variables for the calendar
cal = Calendar(d.year, d.month, user)
html_cal = cal.formatmonth(withyear=True)
#getting user notes
notes = Notes.objects.filter(user=user)
#defining new context data
context['calendar'] = mark_safe(html_cal)
context['prev_month'] = prev_month(d)
context['next_month'] = next_month(d)
context['notes'] = notes
context['user']= user
return context
#create events
@login_required
def create_event(request):
form = EventForm(request.POST or None)
#checking if the request type is post and if the form is valid
if request.POST and form.is_valid():
#getting specific inputs from the Django form and storing them in separate variables
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding data in the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating event in database using the event_id given in the url
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if location already exists
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exists using the Google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if it exists, save city in database
obj= form.save(commit=False)
obj.user = user
obj.save()
#these should really be plain geocoding params (not "weather") because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location coordinates
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if information is available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax request with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without a form submission
#get all weather locations saved by the user
cities = Locations.objects.filter(user=user)
#create empty array to store all weather data about each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lng are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
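# Minimal sketch of the OpenWeatherMap One Call response fields read by weatherView
# above: only the keys the view accesses are shown, with placeholder values.
EXAMPLE_ONECALL_RESPONSE = {
    'current': {'temp': 18.3},
    'daily': [
        {
            'weather': [{'main': 'Clouds', 'icon': '03d'}],
            'temp': {'max': 21.2, 'min': 12.7},
        },
    ],
}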
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST, validate and save
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 members per event
if member.count() <= 9:
#save member
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceeded!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member using the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete from database
member.delete()
#return successful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delete, same process as delete member
@login_required
def note_delete(request, note_id):
note= Notes.objects.get(id= note_id)
note.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#add file for event view
@login_required
def add_files(request):
#getting the event to which we want to add file
event_id = request.POST.get('event_id')
event = Event.objects.get(id=event_id)
#list of files to upload; this is a list because the HTML form allows the user to select multiple files
files = request.FILES.getlist('files')
#looping through all selected files
for file in files:
fs= FileSystemStorage()
#saving the file and getting the path to it
file_path = fs.save(file.name, file)
#creating new EventFiles object
sfile= EventFiles(event = event, files = file_path)
#saving the object
sfile.save()
return redirect('manCal:event-detail', event_id = event_id,)
#create note
@login_required
def add_note(request):
#getting the user and the content of the note
if request.method == 'POST':
user = CustomUser.objects.get(username= request.user)
note = request.POST.get('note')
#creating new note
new_note = Notes.objects.create(
user = user,
note = note
)
#returning created object to Ajax request converting the model data to dictionary
return JsonResponse({'note' : model_to_dict(new_note)}, status=200)
#update note status
@login_required
def note_complited(request, note_id):
#getting note from note id
note = Notes.objects.get(id=note_id)
#changing note status
if note.complited == True:
note.complited = False
elif note.complited == False:
note.complited = True
#saving new status
note.save()
#returning to Ajax as in create note
return JsonResponse({'note' : model_to_dict(note)}, status=200)
#exercise detail view
@login_required
def healthView(request):
user = CustomUser.objects.get(username= request.user)
#get exercise details if already created
if Exercise.objects.filter(user= user).exists():
exercise = Exercise.objects.get(user= user)
context = {
'exercise' : exercise
}
#passing data to template
return render(request, 'health.html', context)
#if it does not exist, render without exercise data
return render(request, 'health.html')
#update exercise
@login_required
def addExercise(request):
if request.method == 'POST':
#get variables from the POST request
user = CustomUser.objects.get(username= request.user)
lunges_set = int(request.POST.get('Lunges_set'))
lunges_rep = int(request.POST.get('Lunges_rep'))
pushups_set = int(request.POST.get('Pushups_set'))
pushups_rep = int(request.POST.get('Pushups_rep'))
squats_set = int(request.POST.get('Squats_set'))
squats_rep = int(request.POST.get('Squats_rep'))
burpees_set = int(request.POST.get('Burpees_set'))
burpees_rep = int(request.POST.get('Burpees_rep'))
planks_set = int(request.POST.get('Planks_set'))
planks_rep = int(request.POST.get('Planks_rep'))
#if no previous data exists, create a new record
if not Exercise.objects.filter(user= user).exists():
Exercise.objects.create(
user= user,
Lunges_set = lunges_set,
Lunges_rep = lunges_rep,
Pushups_set = pushups_set,
Pushups_rep = pushups_rep,
Squats_set = squats_set,
Squats_rep = squats_rep,
Burpees_set = burpees_set,
Burpees_rep = burpees_rep,
Planks_set = planks_set,
Planks_rep = planks_rep
)
return redirect("manCal:health")
# if it exists, update the existing data
else:
exercise = Exercise.objects.get(user= user)
exercise.Lunges_set = lunges_set
exercise.Lunges_rep = lunges_rep
exercise.Pushups_set = pushups_set
exercise.Pushups_rep = pushups_rep
exercise.Squats_set = squats_set
exercise.Squats_rep = squats_rep
exercise.Burpees_set = burpees_set
exercise.Burpees_rep = burpees_rep
exercise.Planks_set = planks_set
exercise.Planks_rep = planks_rep
exercise.save()
return redirect("manCal:health")
| request.session['username'] = username
request.session['password'] = password
context = {
'username': username,
'password': password,
'loggedin': True
}
response = render(request, 'index.html', context)
# Remember last login in cookie
now = datetime.utcnow()
max_age = 365 * 24 * 60 * 60 #one year
delta = now + timedelta(seconds=max_age)
format = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = datetime.strftime(delta, format)
response.set_cookie('last_login',now,expires=expires)
#return response
return redirect("/index") | conditional_block |
views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import signUpForm, EventForm, AddMemberForm, AddLocation
import requests
from datetime import datetime, date
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.views import generic
from django.utils.safestring import mark_safe
from datetime import timedelta, date
import calendar
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse, reverse_lazy
from django.core.files.storage import FileSystemStorage
from django.contrib import messages
from .models import *
from .utils import Calendar
from django.forms.models import model_to_dict
from django.contrib.auth.forms import UserCreationForm
#the @login_required decorator makes it so that only logged-in users can access the view
@login_required
def indexView(request):
# the view collects information from multiple tables (events, notes) and returns it to the user
user = CustomUser.objects.get(username= request.user)
today = datetime.today()
start_time = today.replace(hour=23, minute=59)
end_time = today.replace(hour=00, minute=1)
events = Event.objects.filter(user = user)
notes = Notes.objects.filter(user=user)
events_today = []
for event in events:
#filtering events to see which are active on the day
if event.start_time <= start_time and event.end_time >= end_time:
#adding event information in a dictionary
event_today = {
'event_id' : event.id,
'start_time': event.start_time,
'end_time' : event.end_time,
'content' : event.description,
'title': event.title
}
#appending created dictionary
events_today.append(event_today)
context = {
'events_today' : events_today,
'notes' : notes
}
return render(request, "index.html", context)
#homepage view
def homeView(request):
#check if user is authenticated
if request.user.is_authenticated:
#if true render index
return redirect("manCal:index")
#else render homepage
return render(request, 'home.html')
#login view
def loginView(request):
# Get username and password from request
username = request.POST['username']
password = request.POST['password']
# Authenticate the user; if the credentials match this returns a user object, otherwise None
user = authenticate(request, username=username, password=password)
# If user is authenticated
if user is not None:
# Save username and password to the session, plus loggedin variable set to True
request.session['username'] = username
request.session['password'] = password
context = {
'username': username,
'password': password,
'loggedin': True
}
response = render(request, 'index.html', context)
# Remember last login in cookie
now = datetime.utcnow()
max_age = 365 * 24 * 60 * 60 #one year
delta = now + timedelta(seconds=max_age)
format = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = datetime.strftime(delta, format)
response.set_cookie('last_login',now,expires=expires)
#return response
return redirect("/index")
else:
raise Http404('Wrong credentials')
# If logged in, session variables are cleaned up and user logged out. Otherwise redirected to login page
@login_required
def logoutView(request):
logout(request)
#registration
def signUpView(request):
#checking if method is POST
if request.method == "POST":
#getting form from request
form = signUpForm(request.POST)
#validating form
if form.is_valid():
#if valid, save and redirect to login with a message
form.save()
messages.success(request,"Registration Successful!")
return redirect("/login")
else:
#error
print('failed after validation')
else:
#clean up form
form = signUpForm()
return render(request, "signup.html", {"form": form})
#view to update account info
@login_required
def profileView(request):
#checking request method
if request.method == 'POST':
#extracting form information from the request and storing it in local variables
user = CustomUser.objects.get(username= request.user)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
#updating existing values with the new ones
user.first_name= first_name
user.last_name = last_name
user.email=email
#save and redirect to same page
user.save()
return redirect("manCal:profile")
context = {
}
return render(request, "profile.html", context)
# start calendar render views
#get date for starting calendar date
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return date(year, month, day=1)
return datetime.today()
#action to go prev month
def prev_month(d):
#changing the day with which the calendar is started
first = d.replace(day=1)
prev_month = first - timedelta(days=1)
#converting and formatting data for html
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
##same as prev_month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
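# e.g. for d = date(2021, 12, 5): prev_month(d) returns 'month=2021-11' and
# next_month(d) returns 'month=2022-1'; get_date() parses these back from the
# 'month' query parameter.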
#calendar generic list view
class CalendarView(LoginRequiredMixin, generic.ListView):
model = Event
#template to render
template_name = 'calendar.html'
#setting up context data
def get_context_data(self, **kwargs):
#superclass call
context = super().get_context_data(**kwargs)
#getting date for calendar start
d = get_date(self.request.GET.get('month', None))
user = CustomUser.objects.get(username= self.request.user)
#passing initialization variables to the calendar
cal = Calendar(d.year, d.month, user)
html_cal = cal.formatmonth(withyear=True)
#getting user notes
notes = Notes.objects.filter(user=user)
#defining new context data
context['calendar'] = mark_safe(html_cal)
context['prev_month'] = prev_month(d)
context['next_month'] = next_month(d)
context['notes'] = notes
context['user']= user
return context
#create events
@login_required
def create_event(request):
form = EventForm(request.POST or None)
#checking if the request type is post and if the form is valid
if request.POST and form.is_valid():
#getting specific inputs from the Django form and storing them in separate variables
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding post on the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating the event in the database using the event_id given in the url
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if the location already exists
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exists using the Google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if it exists, save the city in the database
obj= form.save(commit=False)
obj.user = user
obj.save()
#these are geocoding params (not weather) because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location coordinates
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if information is available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax request with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without a form submission
#get all weather locations saved by the user
cities = Locations.objects.filter(user=user)
#create an empty list to store the weather data for each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lng are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST, validate and save
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 members per event
if member.count() <= 9:
#save member
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceeded!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member using the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete from database
member.delete()
#return successful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delete, same process as delete member
@login_required
def note_delete(request, note_id):
note= Notes.objects.get(id= note_id)
note.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#add file for event view
@login_required
def add_files(request):
#getting the event to which we want to add file
event_id = request.POST.get('event_id')
event = Event.objects.get(id=event_id)
#list of files to upload; this is a list because the HTML form allows the user to select multiple files
files = request.FILES.getlist('files')
#looping through all selected files
for file in files:
fs= FileSystemStorage()
#saving the file and getting the path to it
file_path = fs.save(file.name, file)
#creating new EventFiles object
sfile= EventFiles(event = event, files = file_path)
#saving the object
sfile.save()
return redirect('manCal:event-detail', event_id = event_id,)
#create note
@login_required
def add_note(request):
#getting the user and the content of the note
if request.method == 'POST':
user = CustomUser.objects.get(username= request.user)
note = request.POST.get('note')
#creating new note
new_note = Notes.objects.create(
user = user,
note = note
)
#returning created object to Ajax request converting the model data to dictionary
return JsonResponse({'note' : model_to_dict(new_note)}, status=200)
#update note status
@login_required
def note_complited(request, note_id):
#getting note from note id
note = Notes.objects.get(id=note_id)
#changing note status
if note.complited == True: | note.save()
#returning to Ajax as in create note
return JsonResponse({'note' : model_to_dict(note)}, status=200)
#exercise detail view
@login_required
def healthView(request):
user = CustomUser.objects.get(username= request.user)
#get exercise details if already created
if Exercise.objects.filter(user= user).exists():
exercise = Exercise.objects.get(user= user)
context = {
'exercise' : exercise
}
#passing data to template
return render(request, 'health.html', context)
#if it does not exist, render without exercise data
return render(request, 'health.html')
#update exercise
@login_required
def addExercise(request):
if request.method == 'POST':
#get variables from the POST request
user = CustomUser.objects.get(username= request.user)
lunges_set = int(request.POST.get('Lunges_set'))
lunges_rep = int(request.POST.get('Lunges_rep'))
pushups_set = int(request.POST.get('Pushups_set'))
pushups_rep = int(request.POST.get('Pushups_rep'))
squats_set = int(request.POST.get('Squats_set'))
squats_rep = int(request.POST.get('Squats_rep'))
burpees_set = int(request.POST.get('Burpees_set'))
burpees_rep = int(request.POST.get('Burpees_rep'))
planks_set = int(request.POST.get('Planks_set'))
planks_rep = int(request.POST.get('Planks_rep'))
#if no previous data exists, create a new record
if not Exercise.objects.filter(user= user).exists():
Exercise.objects.create(
user= user,
Lunges_set = lunges_set,
Lunges_rep = lunges_rep,
Pushups_set = pushups_set,
Pushups_rep = pushups_rep,
Squats_set = squats_set,
Squats_rep = squats_rep,
Burpees_set = burpees_set,
Burpees_rep = burpees_rep,
Planks_set = planks_set,
Planks_rep = planks_rep
)
return redirect("manCal:health")
# if it exists, update the existing data
else:
exercise = Exercise.objects.get(user= user)
exercise.Lunges_set = lunges_set
exercise.Lunges_rep = lunges_rep
exercise.Pushups_set = pushups_set
exercise.Pushups_rep = pushups_rep
exercise.Squats_set = squats_set
exercise.Squats_rep = squats_rep
exercise.Burpees_set = burpees_set
exercise.Burpees_rep = burpees_rep
exercise.Planks_set = planks_set
exercise.Planks_rep = planks_rep
exercise.save()
return redirect("manCal:health") | note.complited = False
elif note.complited == False:
note.complited = True
#saveing new status | random_line_split |
views.py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import signUpForm, EventForm, AddMemberForm, AddLocation
import requests
from datetime import datetime, date
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.views import generic
from django.utils.safestring import mark_safe
from datetime import timedelta, date
import calendar
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse, reverse_lazy
from django.core.files.storage import FileSystemStorage
from django.contrib import messages
from .models import *
from .utils import Calendar
from django.forms.models import model_to_dict
from django.contrib.auth.forms import UserCreationForm
#the @login_required decorator makes it so that only logged-in users can access the view
@login_required
def indexView(request):
# the view collects information from multiple tables (events, notes) and returns it to the user
user = CustomUser.objects.get(username= request.user)
today = datetime.today()
start_time = today.replace(hour=23, minute=59)
end_time = today.replace(hour=00, minute=1)
events = Event.objects.filter(user = user)
notes = Notes.objects.filter(user=user)
events_today = []
for event in events:
#filtering events to see which are active on the day
if event.start_time <= start_time and event.end_time >= end_time:
#adding event information in a dictionary
event_today = {
'event_id' : event.id,
'start_time': event.start_time,
'end_time' : event.end_time,
'content' : event.description,
'title': event.title
}
#appending created dictionary
events_today.append(event_today)
context = {
'events_today' : events_today,
'notes' : notes
}
return render(request, "index.html", context)
#homepage view
def homeView(request):
#check if user is authenticated
if request.user.is_authenticated:
#if true render index
return redirect("manCal:index")
#else render homepage
return render(request, 'home.html')
#login view
def loginView(request):
# Get username and password from request
username = request.POST['username']
password = request.POST['password']
# Authenticate the user; if the credentials match this returns a user object, otherwise None
user = authenticate(request, username=username, password=password)
# If user is authenticated
if user is not None:
# Save username and password to the session, plus loggedin variable set to True
request.session['username'] = username
request.session['password'] = password
context = {
'username': username,
'password': password,
'loggedin': True
}
response = render(request, 'index.html', context)
# Remember last login in cookie
now = datetime.utcnow()
max_age = 365 * 24 * 60 * 60 #one year
delta = now + timedelta(seconds=max_age)
format = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = datetime.strftime(delta, format)
response.set_cookie('last_login',now,expires=expires)
#return response
return redirect("/index")
else:
raise Http404('Wrong credentials')
# If logged in, session variables are cleaned up and user logged out. Otherwise redirected to login page
@login_required
def logoutView(request):
logout(request)
#registration
def signUpView(request):
#checking if method is POST
if request.method == "POST":
#getting form from request
form = signUpForm(request.POST)
#validating form
if form.is_valid():
#if valid, save and redirect to login with a message
form.save()
messages.success(request,"Registration Successful!")
return redirect("/login")
else:
#error
print('failed after validation')
else:
#clean up form
form = signUpForm()
return render(request, "signup.html", {"form": form})
#view to update account info
@login_required
def profileView(request):
#checking request method
if request.method == 'POST':
#extracting form information from the request and storing it in local variables
user = CustomUser.objects.get(username= request.user)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
#updating existing values with the new ones
user.first_name= first_name
user.last_name = last_name
user.email=email
#save and redirect to same page
user.save()
return redirect("manCal:profile")
context = {
}
return render(request, "profile.html", context)
# start calendar render views
#get date for starting calendar date
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return date(year, month, day=1)
return datetime.today()
#action to go prev month
def prev_month(d):
#changing the day with which the calendar is started
first = d.replace(day=1)
prev_month = first - timedelta(days=1)
#converting and formatting data for html
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
##same as prev_month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
#calendar generic list view
class CalendarView(LoginRequiredMixin, generic.ListView):
model = Event
#template to render
template_name = 'calendar.html'
#setting up context data
def get_context_data(self, **kwargs):
#superclass call
context = super().get_context_data(**kwargs)
#getting date for calendar start
d = get_date(self.request.GET.get('month', None))
user = CustomUser.objects.get(username= self.request.user)
#passing initialization variables to the calendar
cal = Calendar(d.year, d.month, user)
html_cal = cal.formatmonth(withyear=True)
#getting user notes
notes = Notes.objects.filter(user=user)
#defining new context data
context['calendar'] = mark_safe(html_cal)
context['prev_month'] = prev_month(d)
context['next_month'] = next_month(d)
context['notes'] = notes
context['user']= user
return context
#create events
@login_required
def create_event(request):
form = EventForm(request.POST or None)
#checking if the request type is post and if the form is valid
if request.POST and form.is_valid():
#getting specific inputs from the Django form and storing them in separate variables
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding post on the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating the event in the database using the event_id given in the url
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if the location already exists
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exists using the Google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if it exists, save the city in the database
obj= form.save(commit=False)
obj.user = user
obj.save()
#these are geocoding params (not weather) because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location coordinates
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if information is available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax request with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without a form submission
#get all weather locations saved by the user
cities = Locations.objects.filter(user=user)
#create an empty list to store the weather data for each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lng are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST, validate and save
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 members per event
if member.count() <= 9:
#save member
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceeded!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member using the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete from database
member.delete()
#return successful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
|
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delete, same process as delete member
@login_required
def note_delete(request, note_id):
note= Notes.objects.get(id= note_id)
note.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#add file for event view
@login_required
def add_files(request):
#getting the event to which we want to add file
event_id = request.POST.get('event_id')
event = Event.objects.get(id=event_id)
#list of files to upload; this is a list because the HTML form allows the user to select multiple files
files = request.FILES.getlist('files')
#looping through all selected files
for file in files:
fs= FileSystemStorage()
#saving the file and getting the path to it
file_path = fs.save(file.name, file)
#creating new EventFiles object
sfile= EventFiles(event = event, files = file_path)
#saving the object
sfile.save()
return redirect('manCal:event-detail', event_id = event_id,)
#create note
@login_required
def add_note(request):
#getting the user and the content of the note
if request.method == 'POST':
user = CustomUser.objects.get(username= request.user)
note = request.POST.get('note')
#creating new note
new_note = Notes.objects.create(
user = user,
note = note
)
#returning created object to Ajax request converting the model data to dictionary
return JsonResponse({'note' : model_to_dict(new_note)}, status=200)
#update note status
@login_required
def note_complited(request, note_id):
#getting note from note id
note = Notes.objects.get(id=note_id)
#changing note status
if note.complited == True:
note.complited = False
elif note.complited == False:
note.complited = True
#saving new status
note.save()
#returning to Ajax as in create note
return JsonResponse({'note' : model_to_dict(note)}, status=200)
#exercise detail view
@login_required
def healthView(request):
user = CustomUser.objects.get(username= request.user)
#get exercise details if already created
if Exercise.objects.filter(user= user).exists():
exercise = Exercise.objects.get(user= user)
context = {
'exercise' : exercise
}
#passing data to template
return render(request, 'health.html', context)
#if it does not exist, render without exercise data
return render(request, 'health.html')
#update exercise
@login_required
def addExercise(request):
if request.method == 'POST':
#get variables from the POST request
user = CustomUser.objects.get(username= request.user)
lunges_set = int(request.POST.get('Lunges_set'))
lunges_rep = int(request.POST.get('Lunges_rep'))
pushups_set = int(request.POST.get('Pushups_set'))
pushups_rep = int(request.POST.get('Pushups_rep'))
squats_set = int(request.POST.get('Squats_set'))
squats_rep = int(request.POST.get('Squats_rep'))
burpees_set = int(request.POST.get('Burpees_set'))
burpees_rep = int(request.POST.get('Burpees_rep'))
planks_set = int(request.POST.get('Planks_set'))
planks_rep = int(request.POST.get('Planks_rep'))
#if no previous data exists, create a new record
if not Exercise.objects.filter(user= user).exists():
Exercise.objects.create(
user= user,
Lunges_set = lunges_set,
Lunges_rep = lunges_rep,
Pushups_set = pushups_set,
Pushups_rep = pushups_rep,
Squats_set = squats_set,
Squats_rep = squats_rep,
Burpees_set = burpees_set,
Burpees_rep = burpees_rep,
Planks_set = planks_set,
Planks_rep = planks_rep
)
return redirect("manCal:health")
# if it exists, update the existing data
else:
exercise = Exercise.objects.get(user= user)
exercise.Lunges_set = lunges_set
exercise.Lunges_rep = lunges_rep
exercise.Pushups_set = pushups_set
exercise.Pushups_rep = pushups_rep
exercise.Squats_set = squats_set
exercise.Squats_rep = squats_rep
exercise.Burpees_set = burpees_set
exercise.Burpees_rep = burpees_rep
exercise.Planks_set = planks_set
exercise.Planks_rep = planks_rep
exercise.save()
return redirect("manCal:health")
| file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200) | identifier_body |
utils.py | #! /usr/bin/env python
import csv
import itertools
import numpy as np
import nltk
from nltk import word_tokenize
import time
import cPickle
import sys
import operator
import io
import array
from datetime import datetime
from gru_theano import GRUTheano
from keras.datasets.data_utils import get_file
from keras.preprocessing import sequence
SENTENCE_START_TOKEN = "SENTENCE_START"
SENTENCE_END_TOKEN = "SENTENCE_END"
UNKNOWN_TOKEN = "UNKNOWN_TOKEN"
def load_data(filename="data/reddit-comments-2015-08.csv", vocabulary_size=2000, min_sent_characters=0):
word_to_index = []
index_to_word = []
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
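# Illustrative driver (a sketch, not part of the original module): shows how
# load_data, GRUTheano and train_with_sgd below fit together. The vocabulary
# size and hidden dimension are assumptions chosen only for this example.
def example_training_run():
    # build training data and vocabulary from the default CSV
    x_train, y_train, word_to_index, index_to_word = load_data(vocabulary_size=2000)
    # small GRU model; constructor mirrors the call used in load_model_parameters_theano
    model = GRUTheano(2000, hidden_dim=128)
    # a single short epoch of SGD for demonstration
    train_with_sgd(model, x_train, y_train, learning_rate=0.001, nepoch=1)
    return model, word_to_index, index_to_word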
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadData('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
|
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
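# Example callback of the shape train_with_sgd expects above: a sketch that
# checkpoints the model every callback_every examples (the file name pattern is
# only an illustrative assumption).
# usage: train_with_sgd(model, x, y, callback_every=10000, callback=example_sgd_callback)
def example_sgd_callback(model, epoch, num_examples_seen):
    print "Epoch %d, examples seen: %d" % (epoch, num_examples_seen)
    save_model_parameters_theano(model, "model-checkpoint-epoch%d" % epoch)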
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the mode, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# calculate the relative error: |x - y| / (|x| + |y|)
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE_END_TOKEN]:
#print('not finished')
next_word_probs = model.predict(new_sentence)[-1]
if sum(next_word_probs) < 1.:
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
else:
sampled_word = word_to_index[UNKNOWN_TOKEN]
if sampled_word < len(index_to_word):
new_sentence.append(sampled_word)
else:
new_sentence.append(word_to_index[UNKNOWN_TOKEN])
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
#print(new_sentence)
if len(new_sentence) > 50 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
#return None
return new_sentence
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_sentences(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_sentence(model, index_to_word, word_to_index)
print_sentence(sent, index_to_word)
def saveStuff(stuff, path=None):
"""
Saves stuff to disk as pickle object
:type stuff: any type
:param stuff: data to be stored
Return: create pickle file at path
"""
if path == None:
# TODO take name from something
output = open('results/i-will-be-overwritten.pkl', 'wb')
else:
output = open(path, 'wb')
# Pickle the list using the highest protocol available.
cPickle.dump(stuff, output, -1)
output.close()
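# Example usage (the path is only an illustrative name; any writable location works):
# saveStuff((X_train, y_train, word_to_index, index_to_word), path='results/reddit-vocab2000.pkl')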
| print('corpus length:', len(text)) | conditional_block |
utils.py | #! /usr/bin/env python
import csv
import itertools
import numpy as np
import nltk
from nltk import word_tokenize
import time
import cPickle
import sys
import operator
import io
import array
from datetime import datetime
from gru_theano import GRUTheano
from keras.datasets.data_utils import get_file
from keras.preprocessing import sequence
SENTENCE_START_TOKEN = "SENTENCE_START"
SENTENCE_END_TOKEN = "SENTENCE_END"
UNKNOWN_TOKEN = "UNKNOWN_TOKEN"
def load_data(filename="data/reddit-comments-2015-08.csv", vocabulary_size=2000, min_sent_characters=0):
word_to_index = []
index_to_word = []
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadData('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the mode, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# calculate the relative error: |x - y| / (|x| + |y|)
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE_END_TOKEN]:
#print('not finished')
next_word_probs = model.predict(new_sentence)[-1]
if sum(next_word_probs) < 1.:
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
else:
sampled_word = word_to_index[UNKNOWN_TOKEN]
if sampled_word < len(index_to_word):
new_sentence.append(sampled_word)
else:
new_sentence.append(word_to_index[UNKNOWN_TOKEN])
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
#print(new_sentence)
if len(new_sentence) > 50 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
#return None
return new_sentence
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_sentences(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_sentence(model, index_to_word, word_to_index)
print_sentence(sent, index_to_word)
def saveStuff(stuff, path=None):
| """
Saves stuff to disk as pickle object
:type stuff: any type
:param stuff: data to be stored
Return: create pickle file at path
"""
if path == None:
# TODO take name from something
output = open('results/i-will-be-overwritten.pkl', 'wb')
else:
output = open(path, 'wb')
# Pickle the list using the highest protocol available.
cPickle.dump(stuff, output, -1)
output.close() | identifier_body |
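# Illustrative sketch: a matching loader for the pickle files written by
# saveStuff above. loadStuff is a hypothetical helper named only for this
# example; it is not defined elsewhere in this codebase.
def loadStuff(path):
    infile = open(path, 'rb')
    stuff = cPickle.load(infile)
    infile.close()
    return stuff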
|
utils.py | #! /usr/bin/env python
import csv
import itertools
import numpy as np
import nltk
from nltk import word_tokenize
import time
import cPickle
import sys
import operator
import io
import array
from datetime import datetime
from gru_theano import GRUTheano
from keras.datasets.data_utils import get_file
from keras.preprocessing import sequence
SENTENCE_START_TOKEN = "SENTENCE_START"
SENTENCE_END_TOKEN = "SENTENCE_END"
UNKNOWN_TOKEN = "UNKNOWN_TOKEN"
def load_data(filename="data/reddit-comments-2015-08.csv", vocabulary_size=2000, min_sent_characters=0):
word_to_index = []
index_to_word = []
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
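# Illustrative sketch: the vocabulary/index mapping built by load_data above,
# shown with collections.Counter instead of nltk.FreqDist so it runs without
# the nltk corpora. The ordering details differ slightly from load_data.
def example_build_vocab(tokenized_sentences, vocabulary_size=6):
    from collections import Counter
    word_freq = Counter(itertools.chain(*tokenized_sentences))
    vocab = word_freq.most_common(vocabulary_size - 2)
    index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [w for w, _ in vocab]
    word_to_index = dict((w, i) for i, w in enumerate(index_to_word))
    return index_to_word, word_to_index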
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadText('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
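# Illustrative sketch: the save/load pair above only round-trips numpy arrays
# through an .npz archive, which can be checked without a Theano model. The
# file path and array shape below are arbitrary for the example.
def example_npz_roundtrip(outfile="/tmp/params-example.npz"):
    E = np.random.randn(48, 2000)
    np.savez(outfile, E=E)
    npzfile = np.load(outfile)
    assert np.allclose(E, npzfile["E"])
    return npzfile["E"].shape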
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the model, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def | (s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE_END_TOKEN]:
#print('not finished')
next_word_probs = model.predict(new_sentence)[-1]
if sum(next_word_probs) < 1.:
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
else:
sampled_word = word_to_index[UNKNOWN_TOKEN]
if sampled_word < len(index_to_word):
new_sentence.append(sampled_word)
else:
new_sentence.append(word_to_index[UNKNOWN_TOKEN])
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
#print(new_sentence)
if len(new_sentence) > 50 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
#return None
return new_sentence
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_sentences(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_sentence(model, index_to_word, word_to_index)
print_sentence(sent, index_to_word)
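# Illustrative sketch: the single next-word draw used inside generate_sentence,
# i.e. one sample from a probability vector via np.random.multinomial. The
# guard on sum(next_word_probs) in generate_sentence appears to work around
# multinomial rejecting probability vectors that sum to more than 1.
def example_sample_next_word(next_word_probs):
    samples = np.random.multinomial(1, next_word_probs)
    return np.argmax(samples)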
def saveStuff(stuff, path=None):
"""
Saves stuff to disk as pickle object
:type stuff: any type
:param stuff: data to be stored
Return: create pickle file at path
"""
if path == None:
# TODO take name from something
output = open('results/i-will-be-overwritten.pkl', 'wb')
else:
output = open(path, 'wb')
# Pickle the list using the highest protocol available.
cPickle.dump(stuff, output, -1)
output.close()
| print_sentence | identifier_name |
utils.py | #! /usr/bin/env python
import csv
import itertools
import numpy as np
import nltk
from nltk import word_tokenize
import time
import cPickle
import sys
import operator
import io
import array
from datetime import datetime
from gru_theano import GRUTheano
from keras.datasets.data_utils import get_file
from keras.preprocessing import sequence
SENTENCE_START_TOKEN = "SENTENCE_START"
SENTENCE_END_TOKEN = "SENTENCE_END"
UNKNOWN_TOKEN = "UNKNOWN_TOKEN"
def load_data(filename="data/reddit-comments-2015-08.csv", vocabulary_size=2000, min_sent_characters=0):
word_to_index = []
index_to_word = []
# Read the data and append SENTENCE_START and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadText('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]] |
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the model, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE_END_TOKEN]:
#print('not finished')
next_word_probs = model.predict(new_sentence)[-1]
if sum(next_word_probs) < 1.:
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
else:
sampled_word = word_to_index[UNKNOWN_TOKEN]
if sampled_word < len(index_to_word):
new_sentence.append(sampled_word)
else:
new_sentence.append(word_to_index[UNKNOWN_TOKEN])
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
#print(new_sentence)
if len(new_sentence) > 50 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
#return None
return new_sentence
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_sentences(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_sentence(model, index_to_word, word_to_index)
print_sentence(sent, index_to_word)
def saveStuff(stuff, path=None):
"""
Saves stuff to disk as pickle object
:type stuff: any type
:param stuff: data to be stored
Return: create pickle file at path
"""
if path == None:
# TODO take name from something
output = open('results/i-will-be-overwritten.pkl', 'wb')
else:
output = open(path, 'wb')
# Pickle the list using the highest protocol available.
cPickle.dump(stuff, output, -1)
output.close() |
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32) | random_line_split |
Surreal.py | # ----------------------------------------------------------------------------------------------------------------------
#
# Class handling Surreal Dataset (training and testing)
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
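# Illustrative sketch: calling the wrapper above on random data, assuming the
# compiled cpp_subsampling extension is importable; the float32 input and the
# 0.05 voxel size are assumptions made only for this example.
def example_grid_subsampling():
    points = np.random.rand(10000, 3).astype(np.float32)
    sub_points = grid_subsampling(points, sampleDl=0.05)
    print('kept', sub_points.shape[0], 'of', points.shape[0], 'points')
    return sub_points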
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def | (stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on to multiply element-wise in the stack
input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]
input_list += [stacked_evecs_full]
return input_list
return tf_map
| tf_map | identifier_name |
Surreal.py | # ----------------------------------------------------------------------------------------------------------------------
#
# Class handling Surreal Dataset (training and testing)
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
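# Illustrative sketch (pure numpy): the generator above yields clouds stacked
# along axis 0 together with a stack_lengths vector; this shows how to recover
# per-cloud slices and per-point batch indices from that convention (the role
# tf_get_batch_inds plays on the tensorflow side).
def example_unstack(stacked_points, stack_lengths):
    batch_inds = np.repeat(np.arange(len(stack_lengths)), stack_lengths)
    splits = np.cumsum(stack_lengths)[:-1]
    clouds = np.split(stacked_points, splits, axis=0)
    return clouds, batch_inds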
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
|
return tf_map
| """
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on to multiply element-wise in the stack
input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]
input_list += [stacked_evecs_full]
return input_list | identifier_body |
Surreal.py | # ----------------------------------------------------------------------------------------------------------------------
#
# Class handling Surreal Dataset (training and testing)
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
|
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on to multiply element-wise in the stack
input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]
input_list += [stacked_evecs_full]
return input_list
return tf_map
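# Illustrative sketch (pure numpy): the in_features_dim switch inside tf_map,
# where dim 1 keeps only a constant-one feature column and dim 3 appends the
# XYZ coordinates to that column.
def example_input_features(points, in_features_dim=1):
    ones = np.ones((points.shape[0], 1), dtype=np.float32)
    if in_features_dim == 1:
        return ones
    elif in_features_dim == 3:
        return np.concatenate((ones, points), axis=1)
    raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')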
| return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose) | conditional_block |
Surreal.py | # ----------------------------------------------------------------------------------------------------------------------
#
# Class handling Surreal Dataset (training and testing)
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features | :param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on to multiply element-wise in the stack
input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]
input_list += [stacked_evecs_full]
return input_list
return tf_map | :param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number) | random_line_split |
widget.rs | //! Widget controller.
//!
//! The Widget Controller is responsible for querying the language server for information about
//! the node's widget configuration or resolving it from local cache.
mod configuration;
mod response;
use crate::prelude::*;
use crate::controller::visualization::manager::Manager;
use crate::controller::visualization::manager::Notification;
use crate::controller::ExecutedGraph;
use crate::executor::global::spawn_stream_handler;
use crate::model::execution_context::VisualizationUpdateData;
use engine_protocol::language_server::SuggestionId;
use ensogl::define_endpoints_2;
use ide_view::graph_editor::component::visualization;
use ide_view::graph_editor::component::visualization::Metadata;
use ide_view::graph_editor::data::enso::Code;
use ide_view::graph_editor::ArgumentWidgetConfig;
use ide_view::graph_editor::CallWidgetsConfig;
// =================
// === Constants ===
// =================
/// A module containing the widget visualization method.
const WIDGET_VISUALIZATION_MODULE: &str = "Standard.Visualization.Widgets";
/// A name of the widget visualization method.
const WIDGET_VISUALIZATION_METHOD: &str = "get_widget_json";
// ===============
// === Aliases ===
// ===============
/// An ID of a node in the graph. Always refers to the root expression.
type NodeId = ast::Id;
// An ID of any sub expression in the node, which can have a widget attached to it.
type ExpressionId = ast::Id;
// ==================
// === Controller ===
// ==================
define_endpoints_2! {
Input {
/// Create or update widget query with given definition.
request_widgets(Request),
/// Remove all widget queries of given node that are not on this list.
retain_node_expressions(NodeId, HashSet<ast::Id>),
/// Remove all widget data associated with given node.
remove_all_node_widgets(NodeId),
}
Output {
/// Emitted when the node's visualization has been set.
widget_data(NodeId, CallWidgetsConfig),
}
}
/// Graph widgets controller. Handles requests for widget configuration using visualizations. Maps
/// response data to the relevant node Id updates, and dispatches them over the FRP output.
/// Guarantees that each individual query eventually receives an update. It internally caches the
/// results of the last queries, so that the configuration can be delivered to the presenter even
/// when no visualization change is necessary.
#[derive(Debug, Deref)]
pub struct Controller {
#[deref]
frp: Frp,
#[allow(dead_code)]
model: Rc<RefCell<Model>>,
}
impl Controller {
/// Constructor
pub fn new(executed_graph: ExecutedGraph) -> Self {
let (manager, manager_notifications) = Manager::new(executed_graph.clone_ref());
let frp = Frp::new();
let model = Rc::new(RefCell::new(Model {
manager,
graph: executed_graph.clone_ref(),
widgets_of_node: default(),
widget_queries: default(),
}));
let network = &frp.network;
let input = &frp.input;
let output = &frp.private.output;
frp::extend! { network
updates_from_cache <- input.request_widgets.filter_map(
f!((definition) model.borrow_mut().request_widget(definition))
);
output.widget_data <+ updates_from_cache;
eval input.retain_node_expressions(((node_id, expr_ids)) {
model.borrow_mut().retain_node_expressions(*node_id, expr_ids)
});
eval input.remove_all_node_widgets((node_id) {
model.borrow_mut().remove_all_node_widgets(*node_id)
});
};
let out_widget_data = output.widget_data.clone_ref();
let weak = Rc::downgrade(&model);
spawn_stream_handler(weak, manager_notifications, move |notification, model| {
let data = model.borrow_mut().handle_notification(notification);
if let Some(data) = data {
out_widget_data.emit(data);
}
std::future::ready(())
});
Self { frp, model }
}
}
// =============
// === Model ===
// =============
/// Model of the Widget controller. Manages the widget queries, stores responses in cache. See
/// [`Controller`] for more information.
#[derive(Debug)]
pub struct Model {
manager: Rc<Manager>,
graph: ExecutedGraph,
widgets_of_node: NodeToWidgetsMapping,
/// Map of queries by the target expression ID. Required to be able to map visualization update
/// responses to the corresponding widgets.
widget_queries: HashMap<ExpressionId, QueryData>,
}
impl Model {
/// Visualization update notification handler. Updates the cache and returns the widget updates
/// when the notification provides new data.
fn handle_notification(
&mut self,
notification: Notification,
) -> Option<(NodeId, CallWidgetsConfig)> {
let report_error = |message, error| {
error!("{message}: {error}");
None
};
match notification {
Notification::ValueUpdate { target, data, .. } =>
self.handle_visualization_value_update(target, data),
Notification::FailedToAttach { error, .. } =>
report_error("Failed to attach widget visualization", error),
Notification::FailedToDetach { error, .. } =>
report_error("Failed to detach widget visualization", error),
Notification::FailedToModify { error, .. } =>
report_error("Failed to modify widget visualization", error),
}
}
/// Handle visualization data update. Return widget update data.
fn handle_visualization_value_update(
&mut self,
target: ast::Id,
data: VisualizationUpdateData,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id != request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if !retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on new request. Returns true if the visualization query needs to
/// be updated.
fn update(&mut self, suggestion: &enso_suggestion_database::Entry, req: &Request) -> bool {
let mut visualization_modified = false;
if self.method_name != suggestion.name {
self.method_name = suggestion.name.clone();
visualization_modified = true;
}
let mut zipped_arguments = self.arguments.iter().zip(&suggestion.arguments);
if self.arguments.len() != suggestion.arguments.len()
|| !zipped_arguments.all(|(a, b)| a == &b.name)
{
self.arguments =
suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
visualization_modified = true;
}
self.node_id = req.node_id;
self.call_expression = req.call_expression;
visualization_modified
}
fn last_definitions(&self) -> Option<(NodeId, CallWidgetsConfig)> {
self.last_definitions.as_ref().map(|definitions| {
let call_id = self.call_expression;
let config = CallWidgetsConfig { call_id, definitions: definitions.clone() };
(self.node_id, config)
})
}
fn request_visualization(&mut self, manager: &Rc<Manager>, target_expression: ast::Id) {
// When visualization is requested, remove stale queried value to prevent updates while
// language server request is pending.
self.last_definitions.take();
let vis_metadata = self.visualization_metadata();
manager.request_visualization(target_expression, vis_metadata);
}
/// Generate visualization metadata for this query.
fn visualization_metadata(&self) -> Metadata {
let arguments: Vec<Code> = vec![
Self::as_unresolved_symbol(&self.method_name).into(),
Self::arg_sequence(&self.arguments).into(),
];
let preprocessor = visualization::instance::PreprocessorConfiguration {
module: WIDGET_VISUALIZATION_MODULE.into(),
method: WIDGET_VISUALIZATION_METHOD.into(),
arguments: Rc::new(arguments),
};
Metadata { preprocessor }
}
/// Escape a string to be used as a visualization argument. Transforms the string into an enso
/// expression with string literal.
fn escape_visualization_argument(arg: &str) -> String {
Ast::raw_text_literal(arg).repr()
}
/// Creates an unresolved symbol via the ".name" syntax. An unresolved symbol contains the name
/// and also the module scope needed to resolve it properly.
fn as_unresolved_symbol(arg: &str) -> String {
format!(".{arg}")
}
/// Escape a list of strings to be used as a visualization argument. Transforms the strings into
/// an enso expression with a list of string literals.
fn arg_sequence(args: &[ImString]) -> String {
let mut buffer = String::from("[");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buffer.push_str(", ");
}
buffer.push_str(&Self::escape_visualization_argument(arg));
}
buffer.push(']');
buffer
}
}
widget.rs | //! Widget controller.
//!
//! The Widget Controller is responsible for querying the language server for information about
//! the node's widget configuration or resolving it from local cache.
mod configuration;
mod response;
use crate::prelude::*;
use crate::controller::visualization::manager::Manager;
use crate::controller::visualization::manager::Notification;
use crate::controller::ExecutedGraph;
use crate::executor::global::spawn_stream_handler;
use crate::model::execution_context::VisualizationUpdateData;
use engine_protocol::language_server::SuggestionId;
use ensogl::define_endpoints_2;
use ide_view::graph_editor::component::visualization;
use ide_view::graph_editor::component::visualization::Metadata;
use ide_view::graph_editor::data::enso::Code;
use ide_view::graph_editor::ArgumentWidgetConfig;
use ide_view::graph_editor::CallWidgetsConfig;
// =================
// === Constants ===
// =================
/// A module containing the widget visualization method.
const WIDGET_VISUALIZATION_MODULE: &str = "Standard.Visualization.Widgets";
/// A name of the widget visualization method.
const WIDGET_VISUALIZATION_METHOD: &str = "get_widget_json";
// ===============
// === Aliases ===
// ===============
/// An ID of a node in the graph. Always refers to the root expression.
type NodeId = ast::Id;
/// An ID of any sub-expression in the node, which can have a widget attached to it.
type ExpressionId = ast::Id;
// ==================
// === Controller ===
// ==================
define_endpoints_2! {
Input {
/// Create or update widget query with given definition.
request_widgets(Request),
/// Remove all widget queries of given node that are not on this list.
retain_node_expressions(NodeId, HashSet<ast::Id>),
/// Remove all widget data associated with given node.
remove_all_node_widgets(NodeId),
}
Output {
/// Emitted when the node's visualization has been set.
widget_data(NodeId, CallWidgetsConfig),
}
}
/// Graph widgets controller. Handles requests for widget configuration using visualizations. Maps
/// response data to the relevant node Id updates, and dispatches them over the FRP output.
/// Guarantees that each individual query eventually receives an update. It internally caches the
/// results of the last queries, so that the configuration can be delivered to the presenter even
/// when no visualization change is necessary.
#[derive(Debug, Deref)]
pub struct Controller {
#[deref]
frp: Frp,
#[allow(dead_code)]
model: Rc<RefCell<Model>>,
}
impl Controller {
/// Constructor
pub fn new(executed_graph: ExecutedGraph) -> Self {
let (manager, manager_notifications) = Manager::new(executed_graph.clone_ref());
let frp = Frp::new();
let model = Rc::new(RefCell::new(Model {
manager,
graph: executed_graph.clone_ref(),
widgets_of_node: default(),
widget_queries: default(),
}));
let network = &frp.network;
let input = &frp.input;
let output = &frp.private.output;
frp::extend! { network
updates_from_cache <- input.request_widgets.filter_map(
f!((definition) model.borrow_mut().request_widget(definition))
);
output.widget_data <+ updates_from_cache;
eval input.retain_node_expressions(((node_id, expr_ids)) {
model.borrow_mut().retain_node_expressions(*node_id, expr_ids)
});
eval input.remove_all_node_widgets((node_id) {
model.borrow_mut().remove_all_node_widgets(*node_id)
});
};
let out_widget_data = output.widget_data.clone_ref();
let weak = Rc::downgrade(&model);
spawn_stream_handler(weak, manager_notifications, move |notification, model| {
let data = model.borrow_mut().handle_notification(notification);
if let Some(data) = data {
out_widget_data.emit(data);
}
std::future::ready(())
});
Self { frp, model }
}
}
// =============
// === Model ===
// =============
/// Model of the Widget controller. Manages the widget queries, stores responses in cache. See
/// [`Controller`] for more information.
#[derive(Debug)]
pub struct Model {
manager: Rc<Manager>,
graph: ExecutedGraph,
widgets_of_node: NodeToWidgetsMapping,
/// Map of queries by the target expression ID. Required to be able to map visualization update
/// responses to the corresponding widgets.
widget_queries: HashMap<ExpressionId, QueryData>,
}
impl Model {
/// Visualization update notification handler. Updates the cache and returns the widget updates
/// when the notification provides new data.
fn handle_notification(
&mut self,
notification: Notification,
) -> Option<(NodeId, CallWidgetsConfig)> {
let report_error = |message, error| {
error!("{message}: {error}");
None
};
match notification {
Notification::ValueUpdate { target, data, .. } =>
self.handle_visualization_value_update(target, data),
Notification::FailedToAttach { error, .. } =>
report_error("Failed to attach widget visualization", error),
Notification::FailedToDetach { error, .. } =>
report_error("Failed to detach widget visualization", error),
Notification::FailedToModify { error, .. } =>
report_error("Failed to modify widget visualization", error),
}
}
/// Handle visualization data update. Return widget update data.
fn handle_visualization_value_update(
&mut self,
target: ast::Id,
data: VisualizationUpdateData,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id != request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if !retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on new request. Returns true if the visualization query needs to
/// be updated.
fn update(&mut self, suggestion: &enso_suggestion_database::Entry, req: &Request) -> bool {
let mut visualization_modified = false;
if self.method_name != suggestion.name {
self.method_name = suggestion.name.clone();
visualization_modified = true;
}
let mut zipped_arguments = self.arguments.iter().zip(&suggestion.arguments);
if self.arguments.len() != suggestion.arguments.len()
|| !zipped_arguments.all(|(a, b)| a == &b.name)
{
self.arguments =
suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
visualization_modified = true;
}
self.node_id = req.node_id;
self.call_expression = req.call_expression;
visualization_modified
}
fn last_definitions(&self) -> Option<(NodeId, CallWidgetsConfig)> {
self.last_definitions.as_ref().map(|definitions| {
let call_id = self.call_expression;
let config = CallWidgetsConfig { call_id, definitions: definitions.clone() };
(self.node_id, config)
})
}
fn request_visualization(&mut self, manager: &Rc<Manager>, target_expression: ast::Id) {
// When visualization is requested, remove stale queried value to prevent updates while
// language server request is pending.
self.last_definitions.take();
let vis_metadata = self.visualization_metadata();
manager.request_visualization(target_expression, vis_metadata);
}
/// Generate visualization metadata for this query.
fn visualization_metadata(&self) -> Metadata {
let arguments: Vec<Code> = vec![
Self::as_unresolved_symbol(&self.method_name).into(),
Self::arg_sequence(&self.arguments).into(),
];
let preprocessor = visualization::instance::PreprocessorConfiguration {
module: WIDGET_VISUALIZATION_MODULE.into(),
method: WIDGET_VISUALIZATION_METHOD.into(),
arguments: Rc::new(arguments),
};
Metadata { preprocessor }
}
/// Escape a string to be used as a visualization argument. Transforms the string into an enso
/// expression with string literal.
fn escape_visualization_argument(arg: &str) -> String {
Ast::raw_text_literal(arg).repr()
}
/// Creates an unresolved symbol via the ".name" syntax. An unresolved symbol contains the name
/// and also the module scope needed to resolve it properly.
fn as_unresolved_symbol(arg: &str) -> String {
format!(".{arg}")
}
/// Escape a list of strings to be used as a visualization argument. Transforms the strings into
/// an enso expression with a list of string literals.
fn arg_sequence(args: &[ImString]) -> String {
let mut buffer = String::from("[");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buffer.push_str(", ");
}
buffer.push_str(&Self::escape_visualization_argument(arg));
}
buffer.push(']');
buffer
}
}
argument_parser.py | import argparse
import os
import sys
import textwrap
import configargparse
import locust
version = locust.__version__
DEFAULT_CONFIG_FILES = ['~/.locust.conf','locust.conf']
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_locustfile(locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Obtain env value
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names.append(names[0] + '.py')
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work downwards towards filesystem root
path = os.path.abspath('.')
while True:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
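# A minimal usage sketch for find_locustfile. The file names below are
# hypothetical; resolution depends on what actually exists on disk.
def _example_find_locustfile():
    # With ./loadtests/locustfile.py present, both spellings resolve to the
    # same absolute path; a missing name falls through to None.
    with_ext = find_locustfile('loadtests/locustfile.py')
    without_ext = find_locustfile('loadtests/locustfile')
    missing = find_locustfile('no_such_locustfile')
    return with_ext, without_ext, missing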
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
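# A short sketch of the pre-parsing step this parser enables: only the
# -f/--locustfile option is understood here, everything else is left for the
# full parser. The locustfile name is a placeholder.
def _example_preparse_locustfile():
    parser = get_empty_argument_parser(add_help=False)
    options, unknown = parser.parse_known_args(['-f', 'my_locustfile.py', '--headless'])
    return options.locustfile, unknown  # -> ('my_locustfile.py', ['--headless'])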
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
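# Sketch of the intended two-stage flow: the -f option is parsed first so the
# locustfile can be imported (and can register extra options) before the full
# parser runs. Assumes sys.argv points at an existing locustfile.
def _example_two_stage_parse():
    locustfile_path = parse_locustfile_option()   # exits if no locustfile is found
    options = parse_options()                     # full parse, including any added options
    return locustfile_path, options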
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified."
)
# Number of clients to increase by step
step_load_group.add_argument(
'--step-clients',
type=int,
default=1,
help="Client count to increase by step in Step Load mode. Only used together with --step-load"
)
# Time limit of each step
step_load_group.add_argument(
'--step-time',
help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load"
)
other_group = parser.add_argument_group("Other options")
# Display ratio table of all tasks
other_group.add_argument(
'--show-task-ratio',
action='store_true',
help="Print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
other_group.add_argument(
'--show-task-ratio-json',
action='store_true',
help="Print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
other_group.add_argument(
'--version', '-V',
action='version',
help="Show program's version number and exit",
version='%(prog)s {}'.format(version),
)
# set the exit code to post on errors
other_group.add_argument(
'--exit-code-on-error',
type=int,
default=1,
help="Sets the process exit code to use when a test result contain any failure or error"
)
other_group.add_argument(
'-s', '--stop-timeout',
action='store',
type=int,
dest='stop_timeout',
default=None,
help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed."
)
locust_classes_group = parser.add_argument_group("Locust user classes")
locust_classes_group.add_argument(
'locust_classes',
nargs='*',
metavar='LocustClass',
help="Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)",
)
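# A small sketch of wiring the argument definitions onto a bare parser and
# parsing a headless-style command line; the values are illustrative.
def _example_setup_and_parse():
    parser = get_empty_argument_parser()
    setup_parser_arguments(parser)
    options = parser.parse_args(['--headless', '-c', '10', '-r', '2'])
    return options.num_clients, options.hatch_rate  # -> (10, 2.0)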
def get_parser(default_config_files=DEFAULT_CONFIG_FILES):
# get a parser that is only able to parse the -f argument
parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files)
# add all the other supported arguments
setup_parser_arguments(parser)
# fire event to provide a hook for locustscripts and plugins to add command line arguments
locust.events.init_command_line_parser.fire(parser=parser)
return parser
def parse_options(args=None):
parser = get_parser()
if 'LOCUST_MASTER_SERVICE' in os.environ and not isinstance(os.environ.get('LOCUST_MASTER_PORT', 0), int):
sys.stderr.write("Are you running in kubernetes? If you have a container called LOCUST_MASTER, kubernetes will set an env var called LOCUST_MASTER_PORT which will collide with locust's --master-port setting. Please rename your container.\n")
# parse command line and return options
options = parser.parse_args(args=args)
return options
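# Illustrative end-to-end parse of a typical headless run; option values are
# placeholders and the locustfile name is only stored, not validated, here.
def _example_parse_headless_run():
    options = parse_options(['-f', 'my_locustfile.py', '--headless', '-c', '50', '-r', '5', '-t', '10m'])
    return options.locustfile, options.num_clients, options.run_time  # -> ('my_locustfile.py', 50, '10m')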
argument_parser.py | import argparse
import os
import sys
import textwrap
import configargparse
import locust
version = locust.__version__
DEFAULT_CONFIG_FILES = ['~/.locust.conf','locust.conf']
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_locustfile(locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Obtain env value
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names.append(names[0] + '.py')
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work downwards towards filesystem root
path = os.path.abspath('.')
while True:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified."
)
# Number of clients to increase by step
step_load_group.add_argument(
'--step-clients',
type=int,
default=1,
help="Client count to increase by step in Step Load mode. Only used together with --step-load"
)
# Time limit of each step
step_load_group.add_argument(
'--step-time',
help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load"
)
other_group = parser.add_argument_group("Other options")
# Display ratio table of all tasks
other_group.add_argument(
'--show-task-ratio',
action='store_true',
help="Print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
other_group.add_argument(
'--show-task-ratio-json',
action='store_true',
help="Print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
other_group.add_argument(
'--version', '-V',
action='version',
help="Show program's version number and exit",
version='%(prog)s {}'.format(version),
)
# set the exit code to post on errors
other_group.add_argument(
'--exit-code-on-error',
type=int,
default=1,
help="Sets the process exit code to use when a test result contain any failure or error"
)
other_group.add_argument(
'-s', '--stop-timeout',
action='store',
type=int,
dest='stop_timeout',
default=None,
help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed."
)
locust_classes_group = parser.add_argument_group("Locust user classes")
locust_classes_group.add_argument(
'locust_classes',
nargs='*',
metavar='LocustClass',
help="Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)",
)
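# Step Load flags as defined above, exercised on a locally built parser;
# durations and counts are placeholders.
def _example_parse_step_load():
    parser = get_empty_argument_parser()
    setup_parser_arguments(parser)
    options = parser.parse_args(['--headless', '--step-load', '--step-clients', '50', '--step-time', '30s'])
    return options.step_load, options.step_clients, options.step_time  # -> (True, 50, '30s')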
def get_parser(default_config_files=DEFAULT_CONFIG_FILES):
# get a parser that is only able to parse the -f argument
parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files)
# add all the other supported arguments
setup_parser_arguments(parser)
# fire event to provide a hook for locustscripts and plugins to add command line arguments
locust.events.init_command_line_parser.fire(parser=parser)
return parser
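# Sketch of the plugin hook fired above: a listener added to
# init_command_line_parser can register extra options before parsing. The
# `+=` listener registration assumes the EventHook API bundled with this
# Locust version.
def _example_register_extra_option():
    def _add_flag(parser, **kwargs):
        parser.add_argument('--my-custom-flag', action='store_true', default=False)
    locust.events.init_command_line_parser += _add_flag
    options = parse_options(['--my-custom-flag'])
    return options.my_custom_flag  # -> True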
def parse_options(args=None):
parser = get_parser()
if 'LOCUST_MASTER_SERVICE' in os.environ and not isinstance(os.environ.get('LOCUST_MASTER_PORT', 0), int):
sys.stderr.write("Are you running in kubernetes? If you have a container called LOCUST_MASTER, kubernetes will set an env var called LOCUST_MASTER_PORT which will collide with locust's --master-port setting. Please rename your container.\n")
# parse command line and return options
options = parser.parse_args(args=args)
return options
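# A sketch of parsing distributed-mode flags for a master and a worker
# process; hosts and counts here are placeholders.
def _example_parse_distributed():
    master = parse_options(['-f', 'my_locustfile.py', '--master', '--expect-workers', '2'])
    worker = parse_options(['-f', 'my_locustfile.py', '--worker', '--master-host', '10.0.0.5'])
    return master.expect_workers, worker.master_host  # -> (2, '10.0.0.5')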
argument_parser.py | import argparse
import os
import sys
import textwrap
import configargparse
import locust
version = locust.__version__
DEFAULT_CONFIG_FILES = ['~/.locust.conf','locust.conf']
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_locustfile(locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Obtain env value
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names.append(names[0] + '.py')
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work downwards towards filesystem root
path = os.path.abspath('.')
while True:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
|
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified."
)
# Number of clients to increase by step
step_load_group.add_argument(
'--step-clients',
type=int,
default=1,
help="Client count to increase by step in Step Load mode. Only used together with --step-load"
)
# Time limit of each step
step_load_group.add_argument(
'--step-time',
help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load"
)
other_group = parser.add_argument_group("Other options")
# Display ratio table of all tasks
other_group.add_argument(
'--show-task-ratio',
action='store_true',
help="Print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
other_group.add_argument(
'--show-task-ratio-json',
action='store_true',
help="Print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
other_group.add_argument(
'--version', '-V',
action='version',
help="Show program's version number and exit",
version='%(prog)s {}'.format(version),
)
# set the exit code to post on errors
other_group.add_argument(
'--exit-code-on-error',
type=int,
default=1,
help="Sets the process exit code to use when a test result contain any failure or error"
)
other_group.add_argument(
'-s', '--stop-timeout',
action='store',
type=int,
dest='stop_timeout',
default=None,
help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed."
)
locust_classes_group = parser.add_argument_group("Locust user classes")
locust_classes_group.add_argument(
'locust_classes',
nargs='*',
metavar='LocustClass',
help="Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)",
)
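# Web UI flags from the group above, parsed in isolation; host, port and
# credentials are placeholders.
def _example_parse_web_ui_options():
    parser = get_empty_argument_parser()
    setup_parser_arguments(parser)
    options = parser.parse_args(['--web-host', '127.0.0.1', '--web-port', '8090', '--web-auth', 'user:pass'])
    return options.web_host, options.web_port, options.web_auth  # -> ('127.0.0.1', 8090, 'user:pass')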
def get_parser(default_config_files=DEFAULT_CONFIG_FILES):
# get a parser that is only able to parse the -f argument
parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files)
# add all the other supported arguments
setup_parser_arguments(parser)
# fire event to provide a hook for locustscripts and plugins to add command line arguments
locust.events.init_command_line_parser.fire(parser=parser)
return parser
def parse_options(args=None):
parser = get_parser()
if 'LOCUST_MASTER_SERVICE' in os.environ and not isinstance(os.environ.get('LOCUST_MASTER_PORT', 0), int):
sys.stderr.write("Are you running in kubernetes? If you have a container called LOCUST_MASTER, kubernetes will set an env var called LOCUST_MASTER_PORT which will collide with locust's --master-port setting. Please rename your container.\n")
# parse command line and return options
options = parser.parse_args(args=args)
return options
| if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1) | conditional_block |
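A minimal usage sketch for the parsing helpers shown above (a hypothetical quick check, not part of the dataset row; it assumes the module is importable as argument_parser and that the locust package is installed):

# Hypothetical example: parse a headless run from an argv-style list.
from argument_parser import parse_options

options = parse_options(args=[
    '-f', 'my_locustfile.py',  # locustfile to import (name invented for illustration)
    '--headless',              # skip the web UI and start the test immediately
    '-c', '50',                # 50 concurrent Locust users
    '-r', '5',                 # hatch 5 users per second
    '-t', '10m',               # stop after 10 minutes
])
print(options.num_clients, options.hatch_rate, options.run_time)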
argument_parser.py | import argparse
import os
import sys
import textwrap
import configargparse
import locust
version = locust.__version__
DEFAULT_CONFIG_FILES = ['~/.locust.conf','locust.conf']
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_locustfile(locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Obtain env value
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names.append(names[0] + '.py')
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work downwards towards filesystem root
path = os.path.abspath('.')
while True:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
|
def get_parser(default_config_files=DEFAULT_CONFIG_FILES):
# get a parser that is only able to parse the -f argument
parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files)
# add all the other supported arguments
setup_parser_arguments(parser)
# fire event to provide a hook for locustscripts and plugins to add command line arguments
locust.events.init_command_line_parser.fire(parser=parser)
return parser
def parse_options(args=None):
parser = get_parser()
if 'LOCUST_MASTER_SERVICE' in os.environ and not isinstance(os.environ.get('LOCUST_MASTER_PORT', 0), int):
sys.stderr.write("Are you running in kubernetes? If you have a container called LOCUST_MASTER, kubernetes will set an env var called LOCUST_MASTER_PORT which will collide with locust's --master-port setting. Please rename your container.\n")
# parse command line and return options
options = parser.parse_args(args=args)
return options
| """
Set up command-line options.
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments.
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# headless mode: disable the web UI and start the test immediately
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified."
)
# Number of clients to increase by Step
step_load_group.add_argument(
'--step-clients',
type=int,
default=1,
help="Client count to increase by step in Step Load mode. Only used together with --step-load"
)
# Time limit of each step
step_load_group.add_argument(
'--step-time',
help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load"
)
other_group = parser.add_argument_group("Other options")
# Display ratio table of all tasks
other_group.add_argument(
'--show-task-ratio',
action='store_true',
help="Print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
other_group.add_argument(
'--show-task-ratio-json',
action='store_true',
help="Print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
other_group.add_argument(
'--version', '-V',
action='version',
help="Show program's version number and exit",
version='%(prog)s {}'.format(version),
)
# set the exit code to post on errors
other_group.add_argument(
'--exit-code-on-error',
type=int,
default=1,
help="Sets the process exit code to use when a test result contain any failure or error"
)
other_group.add_argument(
'-s', '--stop-timeout',
action='store',
type=int,
dest='stop_timeout',
default=None,
help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed."
)
locust_classes_group = parser.add_argument_group("Locust user classes")
locust_classes_group.add_argument(
'locust_classes',
nargs='*',
metavar='LocustClass',
help="Optionally specify which Locust classes that should be used (available Locust classes can be listed with -l or --list)",
) | identifier_body |
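Because the parser above is built on configargparse with auto_env_var_prefix="LOCUST_" and default config files, the same option can be supplied on the command line, via an environment variable, or in a locust.conf file. A rough sketch of that equivalence (illustrative values; the LOCUST_WEB_PORT name is assumed to follow configargparse's usual prefix-plus-option derivation):

# Three equivalent ways to set --web-port for the parser defined above (sketch).
import os
from argument_parser import parse_options

opts_cli = parse_options(args=['--web-port', '8081'])   # 1) command line

os.environ['LOCUST_WEB_PORT'] = '8081'                   # 2) environment variable
opts_env = parse_options(args=[])

# 3) a `web-port = 8081` line in ~/.locust.conf or ./locust.conf works the same way
assert opts_cli.web_port == opts_env.web_port == 8081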
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else |
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| {
Err(RpcError::MismatchedResponseType.into())
} | conditional_block |
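The client above funnels every incoming binary message through processor(), which either emits a notification or completes the pending request whose correlation id matches. A toy, language-agnostic sketch of that dispatch rule follows (written in Python purely for illustration; names are invented and this is not the real API):

# Toy model of the dispatch performed by Client::processor above.
def dispatch(message, pending, notify, on_error):
    """pending maps correlation ids to completion callbacks for in-flight requests."""
    if message['kind'] == 'VisualizationUpdate':
        # server-initiated notification: no correlation id needed
        notify(message['context'], message['data'])
    elif message.get('correlation_id') in pending:
        # reply to an earlier request: hand the payload to its callback
        pending.pop(message['correlation_id'])(message['payload'])
    else:
        # neither a known notification nor a reply we are waiting for
        on_error('unexpected message')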
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn | (
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| processor | identifier_name |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> |
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
} | identifier_body |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
| // ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
} |
// ===============
// === Fixture === | random_line_split |
anlyzPRR.py | '''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: [email protected]
'''
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib
import re
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
def freqHist3(tbl):
'''python3 version
ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order
'''
from functools import cmp_to_key
def cmpd1(a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
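# Illustrative example (not in the original file): freqHist3 turns a frequency
# table into a list of (value, count) pairs in descending count order, e.g.
# freqHist3({'Open': 3, 'Closed': 10}) -> [('Closed', 10), ('Open', 3)]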
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
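# Illustrative example (not in the original file): a CSV row whose Departments
# cell reads "Fire; Oakland Police Department" is split on ';' and normalized
# through DeptNorm, so prr['dept'] becomes ['Fire Department', 'Police Department']
# and the request id is appended to deptTbl under both normalized names.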
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return next(iter(hist))
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
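# Worked example (sketch, assumed histogram): for hist = {1: 2, 3: 1, 10: 1},
# compHistAvg returns (4, 3.75) (4 requests, mean delay 3.75 days), while
# compMedian accumulates counts 2, 3, 4 against half = 2.0 and returns 3, the
# first value whose cumulative count exceeds the halfway point.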
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub(r'\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('anlyzClearDates: bad create date %r: %s' % (recdDateTime,e))
continue
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
dkey = 1000 + openYears
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
allNDay = sorted(list(allNDaySet))
for nday in allNDay:
if nday > 365:
lbl = '> %d year' % (nday-1000)
else:
lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
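# Illustrative example (not in the original file): a request open for 40 days
# falls in bucket dkey = 40, while one open for 800 days has openYears = 2 and
# lands in dkey = 1002, which the report labels "> 2 year".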
def getWebPages(prrTbl,outf):
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
if webContent.find('pdf') != -1:
print('here')
npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv' | rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile) | random_line_split |
|
anlyzPRR.py | '''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: [email protected]
'''
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib
import re
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
def freqHist3(tbl):
'''python3 version
ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order
'''
from functools import cmp_to_key
def cmpd1(a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return next(iter(hist))
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub(r'\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('anlyzClearDates: bad create date %r: %s' % (recdDateTime,e))
continue
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
|
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
dkey = 1000 + openYears
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
allNDay = sorted(list(allNDaySet))
for nday in allNDay:
if nday > 365:
lbl = '> %d year' % (nday-1000)
else:
lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
def getWebPages(prrTbl,outf):
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
if webContent.find('pdf') != -1:
print('here')
npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv'
rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile)
| outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close() | identifier_body |
anlyzPRR.py | '''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: [email protected]
'''
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib
import re
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
def freqHist3(tbl):
'''python3 version
ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order
'''
from functools import cmp_to_key
def | (a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return next(iter(hist))
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub(r'\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('anlyzClearDates: bad create date %r: %s' % (recdDateTime,e))
continue
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
dkey = 1000 + openYears
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
allNDay = sorted(list(allNDaySet))
for nday in allNDay:
if nday > 365:
lbl = '> %d year' % (nday-1000)
else:
lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
def getWebPages(prrTbl,outf):
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
if webContent.find('pdf') != -1:
print('here')
npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv'
rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile)
| cmpd1 | identifier_name |
anlyzPRR.py | '''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: [email protected]
'''
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib
import re
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
def freqHist3(tbl):
'''python3 version
ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order
'''
from functools import cmp_to_key
def cmpd1(a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return next(iter(hist))
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub(r'\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('anlyzClearDates: bad create date %r: %s' % (recdDateTime,e))
continue
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
|
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
allNDay = sorted(list(allNDaySet))
for nday in allNDay:
if nday > 365:
lbl = '> %d year' % (nday-1000)
else:
lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
def getWebPages(prrTbl,outf):
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
if webContent.find('pdf') != -1:
print('here')
npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv'
rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile)
| dkey = 1000 + openYears | conditional_block |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specificaly, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs is bindings: tasks that
//! bind then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not tracked, so ellidri might reload the same TLS identity for a binding (this is fine, since
//! we are not loading thousands of TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
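// Illustrative sketch (not part of the original file): a binding task is
// expected to react to these commands roughly as follows; the actual handling
// lives in `net::listen`.
//
// match command {
//     Command::UsePlain => { /* accept plain TCP connections */ }
//     Command::UseTls(acceptor) => { /* wrap accepted sockets with `acceptor` */ }
// }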
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
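// Usage note (sketch): `create_runtime(0)` keeps tokio's default worker count,
// while e.g. `create_runtime(4)` pins the pool to four worker threads, matching
// the `workers != 0` check above.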
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() |
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
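// Illustrative example (not part of the original file): with old bindings
// {127.0.0.1:6667, 127.0.0.1:6697} and a new configuration listing
// {127.0.0.1:6697, 127.0.0.1:7000}, do_rehash drops the 6667 task, sends a
// UsePlain/UseTls command to the surviving 6697 task, and spawns a new task for 7000.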
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
pub fn load_config_and_run(config_path: String) {
let cfg = Config::from_file(&config_path).unwrap_or_else(|err| {
log::error!("Failed to read {:?}: {}", config_path, err);
process::exit(1);
});
let runtime = create_runtime(cfg.workers);
runtime.block_on(run(config_path, cfg));
}
pub async fn run(config_path: String, cfg: Config) {
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
}
| {
// Failure to send the command means either the binding task has dropped the
// command channel, or the binding task doesn't exist anymore. Neither case
// should happen (see the doc for `Control.bindings`); but if it does, remove
// the binding from the array that keeps track of them, and spawn the new one
// on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
} | conditional_block |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specifically, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs is bindings: tasks that
//! bind to and then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! Windows is not yet supported), or a REHASH command. When that happens, `Control` re-reads the
//! configuration file and runs a diff algorithm to work out which tasks need to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do so; we are not reading thousands of TLS identities here).
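//!
//! The snippet below is an editor's sketch, not part of the ellidri sources: it illustrates the
//! same diff over plain `HashSet<SocketAddr>`s, whereas the real code works on
//! `Vec<(SocketAddr, mpsc::Sender<Command>)>` and `LoadedBinding` values.
//!
//! ```
//! use std::collections::HashSet;
//! use std::net::SocketAddr;
//!
//! let a: SocketAddr = "127.0.0.1:6667".parse().unwrap();
//! let b: SocketAddr = "127.0.0.1:6697".parse().unwrap();
//! let old: HashSet<SocketAddr> = [a].into_iter().collect();
//! let new: HashSet<SocketAddr> = [a, b].into_iter().collect();
//!
//! // Dropped: in `old` but not in `new`.
//! assert_eq!(old.difference(&new).count(), 0);
//! // Spawned: in `new` but not in `old`.
//! assert_eq!(new.difference(&old).count(), 1);
//! // Kept, and sent a `Command` instead of being restarted: present in both.
//! assert_eq!(old.intersection(&new).count(), 1);
//! ```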
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() {
// Failure to send the command means either the binding task has dropped the
// command channel, or the binding task doesn't exist anymore. Neither case
// should happen (see the doc for `Control.bindings`); but if it does, remove
// the binding from the array that keeps track of them, and spawn the new one
// on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn | (
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also, instead of exiting on failure, it simply skips the failed binding. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
pub fn load_config_and_run(config_path: String) {
let cfg = Config::from_file(&config_path).unwrap_or_else(|err| {
log::error!("Failed to read {:?}: {}", config_path, err);
process::exit(1);
});
let runtime = create_runtime(cfg.workers);
runtime.block_on(run(config_path, cfg));
}
pub async fn run(config_path: String, cfg: Config) {
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
}
| reload_config | identifier_name |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specifically, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs is bindings: tasks that
//! bind to and then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! Windows is not yet supported), or a REHASH command. When that happens, `Control` re-reads the
//! configuration file and runs a diff algorithm to work out which tasks need to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime, | //!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do so; we are not reading thousands of TLS identities here).
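//!
//! The snippet below is an editor's sketch, not part of the ellidri sources: it illustrates the
//! same diff over plain `HashSet<SocketAddr>`s, whereas the real code works on
//! `Vec<(SocketAddr, mpsc::Sender<Command>)>` and `LoadedBinding` values.
//!
//! ```
//! use std::collections::HashSet;
//! use std::net::SocketAddr;
//!
//! let a: SocketAddr = "127.0.0.1:6667".parse().unwrap();
//! let b: SocketAddr = "127.0.0.1:6697".parse().unwrap();
//! let old: HashSet<SocketAddr> = [a].into_iter().collect();
//! let new: HashSet<SocketAddr> = [a, b].into_iter().collect();
//!
//! // Dropped: in `old` but not in `new`.
//! assert_eq!(old.difference(&new).count(), 0);
//! // Spawned: in `new` but not in `old`.
//! assert_eq!(new.difference(&old).count(), 1);
//! // Kept, and sent a `Command` instead of being restarted: present in both.
//! assert_eq!(old.intersection(&new).count(), 1);
//! ```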
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() {
// Failure to send the command means either the binding task has dropped the
// command channel, or the binding task doesn't exist anymore. Neither case
// should happen (see the doc for `Control.bindings`); but if it does, remove
// the binding from the array that keeps track of them, and spawn the new one
// on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also, instead of exiting on failure, it simply skips the failed binding. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
pub fn load_config_and_run(config_path: String) {
let cfg = Config::from_file(&config_path).unwrap_or_else(|err| {
log::error!("Failed to read {:?}: {}", config_path, err);
process::exit(1);
});
let runtime = create_runtime(cfg.workers);
runtime.block_on(run(config_path, cfg));
}
pub async fn run(config_path: String, cfg: Config) {
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
} | //! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that). | random_line_split |
config.rs | use clap::{CommandFactory, Parser};
use pathfinder_common::AllowedOrigins;
use pathfinder_storage::JournalMode;
use reqwest::Url;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use pathfinder_common::consts::VERGEN_GIT_DESCRIBE;
#[derive(Parser)]
#[command(name = "Pathfinder")]
#[command(author = "Equilibrium Labs")]
#[command(version = VERGEN_GIT_DESCRIBE)]
#[command(
about = "A Starknet node implemented by Equilibrium Labs. Submit bug reports and issues at https://github.com/eqlabs/pathfinder."
)]
struct Cli {
#[arg(
long,
value_name = "DIR",
value_hint = clap::ValueHint::DirPath,
long_help = "Directory where the node should store its data",
env = "PATHFINDER_DATA_DIRECTORY",
default_value_os_t = (&std::path::Component::CurDir).into()
)]
data_directory: PathBuf, | env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
#[clap(flatten)]
network: NetworkCli,
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
Auto,
Never,
Always,
}
impl Color {
/// Returns true if color should be enabled, either because the setting is [Color::Always],
/// or because it is [Color::Auto] and stdout is targeting a terminal.
pub fn is_color_enabled(&self) -> bool {
use std::io::IsTerminal;
match self {
Color::Auto => std::io::stdout().is_terminal(),
Color::Never => false,
Color::Always => true,
}
}
}
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
fn from(value: Network) -> Self {
match value {
Network::Mainnet => "mainnet",
Network::Testnet => "testnet",
Network::Testnet2 => "testnet2",
Network::Integration => "integration",
Network::Custom => "custom",
}
.into()
}
}
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
if inputs.is_empty() {
return Ok(None);
}
if inputs.len() == 1 && inputs[0] == "*" {
return Ok(Some(AllowedOrigins::Any));
}
if inputs.iter().any(|s| s == "*") {
return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
}
let valid_origins = inputs
.into_iter()
.map(|input| match url::Url::parse(&input) {
// Valid URL but has to be limited to origin form, i.e. no path, query, trailing slash for default path etc.
Ok(url) => {
let origin = url.origin();
if !origin.is_tuple() {
return Err(RpcCorsDomainsParseError::InvalidDomain(input));
}
if origin.ascii_serialization() == input {
Ok(input)
} else {
// Valid URL but not a valid origin
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
}
// Not an URL hence invalid origin
Err(_e) => {
eprintln!("Url_parse_error: {_e}");
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
})
.collect::<Result<HashSet<_>, RpcCorsDomainsParseError>>()?;
Ok(Some(AllowedOrigins::List(
valid_origins.into_iter().collect(),
)))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
#[derive(Debug, thiserror::Error, PartialEq)]
enum RpcCorsDomainsParseError {
#[error("Invalid allowed domain for CORS: {0}.")]
InvalidDomain(String),
#[error(
"Specify either wildcard '*' or a comma separated list of allowed domains for CORS, not both."
)]
WildcardAmongOtherValues,
}
pub struct Config {
pub data_directory: PathBuf,
pub ethereum: Ethereum,
pub rpc_address: SocketAddr,
pub rpc_cors_domains: Option<AllowedOrigins>,
pub ws: Option<WebSocket>,
pub monitor_address: Option<SocketAddr>,
pub network: Option<NetworkConfig>,
pub poll_pending: bool,
pub python_subprocesses: std::num::NonZeroUsize,
pub sqlite_wal: JournalMode,
pub max_rpc_connections: std::num::NonZeroU32,
pub poll_interval: std::time::Duration,
pub color: Color,
}
pub struct WebSocket {
pub capacity: NonZeroUsize,
}
pub struct Ethereum {
pub url: Url,
pub password: Option<String>,
}
pub enum NetworkConfig {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom {
gateway: Url,
feeder_gateway: Url,
chain_id: String,
},
}
impl NetworkConfig {
fn from_components(args: NetworkCli) -> Option<Self> {
use Network::*;
let cfg = match (
args.network,
args.gateway,
args.feeder_gateway,
args.chain_id,
) {
(None, None, None, None) => return None,
(Some(Custom), Some(gateway), Some(feeder_gateway), Some(chain_id)) => {
NetworkConfig::Custom {
gateway,
feeder_gateway,
chain_id,
}
}
(Some(Custom), _, _, _) => {
unreachable!("`--network custom` requirements are handled by clap derive")
}
// Handle non-custom variants in an inner match so that the compiler will force
// us to handle any new network variants explicitly. Otherwise we end up with a
// catch-all arm that would swallow new variants silently.
(Some(non_custom), None, None, None) => match non_custom {
Mainnet => NetworkConfig::Mainnet,
Testnet => NetworkConfig::Testnet,
Testnet2 => NetworkConfig::Testnet2,
Integration => NetworkConfig::Integration,
Custom => unreachable!("Network::Custom handled in outer arm already"),
},
// clap does not support disallowing args based on an enum value, so we have to check for
// `--network non-custom` + custom required args manually.
_ => {
use clap::error::ErrorKind;
Cli::command().error(ErrorKind::ArgumentConflict, "--gateway-url, --feeder-gateway-url and --chain-id may only be used with --network custom").exit()
}
};
Some(cfg)
}
}
impl Config {
pub fn parse() -> Self {
let cli = Cli::parse();
let network = NetworkConfig::from_components(cli.network);
Config {
data_directory: cli.data_directory,
ethereum: Ethereum {
password: cli.ethereum_password,
url: cli.ethereum_url,
},
rpc_address: cli.rpc_address,
rpc_cors_domains: parse_cors_or_exit(cli.rpc_cors_domains),
ws: cli.ws.then_some(WebSocket {
capacity: cli.ws_capacity,
}),
monitor_address: cli.monitor_address,
network,
poll_pending: cli.poll_pending,
python_subprocesses: cli.python_subprocesses,
sqlite_wal: match cli.sqlite_wal {
true => JournalMode::WAL,
false => JournalMode::Rollback,
},
max_rpc_connections: cli.max_rpc_connections,
poll_interval: std::time::Duration::from_secs(cli.poll_interval.get()),
color: cli.color,
}
}
}
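// Editor's sketch, not part of the pathfinder sources: a minimal check that the clap derive
// above accepts a plain testnet invocation; the Ethereum URL below is a placeholder value.
#[cfg(test)]
mod cli_parse_sketch {
    use super::{Cli, Network};
    use clap::Parser;

    #[test]
    fn accepts_testnet_invocation() {
        let cli = Cli::try_parse_from([
            "pathfinder",
            "--ethereum.url",
            "https://localhost:8545",
            "--network",
            "testnet",
        ])
        .expect("a testnet invocation should parse");
        assert!(matches!(cli.network.network, Some(Network::Testnet)));
    }
}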
#[cfg(test)]
mod tests {
use super::{AllowedOrigins, RpcCorsDomainsParseError};
use crate::config::parse_cors;
#[test]
fn parse_cors_domains() {
let empty = String::new();
let wildcard = "*".to_owned();
let valid = "http://valid.com:1234".to_owned();
let not_url = "not_url".to_string();
let with_path = "http://a.com/path".to_string();
let with_query = "http://a.com/?query=x".to_string();
let with_trailing_slash = format!("{valid}/");
[
(
vec![empty.clone()],
RpcCorsDomainsParseError::InvalidDomain(empty.clone()),
),
(
vec![empty, wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![wildcard.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), with_trailing_slash.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_trailing_slash),
),
(
vec![valid.clone(), not_url.clone()],
RpcCorsDomainsParseError::InvalidDomain(not_url),
),
(
vec![valid.clone(), with_path.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_path),
),
(
vec![valid.clone(), with_query.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_query),
),
]
.into_iter()
.for_each(|(input, expected_error)| {
assert_eq!(
parse_cors(input.clone()).unwrap_err(),
expected_error,
"input: {input:?}"
);
});
[
(vec![], None),
(vec![wildcard], Some(AllowedOrigins::Any)),
(
vec![valid.clone()],
Some(AllowedOrigins::List(vec![valid.clone()])),
),
(
vec![valid.clone(), valid.clone()],
Some(AllowedOrigins::List(vec![valid])),
),
]
.into_iter()
.for_each(|(input, expected_ok)| {
assert_eq!(
parse_cors(input.clone()).unwrap(),
expected_ok,
"input: {input:?}"
)
});
}
} |
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None, | random_line_split |
config.rs | use clap::{CommandFactory, Parser};
use pathfinder_common::AllowedOrigins;
use pathfinder_storage::JournalMode;
use reqwest::Url;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use pathfinder_common::consts::VERGEN_GIT_DESCRIBE;
#[derive(Parser)]
#[command(name = "Pathfinder")]
#[command(author = "Equilibrium Labs")]
#[command(version = VERGEN_GIT_DESCRIBE)]
#[command(
about = "A Starknet node implemented by Equilibrium Labs. Submit bug reports and issues at https://github.com/eqlabs/pathfinder."
)]
struct | {
#[arg(
long,
value_name = "DIR",
value_hint = clap::ValueHint::DirPath,
long_help = "Directory where the node should store its data",
env = "PATHFINDER_DATA_DIRECTORY",
default_value_os_t = (&std::path::Component::CurDir).into()
)]
data_directory: PathBuf,
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None,
env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
#[clap(flatten)]
network: NetworkCli,
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
Auto,
Never,
Always,
}
impl Color {
/// Returns true if color should be enabled, either because the setting is [Color::Always],
/// or because it is [Color::Auto] and stdout is targeting a terminal.
pub fn is_color_enabled(&self) -> bool {
use std::io::IsTerminal;
match self {
Color::Auto => std::io::stdout().is_terminal(),
Color::Never => false,
Color::Always => true,
}
}
}
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
fn from(value: Network) -> Self {
match value {
Network::Mainnet => "mainnet",
Network::Testnet => "testnet",
Network::Testnet2 => "testnet2",
Network::Integration => "integration",
Network::Custom => "custom",
}
.into()
}
}
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
if inputs.is_empty() {
return Ok(None);
}
if inputs.len() == 1 && inputs[0] == "*" {
return Ok(Some(AllowedOrigins::Any));
}
if inputs.iter().any(|s| s == "*") {
return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
}
let valid_origins = inputs
.into_iter()
.map(|input| match url::Url::parse(&input) {
// Valid URL but has to be limited to origin form, i.e. no path, query, trailing slash for default path etc.
Ok(url) => {
let origin = url.origin();
if !origin.is_tuple() {
return Err(RpcCorsDomainsParseError::InvalidDomain(input));
}
if origin.ascii_serialization() == input {
Ok(input)
} else {
// Valid URL but not a valid origin
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
}
// Not an URL hence invalid origin
Err(_e) => {
eprintln!("Url_parse_error: {_e}");
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
})
.collect::<Result<HashSet<_>, RpcCorsDomainsParseError>>()?;
Ok(Some(AllowedOrigins::List(
valid_origins.into_iter().collect(),
)))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
#[derive(Debug, thiserror::Error, PartialEq)]
enum RpcCorsDomainsParseError {
#[error("Invalid allowed domain for CORS: {0}.")]
InvalidDomain(String),
#[error(
"Specify either wildcard '*' or a comma separated list of allowed domains for CORS, not both."
)]
WildcardAmongOtherValues,
}
pub struct Config {
pub data_directory: PathBuf,
pub ethereum: Ethereum,
pub rpc_address: SocketAddr,
pub rpc_cors_domains: Option<AllowedOrigins>,
pub ws: Option<WebSocket>,
pub monitor_address: Option<SocketAddr>,
pub network: Option<NetworkConfig>,
pub poll_pending: bool,
pub python_subprocesses: std::num::NonZeroUsize,
pub sqlite_wal: JournalMode,
pub max_rpc_connections: std::num::NonZeroU32,
pub poll_interval: std::time::Duration,
pub color: Color,
}
pub struct WebSocket {
pub capacity: NonZeroUsize,
}
pub struct Ethereum {
pub url: Url,
pub password: Option<String>,
}
pub enum NetworkConfig {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom {
gateway: Url,
feeder_gateway: Url,
chain_id: String,
},
}
impl NetworkConfig {
fn from_components(args: NetworkCli) -> Option<Self> {
use Network::*;
let cfg = match (
args.network,
args.gateway,
args.feeder_gateway,
args.chain_id,
) {
(None, None, None, None) => return None,
(Some(Custom), Some(gateway), Some(feeder_gateway), Some(chain_id)) => {
NetworkConfig::Custom {
gateway,
feeder_gateway,
chain_id,
}
}
(Some(Custom), _, _, _) => {
unreachable!("`--network custom` requirements are handled by clap derive")
}
// Handle non-custom variants in an inner match so that the compiler will force
// us to handle any new network variants explicitly. Otherwise we end up with a
// catch-all arm that would swallow new variants silently.
(Some(non_custom), None, None, None) => match non_custom {
Mainnet => NetworkConfig::Mainnet,
Testnet => NetworkConfig::Testnet,
Testnet2 => NetworkConfig::Testnet2,
Integration => NetworkConfig::Integration,
Custom => unreachable!("Network::Custom handled in outer arm already"),
},
// clap does not support disallowing args based on an enum value, so we have to check for
// `--network non-custom` + custom required args manually.
_ => {
use clap::error::ErrorKind;
Cli::command().error(ErrorKind::ArgumentConflict, "--gateway-url, --feeder-gateway-url and --chain-id may only be used with --network custom").exit()
}
};
Some(cfg)
}
}
impl Config {
pub fn parse() -> Self {
let cli = Cli::parse();
let network = NetworkConfig::from_components(cli.network);
Config {
data_directory: cli.data_directory,
ethereum: Ethereum {
password: cli.ethereum_password,
url: cli.ethereum_url,
},
rpc_address: cli.rpc_address,
rpc_cors_domains: parse_cors_or_exit(cli.rpc_cors_domains),
ws: cli.ws.then_some(WebSocket {
capacity: cli.ws_capacity,
}),
monitor_address: cli.monitor_address,
network,
poll_pending: cli.poll_pending,
python_subprocesses: cli.python_subprocesses,
sqlite_wal: match cli.sqlite_wal {
true => JournalMode::WAL,
false => JournalMode::Rollback,
},
max_rpc_connections: cli.max_rpc_connections,
poll_interval: std::time::Duration::from_secs(cli.poll_interval.get()),
color: cli.color,
}
}
}
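// Editor's sketch, not part of the pathfinder sources: a minimal check that the clap derive
// above accepts a plain testnet invocation; the Ethereum URL below is a placeholder value.
#[cfg(test)]
mod cli_parse_sketch {
    use super::{Cli, Network};
    use clap::Parser;

    #[test]
    fn accepts_testnet_invocation() {
        let cli = Cli::try_parse_from([
            "pathfinder",
            "--ethereum.url",
            "https://localhost:8545",
            "--network",
            "testnet",
        ])
        .expect("a testnet invocation should parse");
        assert!(matches!(cli.network.network, Some(Network::Testnet)));
    }
}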
#[cfg(test)]
mod tests {
use super::{AllowedOrigins, RpcCorsDomainsParseError};
use crate::config::parse_cors;
#[test]
fn parse_cors_domains() {
let empty = String::new();
let wildcard = "*".to_owned();
let valid = "http://valid.com:1234".to_owned();
let not_url = "not_url".to_string();
let with_path = "http://a.com/path".to_string();
let with_query = "http://a.com/?query=x".to_string();
let with_trailing_slash = format!("{valid}/");
[
(
vec![empty.clone()],
RpcCorsDomainsParseError::InvalidDomain(empty.clone()),
),
(
vec![empty, wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![wildcard.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), with_trailing_slash.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_trailing_slash),
),
(
vec![valid.clone(), not_url.clone()],
RpcCorsDomainsParseError::InvalidDomain(not_url),
),
(
vec![valid.clone(), with_path.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_path),
),
(
vec![valid.clone(), with_query.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_query),
),
]
.into_iter()
.for_each(|(input, expected_error)| {
assert_eq!(
parse_cors(input.clone()).unwrap_err(),
expected_error,
"input: {input:?}"
);
});
[
(vec![], None),
(vec![wildcard], Some(AllowedOrigins::Any)),
(
vec![valid.clone()],
Some(AllowedOrigins::List(vec![valid.clone()])),
),
(
vec![valid.clone(), valid.clone()],
Some(AllowedOrigins::List(vec![valid])),
),
]
.into_iter()
.for_each(|(input, expected_ok)| {
assert_eq!(
parse_cors(input.clone()).unwrap(),
expected_ok,
"input: {input:?}"
)
});
}
}
| Cli | identifier_name |
vacuum.py | """Support for Deebot Vaccums."""
import logging
from typing import Any, Mapping, Optional
import voluptuous as vol
from deebot_client.commands import (
Charge,
Clean,
FanSpeedLevel,
PlaySound,
SetFanSpeed,
SetRelocationState,
SetWaterInfo,
)
from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
|
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
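# Editor's note, not part of the original integration: with two rooms whose subtype is
# "Living Room" (ids 1 and 3) and one "Kitchen" (id 2), the property above would report
# roughly {"rooms": {"living_room": [1, 3], "kitchen": 2}}; the LAST_ERROR entry (for
# example "Wheel lifted (102)", a hypothetical description and code) only appears once
# an ErrorEvent has been received.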
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "set_water":
_LOGGER.warning("DEPRECATED! Please use water select entity instead.")
await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"]))
else:
await self._vacuum_bot.execute_command(CustomCommand(command, params))
async def _service_refresh(self, part: str) -> None:
"""Service to manually refresh."""
_LOGGER.debug("Manually refresh %s", part)
event = REFRESH_STR_TO_EVENT_DTO.get(part, None)
if event:
self._vacuum_bot.events.request_refresh(event)
elif part == REFRESH_MAP:
self._vacuum_bot.map.refresh()
else:
_LOGGER.warning('Service "refresh" called with unknown part: %s', part)
| self._rooms = event.rooms
self.async_write_ha_state() | identifier_body |
vacuum.py | """Support for Deebot Vaccums."""
import logging
from typing import Any, Mapping, Optional
import voluptuous as vol
from deebot_client.commands import (
Charge,
Clean,
FanSpeedLevel,
PlaySound,
SetFanSpeed,
SetRelocationState,
SetWaterInfo,
)
from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
| | SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
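# Illustrative service call; the "deebot" domain, the entity id and the part value
# are assumptions, not taken from this repository:
#
#   service: deebot.refresh
#   target:
#     entity_id: vacuum.deebot_living_room
#   data:
#     part: map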
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
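        # Illustrative result: {"living_room": 3, "bedroom": [1, 4]}; a name seen
        # once keeps a scalar id, duplicate names are collected into a list.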
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "set_water":
_LOGGER.warning("DEPRECATED! Please use water select entity instead.")
await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"]))
else:
await self._vacuum_bot.execute_command(CustomCommand(command, params))
async def _service_refresh(self, part: str) -> None:
"""Service to manually refresh."""
_LOGGER.debug("Manually refresh %s", part)
event = REFRESH_STR_TO_EVENT_DTO.get(part, None)
if event:
self._vacuum_bot.events.request_refresh(event)
elif part == REFRESH_MAP:
self._vacuum_bot.map.refresh()
else:
_LOGGER.warning('Service "refresh" called with unknown part: %s', part) | SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP | random_line_split |
vacuum.py | """Support for Deebot Vaccums."""
import logging
from typing import Any, Mapping, Optional
import voluptuous as vol
from deebot_client.commands import (
Charge,
Clean,
FanSpeedLevel,
PlaySound,
SetFanSpeed,
SetRelocationState,
SetWaterInfo,
)
from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
|
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
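        # Wrapping unsubscribe_listeners in a lambda defers it until Home Assistant
        # removes the entity, so all subscriptions above are released in one call.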
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "set_water":
_LOGGER.warning("DEPRECATED! Please use water select entity instead.")
await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"]))
else:
await self._vacuum_bot.execute_command(CustomCommand(command, params))
async def _service_refresh(self, part: str) -> None:
"""Service to manually refresh."""
_LOGGER.debug("Manually refresh %s", part)
event = REFRESH_STR_TO_EVENT_DTO.get(part, None)
if event:
self._vacuum_bot.events.request_refresh(event)
elif part == REFRESH_MAP:
self._vacuum_bot.map.refresh()
else:
_LOGGER.warning('Service "refresh" called with unknown part: %s', part)
| async_add_entities(new_devices) | conditional_block |
vacuum.py | """Support for Deebot Vaccums."""
import logging
from typing import Any, Mapping, Optional
import voluptuous as vol
from deebot_client.commands import (
Charge,
Clean,
FanSpeedLevel,
PlaySound,
SetFanSpeed,
SetRelocationState,
SetWaterInfo,
)
from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
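        # The callbacks below translate device events into entity state: most cache
        # the new value and write HA state, while the custom-command and stats
        # handlers re-fire the event on the Home Assistant bus instead.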
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def | (event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "set_water":
_LOGGER.warning("DEPRECATED! Please use water select entity instead.")
await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"]))
else:
await self._vacuum_bot.execute_command(CustomCommand(command, params))
async def _service_refresh(self, part: str) -> None:
"""Service to manually refresh."""
_LOGGER.debug("Manually refresh %s", part)
event = REFRESH_STR_TO_EVENT_DTO.get(part, None)
if event:
self._vacuum_bot.events.request_refresh(event)
elif part == REFRESH_MAP:
self._vacuum_bot.map.refresh()
else:
_LOGGER.warning('Service "refresh" called with unknown part: %s', part)
| on_fan_speed | identifier_name |
lib.rs | //! # OpenID Connect Client
//!
//! There are two ways to interact with this library - the batteries included magic methods, and
//! the slightly more boilerplate fine grained ones. For most users the former is what you want:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let client = oidc::Client::discover(id, secret, redirect, issuer)?;
//! let auth_url = client.auth_url(&Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let token = client.authenticate(auth_code, None, None)?;
//! ```
//!
//! That example leaves you with a decoded `Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure it's available!
//! (you get an error::Userinfo::NoUrl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
oauth: inth_oauth2::Client<Discovered>,
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
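// Illustrative expansion: `wrong_key!(sig, header.registered.algorithm)` becomes
// `Err(error::Jose::WrongKeyType { expected: "...", actual: "..." }.into())`, so
// callers can `return wrong_key!(expected, actual)` or use it as a match-arm value.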
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
    /// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
    /// - Validation::Expired::MaxAge if the token is older than the provided max_age
    /// - Validation::Missing::AuthTime if a max_age was given and the token has no auth time
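    ///
    /// A minimal sketch (the nonce value is illustrative); pass the same nonce and
    /// max_age you used when building the auth URL:
    ///
    /// ```rust,ignore
    /// client.validate_token(&token.id_token, Some("per-session-nonce"), None)?;
    /// ```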
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => { | let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if !claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
            if claims.azp.is_none() {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual != &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
}
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
    /// - Userinfo::MismatchSubject if the returned userinfo document and token's subject mismatch
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
if claims.sub != info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to ..Default::default() after you specify what you want.
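///
/// A minimal sketch (the scope and nonce values are illustrative):
///
/// ```rust,ignore
/// let options = oidc::Options {
///     scope: Some("email profile".to_string()),
///     nonce: Some("per-session-nonce".to_string()),
///     ..Default::default()
/// };
/// let auth_url = client.auth_url(&options);
/// ```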
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
pub state: Option<String>,
pub nonce: Option<String>,
pub display: Option<Display>,
pub prompt: Option<std::collections::HashSet<Prompt>>,
pub max_age: Option<Duration>,
pub ui_locales: Option<String>,
pub claims_locales: Option<String>,
pub id_token_hint: Option<String>,
pub login_hint: Option<String>,
pub acr_values: Option<String>,
}
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
    // ISO 8601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
#[serde(default)]
pub updated_at: Option<i64>,
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
Page,
Popup,
Touch,
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
None,
Login,
Consent,
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
/// Address Claim struct. Can be only formatted, only the rest, or both.
#[derive(Debug, Deserialize, Serialize)]
pub struct Address {
#[serde(default)]
pub formatted: Option<String>,
#[serde(default)]
pub street_address: Option<String>,
#[serde(default)]
pub locality: Option<String>,
#[serde(default)]
pub region: Option<String>,
// Countries like the UK use alphanumeric postal codes, so you can't just use a number here
#[serde(default)]
pub postal_code: Option<String>,
#[serde(default)]
pub country: Option<String>,
}
#[cfg(test)]
mod tests {
use crate::issuer;
use crate::Client;
use reqwest::Url;
#[test]
fn default_options() {
let _: super::Options = Default::default();
}
macro_rules! test {
($issuer:ident) => {
#[test]
fn $issuer() {
let id = "test".to_string();
let secret = "a secret to everybody".to_string();
let redirect = Url::parse("https://example.com/re").unwrap();
let client = Client::discover(id, secret, redirect, issuer::$issuer()).unwrap();
client.auth_url(&Default::default());
}
};
}
test!(google);
test!(microsoft);
test!(paypal);
test!(salesforce);
test!(yahoo);
} | if expected != actual { | random_line_split |
lib.rs | //! # OpenID Connect Client
//!
//! There are two ways to interact with this library - the batteries included magic methods, and
//! the slightly more boilerplate fine grained ones. For most users the former is what you want:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let client = oidc::Client::discover(id, secret, redirect, issuer)?;
//! let auth_url = client.auth_url(&Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let token = client.authenticate(auth_code, None, None)?;
//! ```
//!
//! That example leaves you with a decoded `Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure it's available!
//! (you get an error::Userinfo::NoUrl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
oauth: inth_oauth2::Client<Discovered>,
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn | (&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
    /// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
    /// - Validation::Expired::MaxAge if the token is older than the provided max_age
    /// - Validation::Missing::AuthTime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
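        // Nonce rules: a caller-supplied nonce must equal the token's nonce, and a
        // nonce present on only one side is rejected in either direction.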
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => {
if expected != actual {
let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if !claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
            if claims.azp.is_none() {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual != &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
}
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
    /// - Userinfo::MismatchSubject if the returned userinfo document and token's subject mismatch
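    ///
    /// A minimal sketch, reusing the reqwest client from the token request:
    ///
    /// ```rust,ignore
    /// let userinfo = client.request_userinfo(&http, &token)?;
    /// println!("sub = {}", userinfo.sub);
    /// ```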
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
if claims.sub != info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
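// Hedged usage sketch (added for illustration, not part of the original source): ties
// `validate_token` back to the nonce and max_age that were sent with the auth request.
// The `client`, decoded `token`, and `nonce` values are assumed to come from the caller,
// and the 5 minute max_age is a placeholder.
#[allow(dead_code)]
fn example_validate(client: &Client, token: &Token, nonce: &str) -> Result<(), Error> {
    // Use the same nonce and max_age that were placed in the Options for auth_url
    let max_age = Duration::minutes(5);
    client.validate_token(&token.id_token, Some(nonce), Some(&max_age))?;
    Ok(())
}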
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to ..Default::default() after you specify what you want.
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
pub state: Option<String>,
pub nonce: Option<String>,
pub display: Option<Display>,
pub prompt: Option<std::collections::HashSet<Prompt>>,
pub max_age: Option<Duration>,
pub ui_locales: Option<String>,
pub claims_locales: Option<String>,
pub id_token_hint: Option<String>,
pub login_hint: Option<String>,
pub acr_values: Option<String>,
}
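// Hedged example (illustrative only): building `Options` the way the doc comment above
// suggests - set the fields you care about and fill the rest via `..Default::default()`.
// The scope and nonce strings here are placeholders.
#[allow(dead_code)]
fn example_options() -> Options {
    Options {
        scope: Some("openid email profile".to_string()),
        nonce: Some("random-nonce-value".to_string()),
        max_age: Some(Duration::minutes(15)),
        ..Default::default()
    }
}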
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
// ISO 8601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
#[serde(default)]
pub updated_at: Option<i64>,
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
Page,
Popup,
Touch,
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
None,
Login,
Consent,
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
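// Hedged sketch (illustrative only): how a set of `Prompt` values is flattened into the
// space-separated `prompt` query parameter, mirroring what `Client::auth_url` does.
#[allow(dead_code)]
fn example_prompt_param(prompts: &std::collections::HashSet<Prompt>) -> String {
    prompts
        .iter()
        .map(|p| p.as_str())
        .collect::<Vec<_>>()
        .join(" ")
}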
/// Address Claim struct. May contain only the formatted string, only the individual fields, or both.
#[derive(Debug, Deserialize, Serialize)]
pub struct Address {
#[serde(default)]
pub formatted: Option<String>,
#[serde(default)]
pub street_address: Option<String>,
#[serde(default)]
pub locality: Option<String>,
#[serde(default)]
pub region: Option<String>,
// Countries like the UK use alphanumeric postal codes, so you can't just use a number here
#[serde(default)]
pub postal_code: Option<String>,
#[serde(default)]
pub country: Option<String>,
}
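// Hedged example (illustrative only): deserializing an `Address` claim with serde_json,
// which this crate already depends on. The JSON payload is made up; missing fields fall
// back to `None` thanks to `#[serde(default)]`.
#[allow(dead_code)]
fn example_parse_address() -> Result<Address, serde_json::Error> {
    serde_json::from_str(r#"{"locality":"Springfield","country":"US"}"#)
}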
#[cfg(test)]
mod tests {
use crate::issuer;
use crate::Client;
use reqwest::Url;
#[test]
fn default_options() {
let _: super::Options = Default::default();
}
macro_rules! test {
($issuer:ident) => {
#[test]
fn $issuer() {
let id = "test".to_string();
let secret = "a secret to everybody".to_string();
let redirect = Url::parse("https://example.com/re").unwrap();
let client = Client::discover(id, secret, redirect, issuer::$issuer()).unwrap();
client.auth_url(&Default::default());
}
};
}
test!(google);
test!(microsoft);
test!(paypal);
test!(salesforce);
test!(yahoo);
}
| decode_token | identifier_name |
lib.rs | //! # OpenID Connect Client
//!
//! There are two ways to interact with this library - the batteries included magic methods, and
//! the slightly more boilerplate fine grained ones. For most users the former is what you want:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let client = oidc::Client::discover(id, secret, redirect, issuer)?;
//! let auth_url = client.auth_url(&Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let token = client.authenticate(auth_code, None, None)?;
//! ```
//!
//! That example leaves you with a decoded `Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! // ... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure it's available!
//! (you get an error::Userinfo::NoUrl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
oauth: inth_oauth2::Client<Discovered>,
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameters - we need to verify they stayed the same during validation if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
/// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
/// - Validation::Expired::MaxAge if the token is older than the provided max_age
/// - Validation::Missing::AuthTime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => {
if expected != actual {
let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if !claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
if let None = claims.azp {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual != &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() |
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
/// - Userinfo::MismatchSubject if the returned userinfo document and the token's subject mismatch
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
if claims.sub != info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
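// Hedged usage sketch (added for illustration): fetching userinfo with a reqwest client
// that is reused across calls, as the module docs recommend. Assumes `token` was already
// obtained, decoded, and validated by the caller.
#[allow(dead_code)]
fn example_userinfo(
    client: &Client,
    http: &reqwest::Client,
    token: &Token,
) -> Result<Userinfo, Error> {
    let info = client.request_userinfo(http, token)?;
    println!("welcome, {:?}", info.preferred_username);
    Ok(info)
}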
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to ..Default::default() after you specify what you want.
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
pub state: Option<String>,
pub nonce: Option<String>,
pub display: Option<Display>,
pub prompt: Option<std::collections::HashSet<Prompt>>,
pub max_age: Option<Duration>,
pub ui_locales: Option<String>,
pub claims_locales: Option<String>,
pub id_token_hint: Option<String>,
pub login_hint: Option<String>,
pub acr_values: Option<String>,
}
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
// ISO 8601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
#[serde(default)]
pub updated_at: Option<i64>,
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
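// Hedged example (illustrative only): provider-specific claims that aren't modeled above
// end up in `extra` via `#[serde(flatten)]`. The "groups" claim name is an assumption.
#[allow(dead_code)]
fn example_extra_claim(info: &Userinfo) -> Option<&Value> {
    info.extra.get("groups")
}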
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
Page,
Popup,
Touch,
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
None,
Login,
Consent,
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
/// Address Claim struct. May contain only the formatted string, only the individual fields, or both.
#[derive(Debug, Deserialize, Serialize)]
pub struct Address {
#[serde(default)]
pub formatted: Option<String>,
#[serde(default)]
pub street_address: Option<String>,
#[serde(default)]
pub locality: Option<String>,
#[serde(default)]
pub region: Option<String>,
// Countries like the UK use alphanumeric postal codes, so you can't just use a number here
#[serde(default)]
pub postal_code: Option<String>,
#[serde(default)]
pub country: Option<String>,
}
#[cfg(test)]
mod tests {
use crate::issuer;
use crate::Client;
use reqwest::Url;
#[test]
fn default_options() {
let _: super::Options = Default::default();
}
macro_rules! test {
($issuer:ident) => {
#[test]
fn $issuer() {
let id = "test".to_string();
let secret = "a secret to everybody".to_string();
let redirect = Url::parse("https://example.com/re").unwrap();
let client = Client::discover(id, secret, redirect, issuer::$issuer()).unwrap();
client.auth_url(&Default::default());
}
};
}
test!(google);
test!(microsoft);
test!(paypal);
test!(salesforce);
test!(yahoo);
}
| {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
} | conditional_block |
machine.rs | #[cfg(target_arch = "wasm32")]
use crate::ast::wasm::{LangValueArrayMap, LangValueMap, SourceElement};
use crate::{
ast::{
compiled::Program, Instruction, Label, LangValue, Node, RegisterRef,
SpanNode, StackRef, ValueSource,
},
consts::MAX_CYCLE_COUNT,
debug,
error::{RuntimeError, SourceErrorWrapper, WithSource},
models::{HardwareSpec, ProgramSpec},
util::Span,
};
use std::{
cmp::Ordering, collections::HashMap, convert::TryInto, iter, num::Wrapping,
};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::{prelude::*, JsCast};
/// A steppable program executor. Maintains the current state of the program,
/// and execution can be progressed one instruction at a time.
///
/// Created from a [HardwareSpec](HardwareSpec), [ProgramSpec](ProgramSpec), and
/// a program. The current machine state can be obtained at any time, including
/// execution stats (e.g. # cycles), which allows for handy visualizations of
/// execution.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Clone, Debug)]
pub struct Machine {
// Static data - this is copied from the input and shouldn't be included in
// serialization. We store these ourselves instead of keeping references
// to the originals because it just makes life a lot easier.
hardware_spec: HardwareSpec,
source: String,
program: Program<Span>,
expected_output: Vec<LangValue>,
// Runtime state
/// The index of the next instruction to be executed
program_counter: usize,
/// The current input buffer. This can be popped from as the program is
/// executed. We always pop from the front. This isn't ideal for a Vec,
/// but these arrays will be small enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
/// The series of stacks that act as the program's RAM. The number of stacks
/// and their capacity is determined by the initializing hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to avoid reallocations during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
// If the stack is already at capacity, fail with a stack overflow
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs()
.into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
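// Hedged usage sketch (added for illustration, not part of the original source): drives an
// already-constructed `Machine` one instruction at a time, which is what a visualizer
// would do instead of calling `execute_all`. How the machine is built is out of scope here.
#[allow(dead_code)]
fn step_until_done(machine: &mut Machine) -> bool {
    while !machine.terminated() {
        if machine.execute_next().is_err() {
            // The error stays stored on the machine; see `Machine::error`
            break;
        }
    }
    machine.successful()
}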
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
// Check for normal completion
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if !self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if !self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output != self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
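// Hedged example (illustrative only): turning `failure_reason` into a human-readable
// message after execution has terminated. The wording of the messages is an assumption.
#[allow(dead_code)]
fn describe_outcome(machine: &Machine) -> &'static str {
    match machine.failure_reason() {
        None => "still running, or finished successfully",
        Some(FailureReason::RuntimeError) => "a runtime error halted execution",
        Some(FailureReason::RemainingInput) => "input was left unread at termination",
        Some(FailureReason::IncorrectOutput) => "output did not match the expected output",
    }
}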
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(®s_by_name).unwrap().unchecked_into()
}
/// A wrapper for [Self::stacks], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping stacks names (strings) to their values (`Vec<LangValue>`).
#[wasm_bindgen(getter, js_name = "stacks")]
pub fn wasm_stacks(&self) -> LangValueArrayMap {
// Convert the keys of the stacks map to strings
let stacks_by_name: HashMap<String, &[LangValue]> = self
.stacks()
.into_iter()
.map(|(stack_ref, stack_value)| {
(stack_ref.to_string(), stack_value)
})
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(&stacks_by_name)
.unwrap()
.unchecked_into()
}
/// A wrapper for [Self::error], to be called from wasm. We can't send
/// maps through wasm, so this returns a simplified error as a
/// [SourceElement].
#[wasm_bindgen(getter, js_name = "error")]
pub fn wasm_error(&self) -> Option<SourceElement> {
self.error.as_ref().map(|wrapped_error| {
// If an error is present, there should always be exactly one
match wrapped_error.errors() {
[error] => error.into(),
errors => panic!(
"Expected exactly 1 runtime error, but got {:?}",
errors
),
}
})
}
/// A wrapper for [Self::execute_next], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeNext")]
pub fn wasm_execute_next(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_next().unwrap_or(true)
}
/// A wrapper for [Self::execute_all], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeAll")]
pub fn wasm_execute_all(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_all().unwrap_or(true)
}
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum | {
/// An error occurred while trying to execute one of the instructions
RuntimeError,
/// The input buffer wasn't empty upon termination
RemainingInput,
/// The output buffer didn't match the expected output, as defined by the
/// program spec
IncorrectOutput,
}
| FailureReason | identifier_name |
machine.rs | #[cfg(target_arch = "wasm32")]
use crate::ast::wasm::{LangValueArrayMap, LangValueMap, SourceElement};
use crate::{
ast::{
compiled::Program, Instruction, Label, LangValue, Node, RegisterRef,
SpanNode, StackRef, ValueSource,
},
consts::MAX_CYCLE_COUNT,
debug,
error::{RuntimeError, SourceErrorWrapper, WithSource},
models::{HardwareSpec, ProgramSpec},
util::Span,
};
use std::{
cmp::Ordering, collections::HashMap, convert::TryInto, iter, num::Wrapping,
};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::{prelude::*, JsCast};
/// A steppable program executor. Maintains the current state of the program,
/// and execution can be progressed one instruction at a time.
///
/// Created from a [HardwareSpec](HardwareSpec), [ProgramSpec](ProgramSpec), and
/// a program. The current machine state can be obtained at any time, including
/// execution stats (e.g. # cycles), which allows for handy visualizations of
/// execution.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Clone, Debug)]
pub struct Machine {
// Static data - this is copied from the input and shouldn't be included in
// serialization. We store these ourselves instead of keeping references
// to the originals because it just makes life a lot easier.
hardware_spec: HardwareSpec,
source: String,
program: Program<Span>,
expected_output: Vec<LangValue>,
// Runtime state
/// The index of the next instruction to be executed
program_counter: usize,
/// The current input buffer. This can be popped from as the program is
/// executed. We always pop from the front. This isn't ideal for a Vec,
/// but these arrays will be small enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
/// The series of stacks that act as the program's RAM. The number of stacks
/// and their capacity is determined by the initializing hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to avoid reallocations during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
// If the stack is already at capacity, fail with a stack overflow
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values. | .into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
// Check for normal completion
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if !self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if !self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output != self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(®s_by_name).unwrap().unchecked_into()
}
/// A wrapper for [Self::stacks], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping stacks names (strings) to their values (`Vec<LangValue>`).
#[wasm_bindgen(getter, js_name = "stacks")]
pub fn wasm_stacks(&self) -> LangValueArrayMap {
// Convert the keys of the stacks map to strings
let stacks_by_name: HashMap<String, &[LangValue]> = self
.stacks()
.into_iter()
.map(|(stack_ref, stack_value)| {
(stack_ref.to_string(), stack_value)
})
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(&stacks_by_name)
.unwrap()
.unchecked_into()
}
/// A wrapper for [Self::error], to be called from wasm. We can't send
/// maps through wasm, so this returns a simplified error as a
/// [SourceElement].
#[wasm_bindgen(getter, js_name = "error")]
pub fn wasm_error(&self) -> Option<SourceElement> {
self.error.as_ref().map(|wrapped_error| {
// If an error is present, there should always be exactly one
match wrapped_error.errors() {
[error] => error.into(),
errors => panic!(
"Expected exactly 1 runtime error, but got {:?}",
errors
),
}
})
}
/// A wrapper for [Self::execute_next], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeNext")]
pub fn wasm_execute_next(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_next().unwrap_or(true)
}
/// A wrapper for [Self::execute_all], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeAll")]
pub fn wasm_execute_all(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_all().unwrap_or(true)
}
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum FailureReason {
/// An error occurred while trying to execute one of the instructions
RuntimeError,
    /// The input buffer wasn't empty upon termination
RemainingInput,
/// The output buffer didn't match the expected output, as defined by the
/// program spec
IncorrectOutput,
} | pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs() | random_line_split |
machine.rs | #[cfg(target_arch = "wasm32")]
use crate::ast::wasm::{LangValueArrayMap, LangValueMap, SourceElement};
use crate::{
ast::{
compiled::Program, Instruction, Label, LangValue, Node, RegisterRef,
SpanNode, StackRef, ValueSource,
},
consts::MAX_CYCLE_COUNT,
debug,
error::{RuntimeError, SourceErrorWrapper, WithSource},
models::{HardwareSpec, ProgramSpec},
util::Span,
};
use std::{
cmp::Ordering, collections::HashMap, convert::TryInto, iter, num::Wrapping,
};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::{prelude::*, JsCast};
/// A steppable program executor. Maintains the current state of the program,
/// and execution can be progressed one instruction at a time.
///
/// Created from a [HardwareSpec](HardwareSpec), [ProgramSpec](ProgramSpec), and
/// a program. The current machine state can be obtained at any time, including
/// execution stats (e.g. # cycles), which allows for handy visualizations of
/// execution.
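///
/// A rough usage sketch (the `hardware_spec`, `program_spec`, compiled
/// `program`, and `source` values are assumed to be built elsewhere):
///
/// ```ignore
/// let mut machine = Machine::new(hardware_spec, &program_spec, program, source);
/// let _ = machine.execute_all();
/// if machine.successful() {
///     // terminated with the expected output and no leftover input
/// }
/// ```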
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Clone, Debug)]
pub struct Machine {
// Static data - this is copied from the input and shouldn't be included in
// serialization. We store these ourselves instead of keeping references
// to the originals because it just makes life a lot easier.
hardware_spec: HardwareSpec,
source: String,
program: Program<Span>,
expected_output: Vec<LangValue>,
// Runtime state
/// The index of the next instruction to be executed
program_counter: usize,
/// The current input buffer. This can be popped from as the program is
/// executed. We always pop from the front. This isn't ideal for a Vec,
/// but these arrays will be small enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
    /// The series of stacks that act as the program's RAM. The number of stacks
    /// and their capacity are determined by the initializing hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to prevent grows during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
    /// Pushes the given value onto the given stack. If the stack is already
    /// at capacity, an error is returned. If the stack reference is invalid,
    /// will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> |
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs()
.into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
        // Check for normal completion
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if !self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if !self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output != self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
    /// A wrapper for [Self::output], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(®s_by_name).unwrap().unchecked_into()
}
/// A wrapper for [Self::stacks], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping stacks names (strings) to their values (`Vec<LangValue>`).
#[wasm_bindgen(getter, js_name = "stacks")]
pub fn wasm_stacks(&self) -> LangValueArrayMap {
// Convert the keys of the stacks map to strings
let stacks_by_name: HashMap<String, &[LangValue]> = self
.stacks()
.into_iter()
.map(|(stack_ref, stack_value)| {
(stack_ref.to_string(), stack_value)
})
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(&stacks_by_name)
.unwrap()
.unchecked_into()
}
/// A wrapper for [Self::error], to be called from wasm. We can't send
/// maps through wasm, so this returns a simplified error as a
/// [SourceElement].
#[wasm_bindgen(getter, js_name = "error")]
pub fn wasm_error(&self) -> Option<SourceElement> {
self.error.as_ref().map(|wrapped_error| {
// If an error is present, there should always be exactly one
match wrapped_error.errors() {
[error] => error.into(),
errors => panic!(
"Expected exactly 1 runtime error, but got {:?}",
errors
),
}
})
}
/// A wrapper for [Self::execute_next], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeNext")]
pub fn wasm_execute_next(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_next().unwrap_or(true)
}
/// A wrapper for [Self::execute_all], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeAll")]
pub fn wasm_execute_all(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_all().unwrap_or(true)
}
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum FailureReason {
/// An error occurred while trying to execute one of the instructions
RuntimeError,
    /// The input buffer wasn't empty upon termination
RemainingInput,
/// The output buffer didn't match the expected output, as defined by the
/// program spec
IncorrectOutput,
}
| {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
        // If the stack is already at capacity, reject the push
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
} | identifier_body |
machine.rs | #[cfg(target_arch = "wasm32")]
use crate::ast::wasm::{LangValueArrayMap, LangValueMap, SourceElement};
use crate::{
ast::{
compiled::Program, Instruction, Label, LangValue, Node, RegisterRef,
SpanNode, StackRef, ValueSource,
},
consts::MAX_CYCLE_COUNT,
debug,
error::{RuntimeError, SourceErrorWrapper, WithSource},
models::{HardwareSpec, ProgramSpec},
util::Span,
};
use std::{
cmp::Ordering, collections::HashMap, convert::TryInto, iter, num::Wrapping,
};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::{prelude::*, JsCast};
/// A steppable program executor. Maintains the current state of the program,
/// and execution can be progressed one instruction at a time.
///
/// Created from a [HardwareSpec](HardwareSpec), [ProgramSpec](ProgramSpec), and
/// a program. The current machine state can be obtained at any time, including
/// execution stats (e.g. # cycles), which allows for handy visualizations of
/// execution.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Clone, Debug)]
pub struct Machine {
// Static data - this is copied from the input and shouldn't be included in
// serialization. We store these ourselves instead of keeping references
// to the originals because it just makes life a lot easier.
hardware_spec: HardwareSpec,
source: String,
program: Program<Span>,
expected_output: Vec<LangValue>,
// Runtime state
/// The index of the next instruction to be executed
program_counter: usize,
/// The current input buffer. This can be popped from as the program is
/// executed. We always pop from the front. This isn't ideal for a Vec,
/// but these arrays will be small enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
    /// The series of stacks that act as the program's RAM. The number of stacks
    /// and their capacity are determined by the initializing hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to prevent grows during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
    /// Pushes the given value onto the given stack. If the stack is already
    /// at capacity, an error is returned. If the stack reference is invalid,
    /// will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
        // If the stack is already at capacity, reject the push
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else |
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs()
.into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
        // Check for normal completion
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if !self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if !self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output != self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
    /// A wrapper for [Self::output], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(®s_by_name).unwrap().unchecked_into()
}
/// A wrapper for [Self::stacks], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping stacks names (strings) to their values (`Vec<LangValue>`).
#[wasm_bindgen(getter, js_name = "stacks")]
pub fn wasm_stacks(&self) -> LangValueArrayMap {
// Convert the keys of the stacks map to strings
let stacks_by_name: HashMap<String, &[LangValue]> = self
.stacks()
.into_iter()
.map(|(stack_ref, stack_value)| {
(stack_ref.to_string(), stack_value)
})
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(&stacks_by_name)
.unwrap()
.unchecked_into()
}
/// A wrapper for [Self::error], to be called from wasm. We can't send
/// maps through wasm, so this returns a simplified error as a
/// [SourceElement].
#[wasm_bindgen(getter, js_name = "error")]
pub fn wasm_error(&self) -> Option<SourceElement> {
self.error.as_ref().map(|wrapped_error| {
// If an error is present, there should always be exactly one
match wrapped_error.errors() {
[error] => error.into(),
errors => panic!(
"Expected exactly 1 runtime error, but got {:?}",
errors
),
}
})
}
/// A wrapper for [Self::execute_next], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeNext")]
pub fn wasm_execute_next(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_next().unwrap_or(true)
}
/// A wrapper for [Self::execute_all], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeAll")]
pub fn wasm_execute_all(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_all().unwrap_or(true)
}
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum FailureReason {
/// An error occurred while trying to execute one of the instructions
RuntimeError,
    /// The input buffer wasn't empty upon termination
RemainingInput,
/// The output buffer didn't match the expected output, as defined by the
/// program spec
IncorrectOutput,
}
| {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
} | conditional_block |
rgou.py | #!/usr/bin/env python
# Royal Game of Ur
try:
# python2
import Tkinter as tk
from Tkinter.messagebox import showinfo
except ImportError:
# python3
import tkinter as tk
from tkinter.messagebox import showinfo
import random # for rolls
def | (event):
print("clicked at", event.x, event.y)
coords(event.x,event.y)
# coordss(event.x,event.y)
#frame = Frame(game, width=100, height=100)
#game.mainloop()
game = tk.Tk()
game.title("Royal Game of Ur")
## BG image
#fname = "RGOU.gif"
#fname = "RGOU2.gif"
fname = "RGOU4.gif"
bg_image = tk.PhotoImage(file=fname)
bg_image = bg_image.subsample(2,2)
w = bg_image.width()
h = bg_image.height()
strs = "%dx%d+50+30" % (w,h)
print(strs)
game.geometry(strs)
cv = tk.Canvas(width=w,height=h)
cv.pack(side='top',fill='both',expand='yes')
cv.create_image(0,0,image=bg_image,anchor='nw')
cv.bind("<Button-1>", callback)
cv.pack()
print(dir(cv))
board_x_y = [ # x ,y ,xn,yn,[xycoordinates]
[100,80,180,152,[0,0]],
[100,170,180,231,[1,0]],
[100,245,180,315,[2,0]],
[100,325,180,394,[3,0]],
[20,332,69,386,[4,0]], # white start
[60,443,142,517,[5,0]], # roll white
[100,578,180,635,[6,0]],
[100,650,180,719,[7,0]],
# w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
# b = cv.create_image(330,480,image=blackrollicon)
[189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
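# Each board_x_y entry is one clickable cell: pixel bounds (x, y)-(xn, yn)
# followed by the cell's logical [column, row] coordinate. For example, a
# click at pixel (120, 100) falls inside the first entry and is mapped to
# cell [0, 0] by coords() below.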
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
    white_pieces = [[4,0] for i in range(7)] # seven white pieces on the start square
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
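    # Both tracks share the middle column (cells [0,1] through [7,1]); that is
    # the only stretch where captures can happen, and [3,1] is the protected
    # square that play() refuses to capture on.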
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons
global w ,b
global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
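    # Roll four binary "dice": each contributes 0 or 1, so a roll is 0-4.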
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup()
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
        roll()
        if rolled_num == 0: # a re-roll of zero forfeits the extra turn
            endmove()
checkroll()
def coords(x,y):
if 16 < x < 164:
if 753 < y < 776:
forfeit()
return
if 315 < x < 390:
if 757 < y < 779:
reset()
return
for item in board_x_y:
if item[0] <= x <= item[2]:
if item[1] <= y <= item[3]:
print(item[4])
play(item[4])
# movec(item[4])
return
def getpossition(x,y):
for i in board_x_y:
if i[4] == [x,y]:
return i[0],i[1]
def play(coords):
# global white_pieces
# global black_pieces
# global pieces, board_butts
global rolled_num , turn, moved, rolled
global tracks
global pieces_cv
global pieces
print(f"rolled_num = {rolled_num}")
print(f"turn = {turn}")
print(f"moved = {moved}")
print(f"rolled = {rolled}")
print(pieces)
checkroll()
x = coords[0]
y = coords[1]
    # if the roll square was clicked, roll
if x == 5 and y == turn:
if moved:
roll()
if rolled_num ==0:
if turn == 0:
turn = 2
else:
turn = 0
moved = True
rolled = False
checkroll()
return
if coords in pieces[turn] and not moved:
if turn == 0:
opponent = 2
else:
opponent = 0
trackindex = tracks[turn].index(coords) # position on board
print(f"trackindex = {trackindex}")
indpiece = pieces[turn].index(coords) # identify piece
print(f"indpiece = {indpiece}")
t = pieces_cv[turn][indpiece] # identify canvas of piece
print(f"t = {t}")
result = trackindex + rolled_num
print(result)
if len(tracks[turn]) < result:
return
if len(tracks[turn]) == result:
pieces[turn].pop(indpiece)
pieces_cv[turn].pop(indpiece)
cv.delete(t)
score()
if len(pieces[turn]) == 0:
game_ended(turn)
endmove()
# next turn
return
coords_new = tracks[turn][trackindex+rolled_num]
newx = coords_new[0]
newy = coords_new[1]
print(f"coords_new = {coords_new}")
# special case
if [newx,newy] == [3,1] : # can't take piece there
if [newx,newy] in pieces[opponent]:
newx+=1
if [newx,newy] in pieces[turn]: # can't take own piece
return
newcoordx,newcoordy = getpossition(newx,newy)
if [newx,newy] in pieces[opponent]: # take
oppindex = pieces[opponent].index([newx,newy])
oppx,oppy = getpossition(4,opponent)
difopx = oppx - newcoordx
difopy = oppy - newcoordy
taken = pieces_cv[opponent][oppindex]
cv.move(taken,difopx,difopy) # move to start
pieces[opponent][oppindex] = [4,opponent] # set coords
print(f"{newcoordx},{newcoordy}")
oldx,oldy = getpossition(x,y)
difx = newcoordx - oldx
dify = newcoordy - oldy
cv.move(t,difx,dify)
pieces[turn][indpiece] = [newx,newy]
print("move!!")
print(f"{t},{difx},{dify}")
print(f"{pieces[turn][indpiece]}")
print(f"{pieces[turn]}")
# play again squares
playagain = [ [0,0] , [0,2] , [3,1], [6,0] ,[6,2]]
play =( [newx,newy] in playagain )
endmove(play)
return
def is_move_possible():
a = pieces[turn] # all pieces of player on move
road = tracks[turn]
if turn == 0:
opponent = 2
else:
opponent = 0
alreadychecked = []
for piece in a:
if piece in alreadychecked:
continue
piece_position = road.index(piece)
        if rolled_num + piece_position == len(road):
            # bearing this piece off the board is a legal move
            return True
        if rolled_num + piece_position < len(road):
            newcoords = road[piece_position+rolled_num]
            if newcoords == [3,1] : # special square check
                if newcoords in pieces[opponent]:
                    newcoords = [4,1]
            if newcoords not in a:
                return True
alreadychecked.append(piece)
return False
def forfeit():
global moved,rolled,turn
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if not rolled:
tk.messagebox.askokcancel("popup","ROLL!")
return
if rolled and is_move_possible():
tk.messagebox.askokcancel("popup","you can move!")
return
endmove()
scoretext = []
def score():
global scoretext
w = str(7 - len(pieces[0]))
b = str(7 - len(pieces[2]))
t = f"{w} : {b}"
if len(scoretext) == 0:
score = cv.create_text(220,780,font="Times 30 italic bold",text=t)
scoretext.append(score)
else:
cv.itemconfig(scoretext[0],font="Times 30 italic bold",text=t)
# show canvas text :p
return
# RUN!
setup()
game.mainloop()
| callback | identifier_name |
rgou.py | #!/usr/bin/env python
# Royal Game of Ur
try:
# python2
import Tkinter as tk
from Tkinter.messagebox import showinfo
except ImportError:
# python3
import tkinter as tk
from tkinter.messagebox import showinfo
import random # for rolls
def callback(event):
print("clicked at", event.x, event.y)
coords(event.x,event.y)
# coordss(event.x,event.y)
#frame = Frame(game, width=100, height=100)
#game.mainloop()
game = tk.Tk()
game.title("Royal Game of Ur")
## BG image
#fname = "RGOU.gif"
#fname = "RGOU2.gif"
fname = "RGOU4.gif"
bg_image = tk.PhotoImage(file=fname)
bg_image = bg_image.subsample(2,2)
w = bg_image.width()
h = bg_image.height()
strs = "%dx%d+50+30" % (w,h)
print(strs)
game.geometry(strs)
cv = tk.Canvas(width=w,height=h)
cv.pack(side='top',fill='both',expand='yes')
cv.create_image(0,0,image=bg_image,anchor='nw')
cv.bind("<Button-1>", callback)
cv.pack()
print(dir(cv))
board_x_y = [ # x ,y ,xn,yn,[xycoordinates]
[100,80,180,152,[0,0]],
[100,170,180,231,[1,0]],
[100,245,180,315,[2,0]],
[100,325,180,394,[3,0]],
[20,332,69,386,[4,0]], # white start
[60,443,142,517,[5,0]], # roll white
[100,578,180,635,[6,0]],
[100,650,180,719,[7,0]],
# w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
# b = cv.create_image(330,480,image=blackrollicon)
[189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
    white_pieces = [[4,0] for i in range(7)] # seven white pieces on the start square
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons
global w ,b
global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
|
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
        roll()
        if rolled_num == 0: # a re-roll of zero forfeits the extra turn
            endmove()
checkroll()
def coords(x,y):
if 16 < x < 164:
if 753 < y < 776:
forfeit()
return
if 315 < x < 390:
if 757 < y < 779:
reset()
return
for item in board_x_y:
if item[0] <= x <= item[2]:
if item[1] <= y <= item[3]:
print(item[4])
play(item[4])
# movec(item[4])
return
def getpossition(x,y):
for i in board_x_y:
if i[4] == [x,y]:
return i[0],i[1]
def play(coords):
# global white_pieces
# global black_pieces
# global pieces, board_butts
global rolled_num , turn, moved, rolled
global tracks
global pieces_cv
global pieces
print(f"rolled_num = {rolled_num}")
print(f"turn = {turn}")
print(f"moved = {moved}")
print(f"rolled = {rolled}")
print(pieces)
checkroll()
x = coords[0]
y = coords[1]
    # if the roll square was clicked, roll
if x == 5 and y == turn:
if moved:
roll()
if rolled_num ==0:
if turn == 0:
turn = 2
else:
turn = 0
moved = True
rolled = False
checkroll()
return
if coords in pieces[turn] and not moved:
if turn == 0:
opponent = 2
else:
opponent = 0
trackindex = tracks[turn].index(coords) # position on board
print(f"trackindex = {trackindex}")
indpiece = pieces[turn].index(coords) # identify piece
print(f"indpiece = {indpiece}")
t = pieces_cv[turn][indpiece] # identify canvas of piece
print(f"t = {t}")
result = trackindex + rolled_num
print(result)
if len(tracks[turn]) < result:
return
if len(tracks[turn]) == result:
pieces[turn].pop(indpiece)
pieces_cv[turn].pop(indpiece)
cv.delete(t)
score()
if len(pieces[turn]) == 0:
game_ended(turn)
endmove()
# next turn
return
coords_new = tracks[turn][trackindex+rolled_num]
newx = coords_new[0]
newy = coords_new[1]
print(f"coords_new = {coords_new}")
# special case
if [newx,newy] == [3,1] : # can't take piece there
if [newx,newy] in pieces[opponent]:
newx+=1
if [newx,newy] in pieces[turn]: # can't take own piece
return
newcoordx,newcoordy = getpossition(newx,newy)
if [newx,newy] in pieces[opponent]: # take
oppindex = pieces[opponent].index([newx,newy])
oppx,oppy = getpossition(4,opponent)
difopx = oppx - newcoordx
difopy = oppy - newcoordy
taken = pieces_cv[opponent][oppindex]
cv.move(taken,difopx,difopy) # move to start
pieces[opponent][oppindex] = [4,opponent] # set coords
print(f"{newcoordx},{newcoordy}")
oldx,oldy = getpossition(x,y)
difx = newcoordx - oldx
dify = newcoordy - oldy
cv.move(t,difx,dify)
pieces[turn][indpiece] = [newx,newy]
print("move!!")
print(f"{t},{difx},{dify}")
print(f"{pieces[turn][indpiece]}")
print(f"{pieces[turn]}")
# play again squares
playagain = [ [0,0] , [0,2] , [3,1], [6,0] ,[6,2]]
play =( [newx,newy] in playagain )
endmove(play)
return
def is_move_possible():
a = pieces[turn] # all pieces of player on move
road = tracks[turn]
if turn == 0:
opponent = 2
else:
opponent = 0
alreadychecked = []
for piece in a:
if piece in alreadychecked:
continue
piece_position = road.index(piece)
        if rolled_num + piece_position == len(road):
            # bearing this piece off the board is a legal move
            return True
        if rolled_num + piece_position < len(road):
            newcoords = road[piece_position+rolled_num]
            if newcoords == [3,1] : # special square check
                if newcoords in pieces[opponent]:
                    newcoords = [4,1]
            if newcoords not in a:
                return True
alreadychecked.append(piece)
return False
def forfeit():
global moved,rolled,turn
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if not rolled:
tk.messagebox.askokcancel("popup","ROLL!")
return
if rolled and is_move_possible():
tk.messagebox.askokcancel("popup","you can move!")
return
endmove()
scoretext = []
def score():
global scoretext
w = str(7 - len(pieces[0]))
b = str(7 - len(pieces[2]))
t = f"{w} : {b}"
if len(scoretext) == 0:
score = cv.create_text(220,780,font="Times 30 italic bold",text=t)
scoretext.append(score)
else:
cv.itemconfig(scoretext[0],font="Times 30 italic bold",text=t)
# show canvas text :p
return
# RUN!
setup()
game.mainloop()
| a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup() | identifier_body |
rgou.py | #!/usr/bin/env python
# Royal Game of Ur
try:
# python2
import Tkinter as tk
from Tkinter.messagebox import showinfo
except ImportError:
# python3
import tkinter as tk
from tkinter.messagebox import showinfo
import random # for rolls
def callback(event):
print("clicked at", event.x, event.y)
coords(event.x,event.y)
# coordss(event.x,event.y)
#frame = Frame(game, width=100, height=100)
#game.mainloop()
game = tk.Tk()
game.title("Royal Game of Ur")
## BG image
#fname = "RGOU.gif"
#fname = "RGOU2.gif"
fname = "RGOU4.gif"
bg_image = tk.PhotoImage(file=fname)
bg_image = bg_image.subsample(2,2)
w = bg_image.width()
h = bg_image.height()
strs = "%dx%d+50+30" % (w,h)
print(strs)
game.geometry(strs)
cv = tk.Canvas(width=w,height=h)
cv.pack(side='top',fill='both',expand='yes')
cv.create_image(0,0,image=bg_image,anchor='nw')
cv.bind("<Button-1>", callback)
cv.pack()
print(dir(cv))
board_x_y = [ # x ,y ,xn,yn,[xycoordinates]
[100,80,180,152,[0,0]],
[100,170,180,231,[1,0]],
[100,245,180,315,[2,0]],
[100,325,180,394,[3,0]],
[20,332,69,386,[4,0]], # white start
[60,443,142,517,[5,0]], # roll white
[100,578,180,635,[6,0]],
[100,650,180,719,[7,0]],
# w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
# b = cv.create_image(330,480,image=blackrollicon)
[189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
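    """Initialise the global game state: piece lists, movement tracks, turn and roll flags, then draw the pieces, roll icons, score and the forfeit/reset buttons."""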
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
white_pieces = [[4,0] for i in range(7)] # score white
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons | global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
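    """Create the canvas images for every white and black piece; with delete=True, remove the existing piece images instead."""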
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
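    """Roll four binary dice (the sum of four random 0/1 values, giving 0-4), store the result and refresh the roll icons."""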
score()
global rolled_num
global moved,rolled
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup()
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
s = roll()
if s == 0:
endmove()
checkroll()
def coords(x,y):
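    """Translate a canvas click at (x, y) into an action: forfeit, reset, or playing the clicked board square."""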
if 16 < x < 164:
if 753 < y < 776:
forfeit()
return
if 315 < x < 390:
if 757 < y < 779:
reset()
return
for item in board_x_y:
if item[0] <= x <= item[2]:
if item[1] <= y <= item[3]:
print(item[4])
play(item[4])
# movec(item[4])
return
def getpossition(x,y):
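    """Return the top-left pixel coordinates of the board square whose logical coordinates are [x, y]."""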
for i in board_x_y:
if i[4] == [x,y]:
return i[0],i[1]
def play(coords):
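    """Handle a click on board square `coords`: roll when the roll button is hit, otherwise move the selected piece, bearing off, capturing opponent pieces and granting an extra turn on rosette squares."""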
# global white_pieces
# global black_pieces
# global pieces, board_butts
global rolled_num , turn, moved, rolled
global tracks
global pieces_cv
global pieces
print(f"rolled_num = {rolled_num}")
print(f"turn = {turn}")
print(f"moved = {moved}")
print(f"rolled = {rolled}")
print(pieces)
checkroll()
x = coords[0]
y = coords[1]
    # if the roll button was clicked, roll
if x == 5 and y == turn:
if moved:
roll()
if rolled_num ==0:
if turn == 0:
turn = 2
else:
turn = 0
moved = True
rolled = False
checkroll()
return
if coords in pieces[turn] and not moved:
if turn == 0:
opponent = 2
else:
opponent = 0
trackindex = tracks[turn].index(coords) # position on board
print(f"trackindex = {trackindex}")
indpiece = pieces[turn].index(coords) # identify piece
print(f"indpiece = {indpiece}")
t = pieces_cv[turn][indpiece] # identify canvas of piece
print(f"t = {t}")
result = trackindex + rolled_num
print(result)
if len(tracks[turn]) < result:
return
if len(tracks[turn]) == result:
pieces[turn].pop(indpiece)
pieces_cv[turn].pop(indpiece)
cv.delete(t)
score()
if len(pieces[turn]) == 0:
game_ended(turn)
endmove()
# next turn
return
coords_new = tracks[turn][trackindex+rolled_num]
newx = coords_new[0]
newy = coords_new[1]
print(f"coords_new = {coords_new}")
# special case
if [newx,newy] == [3,1] : # can't take piece there
if [newx,newy] in pieces[opponent]:
newx+=1
if [newx,newy] in pieces[turn]: # can't take own piece
return
newcoordx,newcoordy = getpossition(newx,newy)
if [newx,newy] in pieces[opponent]: # take
oppindex = pieces[opponent].index([newx,newy])
oppx,oppy = getpossition(4,opponent)
difopx = oppx - newcoordx
difopy = oppy - newcoordy
taken = pieces_cv[opponent][oppindex]
cv.move(taken,difopx,difopy) # move to start
pieces[opponent][oppindex] = [4,opponent] # set coords
print(f"{newcoordx},{newcoordy}")
oldx,oldy = getpossition(x,y)
difx = newcoordx - oldx
dify = newcoordy - oldy
cv.move(t,difx,dify)
pieces[turn][indpiece] = [newx,newy]
print("move!!")
print(f"{t},{difx},{dify}")
print(f"{pieces[turn][indpiece]}")
print(f"{pieces[turn]}")
# play again squares
playagain = [ [0,0] , [0,2] , [3,1], [6,0] ,[6,2]]
play =( [newx,newy] in playagain )
endmove(play)
return
def is_move_possible():
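    """Check whether the player on move has any legal move for the current roll; used by forfeit()."""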
a = pieces[turn] # all pieces of player on move
road = tracks[turn]
if turn == 0:
opponent = 2
else:
opponent = 0
alreadychecked = []
for piece in a:
if piece in alreadychecked:
continue
piece_position = road.index(piece)
        if rolled_num + piece_position == len(road):  # an exact roll bears this piece off
            return True
        if rolled_num + piece_position < len(road):
            newcoords = road[piece_position+rolled_num]
            if newcoords == [3,1] : # special square check
                if newcoords in pieces[opponent]:
                    newcoords = [4,1]
            if newcoords not in a:
                return True
alreadychecked.append(piece)
return False
def forfeit():
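    """Skip the current move, but only after rolling and only when no legal move exists."""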
global moved,rolled,turn
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if not rolled:
tk.messagebox.askokcancel("popup","ROLL!")
return
if rolled and is_move_possible():
tk.messagebox.askokcancel("popup","you can move!")
return
endmove()
scoretext = []
def score():
global scoretext
w = str(7 - len(pieces[0]))
b = str(7 - len(pieces[2]))
t = f"{w} : {b}"
if len(scoretext) == 0:
score = cv.create_text(220,780,font="Times 30 italic bold",text=t)
scoretext.append(score)
else:
cv.itemconfig(scoretext[0],font="Times 30 italic bold",text=t)
# show canvas text :p
return
# RUN!
setup()
game.mainloop() | global w ,b | random_line_split |
rgou.py | #!/usr/bin/env python
# Royal Game of Ur
try:
# python2
import Tkinter as tk
from Tkinter.messagebox import showinfo
except ImportError:
# python3
import tkinter as tk
from tkinter.messagebox import showinfo
import random # for rolls
def callback(event):
print("clicked at", event.x, event.y)
coords(event.x,event.y)
# coordss(event.x,event.y)
#frame = Frame(game, width=100, height=100)
#game.mainloop()
game = tk.Tk()
game.title("Royal Game of Ur")
## BG image
#fname = "RGOU.gif"
#fname = "RGOU2.gif"
fname = "RGOU4.gif"
bg_image = tk.PhotoImage(file=fname)
bg_image = bg_image.subsample(2,2)
w = bg_image.width()
h = bg_image.height()
strs = "%dx%d+50+30" % (w,h)
print(strs)
game.geometry(strs)
cv = tk.Canvas(width=w,height=h)
cv.pack(side='top',fill='both',expand='yes')
cv.create_image(0,0,image=bg_image,anchor='nw')
cv.bind("<Button-1>", callback)
cv.pack()
print(dir(cv))
board_x_y = [ # x ,y ,xn,yn,[xycoordinates]
[100,80,180,152,[0,0]],
[100,170,180,231,[1,0]],
[100,245,180,315,[2,0]],
[100,325,180,394,[3,0]],
[20,332,69,386,[4,0]], # white start
[60,443,142,517,[5,0]], # roll white
[100,578,180,635,[6,0]],
[100,650,180,719,[7,0]],
# w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
# b = cv.create_image(330,480,image=blackrollicon)
[189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
white_pieces = [[4,0] for i in range(7)] # score white
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons
global w ,b
global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup()
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
s = roll()
if s == 0:
endmove()
checkroll()
def coords(x,y):
if 16 < x < 164:
if 753 < y < 776:
forfeit()
return
if 315 < x < 390:
if 757 < y < 779:
reset()
return
for item in board_x_y:
if item[0] <= x <= item[2]:
if item[1] <= y <= item[3]:
print(item[4])
play(item[4])
# movec(item[4])
return
def getpossition(x,y):
for i in board_x_y:
if i[4] == [x,y]:
return i[0],i[1]
def play(coords):
# global white_pieces
# global black_pieces
# global pieces, board_butts
global rolled_num , turn, moved, rolled
global tracks
global pieces_cv
global pieces
print(f"rolled_num = {rolled_num}")
print(f"turn = {turn}")
print(f"moved = {moved}")
print(f"rolled = {rolled}")
print(pieces)
checkroll()
x = coords[0]
y = coords[1]
    # if the roll button was clicked, roll
if x == 5 and y == turn:
if moved:
roll()
if rolled_num ==0:
if turn == 0:
turn = 2
else:
turn = 0
moved = True
rolled = False
checkroll()
return
if coords in pieces[turn] and not moved:
if turn == 0:
opponent = 2
else:
opponent = 0
trackindex = tracks[turn].index(coords) # position on board
print(f"trackindex = {trackindex}")
indpiece = pieces[turn].index(coords) # identify piece
print(f"indpiece = {indpiece}")
t = pieces_cv[turn][indpiece] # identify canvas of piece
print(f"t = {t}")
result = trackindex + rolled_num
print(result)
if len(tracks[turn]) < result:
return
if len(tracks[turn]) == result:
pieces[turn].pop(indpiece)
pieces_cv[turn].pop(indpiece)
cv.delete(t)
score()
if len(pieces[turn]) == 0:
game_ended(turn)
endmove()
# next turn
return
coords_new = tracks[turn][trackindex+rolled_num]
newx = coords_new[0]
newy = coords_new[1]
print(f"coords_new = {coords_new}")
# special case
if [newx,newy] == [3,1] : # can't take piece there
if [newx,newy] in pieces[opponent]:
newx+=1
if [newx,newy] in pieces[turn]: # can't take own piece
return
newcoordx,newcoordy = getpossition(newx,newy)
if [newx,newy] in pieces[opponent]: # take
oppindex = pieces[opponent].index([newx,newy])
oppx,oppy = getpossition(4,opponent)
difopx = oppx - newcoordx
difopy = oppy - newcoordy
taken = pieces_cv[opponent][oppindex]
cv.move(taken,difopx,difopy) # move to start
pieces[opponent][oppindex] = [4,opponent] # set coords
print(f"{newcoordx},{newcoordy}")
oldx,oldy = getpossition(x,y)
difx = newcoordx - oldx
dify = newcoordy - oldy
cv.move(t,difx,dify)
pieces[turn][indpiece] = [newx,newy]
print("move!!")
print(f"{t},{difx},{dify}")
print(f"{pieces[turn][indpiece]}")
print(f"{pieces[turn]}")
# play again squares
playagain = [ [0,0] , [0,2] , [3,1], [6,0] ,[6,2]]
play =( [newx,newy] in playagain )
endmove(play)
return
def is_move_possible():
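    """Return True when at least one piece of the current player can legally move with rolled_num."""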
a = pieces[turn] # all pieces of player on move
road = tracks[turn]
if turn == 0:
opponent = 2
else:
opponent = 0
alreadychecked = []
for piece in a:
if piece in alreadychecked:
continue
piece_position = road.index(piece)
        if rolled_num + piece_position == len(road):  # an exact roll bears this piece off
            return True
        if rolled_num + piece_position < len(road):
            newcoords = road[piece_position+rolled_num]
            if newcoords == [3,1] : # special square check
                if newcoords in pieces[opponent]:
                    newcoords = [4,1]
            if newcoords not in a:
                return True
alreadychecked.append(piece)
return False
def forfeit():
global moved,rolled,turn
    # check that the game has not already ended
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if not rolled:
tk.messagebox.askokcancel("popup","ROLL!")
return
if rolled and is_move_possible():
tk.messagebox.askokcancel("popup","you can move!")
return
endmove()
scoretext = []
def score():
global scoretext
w = str(7 - len(pieces[0]))
b = str(7 - len(pieces[2]))
t = f"{w} : {b}"
if len(scoretext) == 0:
score = cv.create_text(220,780,font="Times 30 italic bold",text=t)
scoretext.append(score)
else:
|
# show canvas text :p
return
# RUN!
setup()
game.mainloop()
| cv.itemconfig(scoretext[0],font="Times 30 italic bold",text=t) | conditional_block |
lib.rs | //! Embed images in documentation.
//!
//! This crate enables the portable embedding of images in
//! `rustdoc`-generated documentation. Standard
//! web-compatible image formats should be supported. Please [file an issue][issue-tracker]
//! if you have problems. Read on to learn how it works.
//!
//! # Showcase
//!
//! See the [showcase documentation][showcase-docs] for an example with embedded images.
//!
//! Please also check out the [source code][showcase-source] for [the showcase crate][showcase]
//! for a fleshed out example.
//!
//! # Motivation
//!
//! A picture is worth a thousand words. This oft quoted adage is no less true for technical
//! documentation. A carefully crafted diagram lets a new user immediately
//! grasp the high-level architecture of a complex library. Illustrations of geometric conventions
//! can vastly reduce confusion among users of scientific libraries. Despite the central role
//! of images in technical documentation, embedding images in Rust documentation in a way that
//! portably works correctly across local installations and [docs.rs](https://docs.rs) has been a
//! [longstanding issue of rustdoc][rustdoc-issue].
//!
//! This crate represents a carefully crafted solution based on procedural macros that works
//! around the current limitations of `rustdoc` and enables a practically workable approach to
//! embedding images in a portable manner.
//!
//! # How to embed images in documentation
//!
//! First, you'll need to depend on this crate. In `cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! // Replace x.x with the latest version
//! embed-doc-image = "x.x"
//! ```
//!
//! What the next step is depends on whether you want to embed images into *inner attribute
//! documentation* or *outer attribute documentation*. Inner attribute documentation is usually
//! used to document crate-level or module-level documentation, and typically starts each line with
//! `//!`. Outer attribute docs are used for most other forms of documentation, such as function
//! and struct documentation. Outer attribute documentation typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! /// ![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! /// ![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //! ![Alt text goes here][myimagelabel] ![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//! ![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
ItemUse,
};
#[derive(Debug)]
struct | {
label: String,
path: PathBuf,
}
impl Parse for ImageDescription {
fn parse(input: ParseStream) -> parse::Result<Self> {
let label = input.parse::<syn::LitStr>()?;
input.parse::<syn::Token![,]>()?;
let path = input.parse::<syn::LitStr>()?;
Ok(ImageDescription {
label: label.value(),
path: PathBuf::from(path.value()),
})
}
}
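/// Read the image file at `path` and return its contents base64-encoded; panics if the file cannot be read.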
fn encode_base64_image_from_path(path: &Path) -> String {
let bytes = read(path).unwrap_or_else(|_| panic!("Failed to load image at {}", path.display()));
base64::encode(bytes)
}
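/// Map an image file extension (case-insensitive) to its MIME type; panics on unrecognised extensions.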
fn determine_mime_type(extension: &str) -> String {
let extension = extension.to_ascii_lowercase();
// TODO: Consider using the mime_guess crate? The below list does seem kinda exhaustive for
// doc purposes though?
// Matches taken haphazardly from
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
match extension.as_str() {
"jpg" | "jpeg" => "image/jpeg",
"png" => "image/png",
"bmp" => "image/bmp",
"svg" => "image/svg+xml",
"gif" => "image/gif",
"tif" | "tiff" => "image/tiff",
"webp" => "image/webp",
"ico" => "image/vnd.microsoft.icon",
_ => panic!("Unrecognized image extension, unable to infer correct MIME type"),
}
.to_string()
}
fn produce_doc_string_for_image(image_desc: &ImageDescription) -> String {
let root_dir = std::env::var("CARGO_MANIFEST_DIR")
.expect("Failed to retrieve value of CARGO_MANOFEST_DIR.");
let root_dir = Path::new(&root_dir);
let encoded = encode_base64_image_from_path(&root_dir.join(&image_desc.path));
let ext = image_desc.path.extension().unwrap_or_else(|| {
panic!(
"No extension for file {}. Unable to determine MIME type.",
image_desc.path.display()
)
});
let mime = determine_mime_type(&ext.to_string_lossy());
let doc_string = format!(
" [{label}]: data:{mime};base64,{encoded}",
label = &image_desc.label,
mime = mime,
encoded = &encoded
);
doc_string
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro]
pub fn embed_image(item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(item as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Ensure that the "image table" at the end is separated from the rest of the documentation,
// otherwise the markdown parser will not treat them as a "lookup table" for the image data
let s = format!("\n \n {}", doc_string);
let tokens = quote! {
#s
};
tokens.into()
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro_attribute]
pub fn embed_doc_image(attr: TokenStream, item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(attr as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Then inject a doc string that "resolves" the image reference and supplies the
// base64-encoded data inline
let mut input: syn::Item = syn::parse_macro_input!(item);
match input {
Item::Const(ItemConst { ref mut attrs, .. })
| Item::Enum(ItemEnum { ref mut attrs, .. })
| Item::ExternCrate(ItemExternCrate { ref mut attrs, .. })
| Item::Fn(ItemFn { ref mut attrs, .. })
| Item::ForeignMod(ItemForeignMod { ref mut attrs, .. })
| Item::Impl(ItemImpl { ref mut attrs, .. })
| Item::Macro(ItemMacro { ref mut attrs, .. })
| Item::Macro2(ItemMacro2 { ref mut attrs, .. })
| Item::Mod(ItemMod { ref mut attrs, .. })
| Item::Static(ItemStatic { ref mut attrs, .. })
| Item::Struct(ItemStruct { ref mut attrs, .. })
| Item::Trait(ItemTrait { ref mut attrs, .. })
| Item::TraitAlias(ItemTraitAlias { ref mut attrs, .. })
| Item::Type(ItemType { ref mut attrs, .. })
| Item::Union(ItemUnion { ref mut attrs, .. })
| Item::Use(ItemUse { ref mut attrs, .. }) => {
let str = doc_string;
// Insert an empty doc line to ensure that we get a blank line between the
// docs and the "bibliography" containing the actual image data.
// Otherwise the markdown parser will mess up our output.
attrs.push(syn::parse_quote! {
#[doc = ""]
});
attrs.push(syn::parse_quote! {
#[doc = #str]
});
input.into_token_stream()
}
_ => syn::Error::new_spanned(
input,
"Unsupported item. Cannot apply attribute to the given item.",
)
.to_compile_error(),
}
.into()
}
| ImageDescription | identifier_name |
lib.rs | //! Embed images in documentation.
//!
//! This crate enables the portable embedding of images in
//! `rustdoc`-generated documentation. Standard
//! web-compatible image formats should be supported. Please [file an issue][issue-tracker]
//! if you have problems. Read on to learn how it works.
//!
//! # Showcase
//!
//! See the [showcase documentation][showcase-docs] for an example with embedded images.
//!
//! Please also check out the [source code][showcase-source] for [the showcase crate][showcase]
//! for a fleshed out example.
//!
//! # Motivation
//!
//! A picture is worth a thousand words. This oft quoted adage is no less true for technical
//! documentation. A carefully crafted diagram lets a new user immediately
//! grasp the high-level architecture of a complex library. Illustrations of geometric conventions
//! can vastly reduce confusion among users of scientific libraries. Despite the central role
//! of images in technical documentation, embedding images in Rust documentation in a way that
//! portably works correctly across local installations and [docs.rs](https://docs.rs) has been a
//! [longstanding issue of rustdoc][rustdoc-issue]. | //! around the current limitations of `rustdoc` and enables a practically workable approach to
//! embedding images in a portable manner.
//!
//! # How to embed images in documentation
//!
//! First, you'll need to depend on this crate. In `cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! // Replace x.x with the latest version
//! embed-doc-image = "x.x"
//! ```
//!
//! What the next step is depends on whether you want to embed images into *inner attribute
//! documentation* or *outer attribute documentation*. Inner attribute documentation is usually
//! used to document crate-level or module-level documentation, and typically starts each line with
//! `//!`. Outer attribute docs are used for most other forms of documentation, such as function
//! and struct documentation. Outer attribute documentation typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! /// ![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! /// ![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //! ![Alt text goes here][myimagelabel] ![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//! ![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
ItemUse,
};
#[derive(Debug)]
struct ImageDescription {
label: String,
path: PathBuf,
}
impl Parse for ImageDescription {
fn parse(input: ParseStream) -> parse::Result<Self> {
let label = input.parse::<syn::LitStr>()?;
input.parse::<syn::Token![,]>()?;
let path = input.parse::<syn::LitStr>()?;
Ok(ImageDescription {
label: label.value(),
path: PathBuf::from(path.value()),
})
}
}
fn encode_base64_image_from_path(path: &Path) -> String {
let bytes = read(path).unwrap_or_else(|_| panic!("Failed to load image at {}", path.display()));
base64::encode(bytes)
}
fn determine_mime_type(extension: &str) -> String {
let extension = extension.to_ascii_lowercase();
// TODO: Consider using the mime_guess crate? The below list does seem kinda exhaustive for
// doc purposes though?
// Matches taken haphazardly from
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
match extension.as_str() {
"jpg" | "jpeg" => "image/jpeg",
"png" => "image/png",
"bmp" => "image/bmp",
"svg" => "image/svg+xml",
"gif" => "image/gif",
"tif" | "tiff" => "image/tiff",
"webp" => "image/webp",
"ico" => "image/vnd.microsoft.icon",
_ => panic!("Unrecognized image extension, unable to infer correct MIME type"),
}
.to_string()
}
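/// Build the Markdown reference definition `[label]: data:<mime>;base64,<data>` for an image, resolving its path relative to `CARGO_MANIFEST_DIR`.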
fn produce_doc_string_for_image(image_desc: &ImageDescription) -> String {
let root_dir = std::env::var("CARGO_MANIFEST_DIR")
.expect("Failed to retrieve value of CARGO_MANOFEST_DIR.");
let root_dir = Path::new(&root_dir);
let encoded = encode_base64_image_from_path(&root_dir.join(&image_desc.path));
let ext = image_desc.path.extension().unwrap_or_else(|| {
panic!(
"No extension for file {}. Unable to determine MIME type.",
image_desc.path.display()
)
});
let mime = determine_mime_type(&ext.to_string_lossy());
let doc_string = format!(
" [{label}]: data:{mime};base64,{encoded}",
label = &image_desc.label,
mime = mime,
encoded = &encoded
);
doc_string
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro]
pub fn embed_image(item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(item as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Ensure that the "image table" at the end is separated from the rest of the documentation,
// otherwise the markdown parser will not treat them as a "lookup table" for the image data
let s = format!("\n \n {}", doc_string);
let tokens = quote! {
#s
};
tokens.into()
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro_attribute]
pub fn embed_doc_image(attr: TokenStream, item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(attr as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Then inject a doc string that "resolves" the image reference and supplies the
// base64-encoded data inline
let mut input: syn::Item = syn::parse_macro_input!(item);
match input {
Item::Const(ItemConst { ref mut attrs, .. })
| Item::Enum(ItemEnum { ref mut attrs, .. })
| Item::ExternCrate(ItemExternCrate { ref mut attrs, .. })
| Item::Fn(ItemFn { ref mut attrs, .. })
| Item::ForeignMod(ItemForeignMod { ref mut attrs, .. })
| Item::Impl(ItemImpl { ref mut attrs, .. })
| Item::Macro(ItemMacro { ref mut attrs, .. })
| Item::Macro2(ItemMacro2 { ref mut attrs, .. })
| Item::Mod(ItemMod { ref mut attrs, .. })
| Item::Static(ItemStatic { ref mut attrs, .. })
| Item::Struct(ItemStruct { ref mut attrs, .. })
| Item::Trait(ItemTrait { ref mut attrs, .. })
| Item::TraitAlias(ItemTraitAlias { ref mut attrs, .. })
| Item::Type(ItemType { ref mut attrs, .. })
| Item::Union(ItemUnion { ref mut attrs, .. })
| Item::Use(ItemUse { ref mut attrs, .. }) => {
let str = doc_string;
// Insert an empty doc line to ensure that we get a blank line between the
// docs and the "bibliography" containing the actual image data.
// Otherwise the markdown parser will mess up our output.
attrs.push(syn::parse_quote! {
#[doc = ""]
});
attrs.push(syn::parse_quote! {
#[doc = #str]
});
input.into_token_stream()
}
_ => syn::Error::new_spanned(
input,
"Unsupported item. Cannot apply attribute to the given item.",
)
.to_compile_error(),
}
.into()
} | //!
//! This crate represents a carefully crafted solution based on procedural macros that works | random_line_split |
orders-alter.js | var order = {};
order.init = function(){
this.page = staticPage;
this.orderType = type;
this.picUrl = picUrl;
this.active();
this.doAction();
// this.all();
// this.unpay();
// this.unship();
// this.alship();
// this.evaluate();
// this.refund();
this.imgBox();
this.loadorderDetail();
this.odialog="<div class='dialog-order animation fadeRight'><div class='dialog-closefloor'></div><div class='dialog-content'></div> </div>";
$('body').append(this.odialog);
this.myScroll="";
var self=this;
setTimeout(function(){
self.myScrollfun();
},100)
}
// Handle button click events on the order list
order.doAction = function(){
$('.groupList').on('click','.operatebtn',function(e){
e.preventDefault();
var action = $(this).data('action');
var href = $(this).data('url');
if(action == 'jump'){
window.location.href = href;
return false;
}else if(action == 'post'){
order.doPostThings($(this));
} else if(action == 'open'){
order.showOrderDetail($(this));
}
})
}
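// Confirm and POST the action encoded in the button's data-url ("endpoint||id"):
// confirm receipt, delete an order, or cancel/delete a chargeback.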
order.doPostThings = function(obj){
var href = obj.data('url');
var parseSegment = href.split("||");
var data = {"group_id":parseSegment[1]};
if(obj.hasClass("alshipConfirm") && confirm("确认收货吗?")){
order.doPost(parseSegment[0],data,function(result){
order.commonCallBack(result,obj);
});
}else if(obj.hasClass("unpayDel") && confirm("确认删除订单吗?")){
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}else if(obj.hasClass("cancelBtn") && confirm("确认取消退单吗?")){
data = {"chargeback_id":parseSegment[1]};
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}else if(obj.hasClass("alRefundDel") && confirm("确认删除订单吗?")){ // 实际上是隐藏订单组
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}else if(obj.hasClass("delBtn") && confirm("确认删除吗?")){
data = {"chargeback_id":parseSegment[1]};
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}
}
order.doPost = function(url,data,func){
$.ajax({
url:url,
data:data,
type:'post',
dataType:'json',
success:func
});
}
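// Load the order-group detail view into the slide-in dialog and show the mask.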
order.showOrderDetail = function(obj){
var groupId = obj.closest('.orderList').data('id');
var href = "order.php?act=groupDetail&group_id="+groupId+"&r="+Math.random();
$(".dialog-order").show().html("").load(href,function(){
$(".dialog-order").removeClass("fadeHide").addClass("play").addClass("fadeRight");
$(".orderDetail-wrap .orderList").css({
"height":($(window).height()-$(".orderDetai-BtnWrap").height()-30)+"px",
"overflow":"auto"
})
$(".dialog-mask").show();
});
}
order.commonCallBack = function(result,obj){
var self=this;
if(!result.status){
alert(result.message);
return false;
}else{
        // refresh the page
obj.closest('.orderList').slideUp(100,function(){
obj.closest('.orderList').remove();
self.myScroll.refresh();
});
return false;
}
}
order.reloadCallBack = function(result,obj){
var self=this;
if(!result.status){
alert(result.message);
return false;
}else{
        // refresh the page
window.location.reload();
return false;
}
}
order.active = function(){
$(".order-head .tabs-line").width($(".tabs .swiper-slide").eq(0).width());
$('.tabBox').find('.swiper-slide').removeClass('active');
$('.tabBox').find('.'+type).addClass('active');
$(".tabBox .tabs-line").css({
"left":($(".tabs .swiper-slide").eq(0).width())*$('.tabBox').find('.'+type).index()+"px"
});
}
order.loadorderDetail = function (){
var self=this;
$(document).on("click",'.j-loadetail',function(){
var href=$(this).data("href")+"?r="+Math.random();
$(".dialog-order").show().html("").load(href,function(){
$(".dialog-order").removeClass("fadeHide").addClass("play").addClass("fadeRight");
$(".orderDetail-wrap .orderList").css({
"height":($(window).height()-$(".orderDetai-BtnWrap").height()-30)+"px",
"overflow":"auto"
})
$(".dialog-mask").show();
});
})
    // go to evaluation
$('body').on("click",".btn-evalute",function(e){
var oparent=$(e.currentTarget).closest(".orderList").find(".j-loadetail");
oparent.trigger("click");
})
    // collapse the detail dialog
$(document).on("tap",'.dialog-mask',function(){
$(".dialog-order").removeClass("fadeRight").addClass("fadeHide");
self.myScroll.refresh();
setTimeout(function(){
$('.dialog-mask').hide();
},500)
})
$(document).on("tap",".j-maskhide",function(e){
$(".dialog-order").removeClass("fadeRight").addClass("fadeHide");
self.myScroll.refresh();
setTimeout(function(){
$('.dialog-mask').hide();
},500)
//$('.dialog-mask').trigger("click");
})
}
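// Show at most three product thumbnails per order row and append a "more" placeholder when there are extra items.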
order.imgBox= function (){
var oelis='<span class="order-elips"><img src="'+staticUrl+'/img/order/elips.png"><em>更多</em></span>',ohas=true;
$(".orderList-pdmore").each(function(index,item){
var olinum=$(item).find(".order-imgBox").length,owidth=$(item).find(".order-imgBox").eq(0).width(),oheight=$(item).find(".order-imgBox").eq(0).height();
var obili=65/58,oelipswid=$(item).find(".order-imgBox").eq(0).width(),oelipsheight= oelipswid/obili,oelipslen=$(item).find(".order-elips").length;
if(oelipslen<=0){
$(item).find(".order-imgBox").parent().append(oelis);
$(".order-elips").width(oelipswid);
$(".order-elips").height(oelipsheight);
$(".order-elips").css({
"width": owidth+"px",
"height":oheight+"px"
})
if(olinum>=3){
$(item).find(".order-imgBox").hide();
for(var i=0;i<3;i++){
$(item).find(".order-imgBox").eq(i).show();
}
}
}
})
}
order.hideOrder = function(orderId,obj){
var self=this;
$.confirm("确认删除吗?",function(flag){
if(flag){
$.ajax({
url:hideOrderUrl,
type:'post',
data:{'order_id':orderId},
dataType:'json',
success:function(result){
if(result.status == 1){
obj.closest('.orderList').slideUp(100);
obj.closest('.orderList').remove();
self.myScroll.refresh();
}else{
alert('删除失败');
}
}
});
}
});
}
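// Request the next page of orders for the current tab; `func` handles the JSON response.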
order.getMoreOrder = function(self,func){
$.ajax({
'url':getMoreOrderUrl,
'data':{'page':order.page,'type':order.orderType}, | }
order.createGroupLiHtml = function(nowGroup,buttonHtml){
var orderHtmls = "";
var imagesHtmls = "";
var disableHtmls = "";
for(q in nowGroup.orderGoodsImage.pics){
disableHtmls = "";
if(nowGroup.orderGoodsImage.pics[q].disable){
disableHtmls = '<i class="icon-failstate"></i>';
}
imagesHtmls += '<span class="order-imgBox"><img src="'+order.picUrl + nowGroup.orderGoodsImage.pics[q].img+'" alt=""/>'+disableHtmls+'</span>';
}
var html = '<div class="orderList" data-type="'+nowGroup.priority.type+'" data-id="'+nowGroup.group_id+'">\
<div class="orderListBox clearfix">\
<p class="orderList-status '+nowGroup.translate.pClass+' fl">\
<i class="ordericon '+nowGroup.translate.iClass+'"></i>'+nowGroup.translate.title+'\
</p>\
<p class="orderList-ordernum fr">\
'+nowGroup.createtime+'\
</p>\
</div>\
<ul class="orderList-pdmore orderList-product">\
<li>\
<a class="j-loadetail" href="javascript:void(0);" data-href="order.php?act=groupDetail&group_id='+nowGroup.group_id+'&tag=1">\
<!--多个产品-->\
'+imagesHtmls+'\
<!--多个产品end-->\
</a>\
</li>\
</ul>\
<div class="orderList-btnBox btborder clearfix">\
<span class="totalPrice fl">\
总价:<em class="orange-icon">¥'+nowGroup.group_amount+'</em>\
</span>\
'+buttonHtml+'\
</div>\
</div>';
return html;
}
order.createNomalLiHtml = function(nowOrder,buttonHtml){
var disableHtml = "";
if(nowOrder.disable != undefined){
disableHtml = '<span class="failstate"></span>';
}
var html = '<div class="orderList" data-type="orderRefund" data-id="'+nowOrder.orders[0].order_id+'">\
<div class="orderListBox clearfix">\
<p class="orderList-status '+nowOrder.translate.pClass+' fl">\
<i class="ordericon '+nowOrder.translate.iClass+'"></i>'+nowOrder.translate.title+'\
</p>\
<p class="orderList-ordernum fr">\
'+nowOrder.orders[0].createtime+'\
</p>\
</div>\
<ul class="orderList-pdmore orderList-product">\
<li>\
<a class="j-loadetail" href="javascript:void(0);" data-href="order.php?act=orderDetail&order_id='+nowOrder.orders[0].order_id+'&tag=1">\
<!--多个产品-->\
<span class="order-imgBox"><img src="'+order.picUrl + nowOrder.orders[0].goods_image+'" alt=""/></span>\
<!--多个产品end-->\
</a>\
</li>\
</ul>\
<div class="orderList-btnBox btborder clearfix">\
<span class="totalPrice fl">\
总价:<em class="orange-icon">¥'+nowOrder.orders[0].order_amount+'</em>\
</span>\
'+buttonHtml+'\
</div>\
</div>';
return html;
}
order.createNomalButton = function(menus){
var buttonHtml = "";
for(o in menus){
buttonHtml += '<a class="operatebtn btn-orange '+menus[o].class+'" data-action="'+menus[o].action+'" href="" data-url="'+menus[o].url+'">\
'+menus[o].name+'\
</a>';
}
return buttonHtml;
}
order.groupPull = function(result,obj){
var el, li, i;
el = $("#orderwrapper .content-slide");
if(result.status == 1){
for(o in result.data.data){
var nowGroup = result.data.data[o];
var html = order.createGroupLiHtml(nowGroup,order.createNomalButton(nowGroup.Menu));
el.append(html);
}
order.page ++;
}else{
alert('哦哦,没有了');
}
obj.myScroll.refresh();
obj.imgBox();
}
order.orderPull = function(result,obj){
var el, li, i;
el = $("#orderwrapper .content-slide");
if(result.status == 1){
for(o in result.data.data){
var nowOrder = result.data.groupData[o];
var html = order.createNomalLiHtml(nowOrder,order.createNomalButton(nowOrder.Menu));
el.append(html);
}
order.page ++;
}else{
alert('哦哦,没有了');
}
obj.myScroll.refresh();
obj.imgBox();
}
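// Pull-up handler: after a short (simulated) delay, fetch and append the next page for the active tab.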
order.pullUp = function(){
var self=this;
setTimeout(function () { // <-- Simulate network congestion, remove setTimeout from production!
switch(order.orderType){
case "all":
order.getMoreOrder(self,function(ev){
order.groupPull(ev,self);
});
break;
case 'unpay':
order.getMoreOrder(self,function(ev){
order.groupPull(ev,self);
});
break;
case 'unship':
order.getMoreOrder(self,function(ev){
order.groupPull(ev,self);
});
break;
case 'alship':
order.getMoreOrder(self,function(ev){
order.groupPull(ev,self);
});
break;
case 'evaluate':
var staticHeader = "待评价";
order.getMoreOrder(self,function(ev){
order.groupPull(ev,self,staticHeader);
});
break;
case 'refund':
order.getMoreOrder(self,function(ev){
order.orderPull(ev,self);
});
break;
}
// Remember to refresh when contents are loaded (ie: on ajax completion)
}, 1000); // <-- Simulate network congestion, remove setTimeout from production!
}
order.myScrollfun=function(){
var pullUpEl = document.getElementById('pullUp');
var pullUpOffset = pullUpEl.offsetHeight;
var self=this;
this.myScroll = new iScroll('orderwrapper', {
useTransition: true,
onRefresh: function () {
if (pullUpEl.className.match('loading')) {
pullUpEl.className = '';
pullUpEl.querySelector('.pullUpLabel').innerHTML = '下拉刷新...';
}
},
onScrollMove: function () {
if(this.y < (this.maxScrollY - 5) && !pullUpEl.className.match('flip')) {
pullUpEl.className = 'flip';
pullUpEl.querySelector('.pullUpLabel').innerHTML = '释放刷新...';
this.maxScrollY = this.maxScrollY;
} else if (this.y > (this.maxScrollY + 5) && pullUpEl.className.match('flip')) {
pullUpEl.className = '';
pullUpEl.querySelector('.pullUpLabel').innerHTML = '下拉刷新...';
this.maxScrollY = pullUpOffset;
}
},
onScrollEnd: function () {
if (pullUpEl.className.match('flip')) {
pullUpEl.className = 'loading';
pullUpEl.querySelector('.pullUpLabel').innerHTML = '<div class="loading" style="text-align:center;"><i class="icon-load"></i><span>正在加载中...</span></div>';
self.pullUp(); // Execute custom function (ajax call?)
}
if(this.y>=0){
self.myScroll.refresh();
}
}
});
}
$(document).ready(function(){
order.init();
}); | 'type':'post',
'dataType':'json',
success:func
}); | random_line_split |
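The pipe-separated records above and below appear to follow a fill-in-the-middle (FIM) layout: file_name | prefix | suffix | middle | fim_type, with the extracted middle printed after the suffix. A minimal, hypothetical Go sketch of how such a record reassembles into the original source (the struct and field names are assumptions, not part of the dataset):

package main

import "fmt"

// fimRecord mirrors the apparent column layout of each dumped row.
type fimRecord struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
	FimType  string // e.g. random_line_split, identifier_name, conditional_block
}

// reassemble restores the file content: the middle belongs between the
// prefix and the suffix, even though the dump prints it last.
func reassemble(r fimRecord) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	r := fimRecord{
		FileName: "example.js",
		Prefix:   "$.ajax({\n'data':{'page':1},\n",
		Middle:   "'type':'post',\n",
		Suffix:   "});\n",
		FimType:  "random_line_split",
	}
	fmt.Print(reassemble(r))
}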
rs_handler_user.go | package main
import (
"time"
"gohappy/data"
"gohappy/game/config"
"gohappy/game/handler"
"gohappy/glog"
"gohappy/pb"
"utils"
"github.com/AsynkronIT/protoactor-go/actor"
)
//玩家数据请求处理
func (rs *RoleActor) handlerUser(msg interface{}, ctx actor.Context) {
switch msg.(type) {
case *pb.CPing:
arg := msg.(*pb.CPing)
//glog.Debugf("CPing %#v", arg)
rsp := handler.Ping(arg)
rs.Send(rsp)
case *pb.CNotice:
arg := msg.(*pb.CNotice)
glog.Debugf("CNotice %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SNotice:
arg := msg.(*pb.SNotice)
glog.Debugf("SNotice %#v", arg)
handler.PackNotice(arg)
rs.Send(arg)
case *pb.CActivity:
arg := msg.(*pb.CActivity)
glog.Debugf("CActivity %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SActivity:
arg := msg.(*pb.SActivity)
glog.Debugf("SActivity %#v", arg)
handler.PackActivity(arg)
//glog.Debugf("SActivity %#v, userid %s", arg, rs.User.GetUserid())
rs.Send(arg)
case *pb.CJoinActivity:
arg := msg.(*pb.CJoinActivity)
glog.Debugf("CJoinActivity %#v", arg)
rs.joinActivity(arg, ctx)
case *pb.CGetCurrency:
arg := msg.(*pb.CGetCurrency)
glog.Debugf("CGetCurrency %#v", arg)
//响应
rsp := handler.GetCurrency(arg, rs.User)
rs.Send(rsp)
case *pb.CBuy:
arg := msg.(*pb.CBuy)
glog.Debugf("CBuy %#v", arg)
//优化
rsp, diamond, coin := handler.Buy(arg, rs.User)
//同步兑换
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE18))
//响应
rs.Send(rsp)
record, msg2 := handler.BuyNotice(coin, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
case *pb.CShop:
arg := msg.(*pb.CShop)
glog.Debugf("CShop %#v", arg)
//响应
rsp := handler.Shop(arg, rs.User)
rs.Send(rsp)
case *pb.BankGive:
arg := msg.(*pb.BankGive)
glog.Debugf("BankGive %#v", arg)
//rs.addBank(arg.Coin, arg.Type, arg.From)
rs.addCurrency(0, arg.GetCoin(), 0, 0, arg.GetType())
if rs.gamePid != nil {
rs.gamePid.Tell(arg)
}
case *pb.CBank:
arg := msg.(*pb.CBank)
glog.Debugf("CBank %#v", arg)
rs.bank(arg)
case *pb.CRank:
arg := msg.(*pb.CRank)
glog.Debugf("CRank %#v", arg)
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.CBankLog:
arg := msg.(*pb.CBankLog)
glog.Debugf("CBankLog %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.TaskUpdate:
arg := msg.(*pb.TaskUpdate)
glog.Debugf("TaskUpdate %#v", arg)
rs.taskUpdate(arg)
case *pb.CTask:
arg := msg.(*pb.CTask)
glog.Debugf("CTask %#v", arg)
rs.task()
case *pb.LuckyUpdate:
arg := msg.(*pb.LuckyUpdate)
glog.Debugf("LuckyUpdate %#v", arg)
rs.luckyUpdate(arg)
case *pb.CLucky:
arg := msg.(*pb.CLucky)
glog.Debugf("CLucky %#v", arg)
rs.lucky()
case *pb.CTaskPrize:
arg := msg.(*pb.CTaskPrize)
glog.Debugf("CTaskPrize %#v", arg)
rs.taskPrize(arg.Type)
case *pb.CLoginPrize:
arg := msg.(*pb.CLoginPrize)
glog.Debugf("CLoginPrize %#v", arg)
rs.loginPrize(arg)
case *pb.CSignature:
arg := msg.(*pb.CSignature)
glog.Debugf("CSignature %#v", arg)
rs.setSign(arg)
case *pb.CLatLng:
arg := msg.(*pb.CLatLng)
glog.Debugf("CLatLng %#v", arg)
rs.setLatLng(arg)
case *pb.CRoomRecord:
arg := msg.(*pb.CRoomRecord)
glog.Debugf("CRoomRecord %#v", arg)
msg1 := &pb.GetRoomRecord{
Gtype: arg.Gtype,
Page: arg.Page,
Userid: rs.User.GetUserid(),
}
rs.dbmsPid.Request(msg1, ctx.Self())
case *pb.CUserData:
arg := msg.(*pb.CUserData)
glog.Debugf("CUserData %#v", arg)
userid := arg.GetUserid()
if userid == "" {
userid = rs.User.GetUserid()
}
if userid != rs.User.GetUserid() {
msg1 := new(pb.GetUserData)
msg1.Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
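// Note (illustrative, not part of the original file): the dispatch above pairs a
// bare type switch with a per-case assertion. Binding the value in the switch
// removes the repeated msg.(*pb.X) casts, e.g.:
//
//	switch arg := msg.(type) {
//	case *pb.CPing:
//		rs.Send(handler.Ping(arg))
//	case *pb.CNotice:
//		arg.Userid = rs.User.GetUserid()
//		rs.dbmsPid.Request(arg, ctx.Self())
//	}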
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
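// Illustrative note (not part of the original file): the guards above clamp a
// negative delta so the stored balance never goes below zero. For example, with
// a coin balance of 100 and a requested change of -250 the applied change is -100:
//
//	coin := int64(-250)
//	if coin < 0 && rs.User.GetCoin()+coin < 0 {
//		coin = 0 - rs.User.GetCoin() // becomes -100 when the balance is 100
//	}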
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.add | OG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.GetPhone() {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
}
}
msg.Rtype = rtype
msg.Amount = arg.GetAmount()
msg.Userid = userid
msg.Balance = rs.User.GetBank()
rs.Send(msg)
}
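// Summary (added comment, not in the original): bank() multiplexes the CBank
// request by Rtype. BankDeposit moves wallet coin into the bank, BankDraw moves
// it back out after password and minimum checks, BankGift transfers wallet coin
// to another user via bank2give, BankSelect returns the bound phone, and both
// BankOpen and BankResetPwd delegate the SMS/password check to bankCheck. Every
// branch replies with an SBank carrying Rtype, Amount, Userid and the bank Balance.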
//银行赠送
func (rs *RoleActor) bank2give(msg1 interface{}) bool {
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank give failed: %v", err1)
return false
}
if response1, ok := res1.(*pb.BankGiven); ok {
if response1.Error == pb.OK {
return true
}
glog.Errorf("BankGiven err %#v", response1)
return false
}
return false
}
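// Illustrative sketch (not part of the original file): the blocking
// request-with-timeout pattern used above could be factored into a helper,
// assuming the same protoactor-go RequestFuture API that this file already uses:
//
//	func requestSync(pid *actor.PID, msg interface{}, d time.Duration) (interface{}, error) {
//		return pid.RequestFuture(msg, d).Result()
//	}
//
// bank2give would then reduce to a type assertion on the returned value.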
//银行重置密码, 银行开通
func (rs *RoleActor) bankCheck(arg *pb.CBank) pb.ErrCode {
msg1 := &pb.BankCheck{
Userid: rs.User.GetUserid(),
Phone: arg.GetPhone(),
Password: arg.GetPassword(),
Smscode: arg.GetSmscode(),
}
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank check failed: %v", err1)
return pb.OperateError
}
if response1, ok := res1.(*pb.BankChecked); ok {
if response1.Error == pb.OK {
rs.User.BankPhone = arg.GetPhone()
rs.User.BankPassword = arg.GetPassword()
return response1.Error
}
glog.Errorf("bankCheck err %#v", response1)
return response1.Error
}
return pb.OperateError
}
//.
//'任务
//任务信息,TODO next任务不显示和重置当日任务
func (rs *RoleActor) task() {
rs.taskInit()
msg := new(pb.STask)
list := config.GetOrderTasks()
m := make(map[int32]bool)
for _, v := range list {
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
if val.Prize {
continue
}
if val.Taskid != v.Taskid {
continue
}
}
if _, ok := m[v.Type]; ok {
continue
}
msg2 := &pb.Task{
Taskid: v.Taskid,
Type: v.Type,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
}
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
msg2.Num = val.Num
}
m[v.Type] = true
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//任务奖励领取
func (rs *RoleActor) taskPrize(taskType int32) {
rs.taskInit()
glog.Debugf("task prize type %d, task %#v", taskType, rs.User.Task)
msg := new(pb.STaskPrize)
if val, ok := rs.User.Task[utils.String(taskType)]; ok {
task := config.GetTask(val.Taskid)
if val.Num < task.Count || task.Taskid != val.Taskid {
msg.Error = pb.AwardFaild
rs.Send(msg)
glog.Errorf("task prize err %d, val %#v", taskType, val)
return
}
//奖励发放
rs.addCurrency(task.Diamond, task.Coin,
0, 0, int32(pb.LOG_TYPE46))
//消息提醒
record2, msg2 := handler.TaskNotice(task.Coin, task.Name, rs.User.GetUserid())
if record2 != nil {
rs.loggerPid.Tell(record2)
}
if msg2 != nil {
rs.Send(msg2)
}
val.Prize = true
rs.User.Task[utils.String(taskType)] = val
//响应消息
msg.Type = taskType
msg.Coin = task.Coin
msg.Diamond = task.Diamond
//添加新任务
rs.nextTask(taskType, task.Nextid, msg)
//日志记录
record := &pb.LogTask{
Userid: rs.User.GetUserid(),
Taskid: val.Taskid,
Type: taskType,
}
rs.loggerPid.Tell(record)
} else {
msg.Error = pb.AwardFaild
glog.Errorf("task prize err type %d", taskType)
}
rs.Send(msg)
}
func (rs *RoleActor) nextTask(taskType, nextid int32, msg *pb.STaskPrize) {
rs.taskInit()
//TODO 任务完成日志
msg2 := handler.TaskUpdateMsg(0, pb.TaskType(taskType),
rs.User.GetUserid())
msg2.Prize = true //移除标识
msg2.Nextid = nextid
rs.rolePid.Tell(msg2)
if nextid == 0 {
return
}
//存在下个任务
delete(rs.User.Task, utils.String(taskType)) //移除
//查找
task := config.GetTask(nextid)
if task.Taskid != nextid {
return
}
msg.Next = &pb.Task{
Taskid: task.Taskid,
Type: task.Type,
Name: task.Name,
Count: task.Count,
Coin: task.Coin,
Diamond: task.Diamond,
}
//添加新任务
taskInfo := data.TaskInfo{
Taskid: task.Taskid,
Utime: time.Now(),
}
rs.User.Task[utils.String(task.Type)] = taskInfo
msg3 := handler.TaskUpdateMsg(0, pb.TaskType(task.Type),
rs.User.GetUserid())
msg3.Taskid = task.Taskid
rs.rolePid.Tell(msg3)
}
//更新任务数据
func (rs *RoleActor) taskUpdate(arg *pb.TaskUpdate) {
rs.taskInit()
taskTypeStr := utils.String(int32(arg.Type))
if val, ok := rs.User.Task[taskTypeStr]; ok {
if val.Prize {
return
}
//数值超出不再更新
task := config.GetTask(val.Taskid)
if val.Num >= task.Count {
return
}
val.Num += arg.Num
val.Utime = time.Now()
rs.User.Task[taskTypeStr] = val
rs.rolePid.Tell(arg)
} else {
list := config.GetOrderTasks()
for _, v := range list {
if v.Type != int32(arg.Type) {
continue
}
taskInfo := data.TaskInfo{
Taskid: v.Taskid,
Num: arg.Num,
Utime: time.Now(),
}
rs.User.Task[taskTypeStr] = taskInfo
rs.rolePid.Tell(arg)
break
}
}
}
func (rs *RoleActor) taskInit() {
if rs.User.Task == nil {
rs.User.Task = make(map[string]data.TaskInfo)
}
}
func (rs *RoleActor) luckyInit() {
if rs.User.Lucky == nil {
rs.User.Lucky = make(map[string]data.LuckyInfo)
}
}
//lucky信息
func (rs *RoleActor) lucky() {
rs.luckyInit()
msg := new(pb.SLucky)
list := config.GetLuckys()
for _, v := range list {
msg2 := &pb.Lucky{
Luckyid: v.Luckyid,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
Gtype: v.Gtype,
}
if val, ok := rs.User.Lucky[utils.String(v.Luckyid)]; ok {
msg2.Num = val.Num
}
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//更新lucky数据
func (rs *RoleActor) luckyUpdate(arg *pb.LuckyUpdate) {
rs.luckyInit()
luckyidStr := utils.String(int32(arg.GetLuckyid()))
lucky := config.GetLucky(arg.GetLuckyid())
if lucky.Luckyid != arg.GetLuckyid() {
return
}
if lucky.Luckyid == 0 {
return
}
if val, ok := rs.User.Lucky[luckyidStr]; ok {
//数值超出不再更新
if val.Num >= lucky.Count {
//return
}
val.Num += arg.Num
rs.User.Lucky[luckyidStr] = val
if val.Num == lucky.Count {
//奖励发放
//rs.addCurrency(lucky.Diamond, lucky.Coin, 0, 0, int32(pb.LOG_TYPE51))
//消息提醒
//record, msg2 := handler.LuckyNotice(lucky.Coin, lucky.Name, arg.Userid)
//if record != nil {
// rs.loggerPid.Tell(record)
//}
//if msg2 != nil {
// rs.Send(msg2)
//}
}
} else {
luckyInfo := data.LuckyInfo{
Luckyid: arg.GetLuckyid(),
Num: arg.Num,
}
rs.User.Lucky[luckyidStr] = luckyInfo
}
rs.rolePid.Tell(arg)
}
//.
//'签到
//更新连续登录奖励
func (rs *RoleActor) loginPrizeInit() {
//连续登录
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
//rs.User.LoginTime = utils.Stamp2Time(utils.TimestampToday() - 10)
handler.SetLoginPrize(rs.User)
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
rs.User.LoginTime = utils.BsonNow().Local()
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
//连续登录奖励处理
func (rs *RoleActor) loginPrize(arg *pb.CLoginPrize) {
msg := new(pb.SLoginPrize)
msg.Type = arg.Type
switch arg.Type {
case pb.LoginPrizeSelect:
msg.List = handler.LoginPrizeInfo(rs.User)
case pb.LoginPrizeDraw:
coin, diamond, ok := handler.GetLoginPrize(arg.Day, rs.User)
msg.Error = ok
if ok == pb.OK {
//奖励发放
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE47))
msg.List = handler.LoginPrizeInfo(rs.User)
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
}
rs.Send(msg)
}
//.
//设置个性签名
func (rs *RoleActor) setSign(arg *pb.CSignature) {
msg := new(pb.SSignature)
if len(arg.GetContent()) > 1024 {
msg.Error = pb.SignTooLong
rs.Send(msg)
return
}
msg.Userid = rs.User.GetUserid()
msg.Content = arg.GetContent()
rs.User.SetSign(arg.GetContent())
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
rs.Send(msg)
}
//设置经纬度
func (rs *RoleActor) setLatLng(arg *pb.CLatLng) {
msg := new(pb.SLatLng)
rs.User.Lat = arg.GetLat()
rs.User.Lng = arg.GetLng()
rs.User.Address = arg.GetAddress()
rs.Send(msg)
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
}
//join activity
func (rs *RoleActor) joinActivity(arg *pb.CJoinActivity, ctx actor.Context) {
if handler.IsNotAgent(rs.User) {
rsp := new(pb.SJoinActivity)
rsp.Error = pb.NotAgent
rs.Send(rsp)
return
}
arg.Selfid = rs.User.GetUserid()
act := config.GetActivity(arg.GetActid())
if act.Id != arg.GetActid() {
msg := new(pb.SJoinActivity)
msg.Error = pb.ActidError
rs.Send(msg)
return
}
rs.dbmsPid.Request(arg, ctx.Self())
}
// vim: set foldmethod=marker foldmarker=//',//.:
| Currency(0, -1*amount, 0, 0, int32(pb.L | conditional_block |
rs_handler_user.go | package main
import (
"time"
"gohappy/data"
"gohappy/game/config"
"gohappy/game/handler"
"gohappy/glog"
"gohappy/pb"
"utils"
"github.com/AsynkronIT/protoactor-go/actor"
)
//玩家数据请求处理
func (rs *RoleActor) handlerUser(msg interface{}, ctx actor.Context) {
switch msg.(type) {
case *pb.CPing:
arg := msg.(*pb.CPing)
//glog.Debugf("CPing %#v", arg)
rsp := handler.Ping(arg)
rs.Send(rsp)
case *pb.CNotice:
arg := msg.(*pb.CNotice)
glog.Debugf("CNotice %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SNotice:
arg := msg.(*pb.SNotice)
glog.Debugf("SNotice %#v", arg)
handler.PackNotice(arg)
rs.Send(arg)
case *pb.CActivity:
arg := msg.(*pb.CActivity)
glog.Debugf("CActivity %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SActivity:
arg := msg.(*pb.SActivity)
glog.Debugf("SActivity %#v", arg)
handler.PackActivity(arg)
//glog.Debugf("SActivity %#v, userid %s", arg, rs.User.GetUserid())
rs.Send(arg)
case *pb.CJoinActivity:
arg := msg.(*pb.CJoinActivity)
glog.Debugf("CJoinActivity %#v", arg)
rs.joinActivity(arg, ctx)
case *pb.CGetCurrency:
arg := msg.(*pb.CGetCurrency)
glog.Debugf("CGetCurrency %#v", arg)
//响应
rsp := handler.GetCurrency(arg, rs.User)
rs.Send(rsp)
case *pb.CBuy:
arg := msg.(*pb.CBuy)
glog.Debugf("CBuy %#v", arg)
//优化
rsp, diamond, coin := handler.Buy(arg, rs.User)
//同步兑换
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE18))
//响应
rs.Send(rsp)
record, msg2 := handler.BuyNotice(coin, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
case *pb.CShop:
arg := msg.(*pb.CShop)
glog.Debugf("CShop %#v", arg)
//响应
rsp := handler.Shop(arg, rs.User)
rs.Send(rsp)
case *pb.BankGive:
arg := msg.(*pb.BankGive)
glog.Debugf("BankGive %#v", arg)
//rs.addBank(arg.Coin, arg.Type, arg.From)
rs.addCurrency(0, arg.GetCoin(), 0, 0, arg.GetType())
if rs.gamePid != nil {
rs.gamePid.Tell(arg)
}
case *pb.CBank:
arg := msg.(*pb.CBank)
glog.Debugf("CBank %#v", arg)
rs.bank(arg)
case *pb.CRank:
arg := msg.(*pb.CRank)
glog.Debugf("CRank %#v", arg)
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.CBankLog:
arg := msg.(*pb.CBankLog)
glog.Debugf("CBankLog %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.TaskUpdate:
arg := msg.(*pb.TaskUpdate)
glog.Debugf("TaskUpdate %#v", arg)
rs.taskUpdate(arg)
case *pb.CTask:
arg := msg.(*pb.CTask)
glog.Debugf("CTask %#v", arg)
rs.task()
case *pb.LuckyUpdate:
arg := msg.(*pb.LuckyUpdate)
glog.Debugf("LuckyUpdate %#v", arg)
rs.luckyUpdate(arg)
case *pb.CLucky:
arg := msg.(*pb.CLucky)
glog.Debugf("CLucky %#v", arg)
rs.lucky()
case *pb.CTaskPrize:
arg := msg.(*pb.CTaskPrize)
glog.Debugf("CTaskPrize %#v", arg)
rs.taskPrize(arg.Type)
case *pb.CLoginPrize:
arg := msg.(*pb.CLoginPrize)
glog.Debugf("CLoginPrize %#v", arg)
rs.loginPrize(arg)
case *pb.CSignature:
arg := msg.(*pb.CSignature)
glog.Debugf("CSignature %#v", arg)
rs.setSign(arg)
case *pb.CLatLng:
arg := msg.(*pb.CLatLng)
glog.Debugf("CLatLng %#v", arg)
rs.setLatLng(arg)
case *pb.CRoomRecord:
arg := msg.(*pb.CRoomRecord)
glog.Debugf("CRoomRecord %#v", arg)
msg1 := &pb.GetRoomRecord{
Gtype: arg.Gtype,
Page: arg.Page,
Userid: rs.User.GetUserid(),
}
rs.dbmsPid.Request(msg1, ctx.Self())
case *pb.CUserData:
arg := msg.(*pb.CUserData)
glog.Debugf("CUserData %#v", arg)
userid := arg.GetUserid()
if userid == "" {
userid = rs.User.GetUserid()
}
if userid != rs.User.GetUserid() {
msg1 := new(pb.GetUserData)
msg1.Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
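// Illustrative note (not part of the original file): syncUser only fires when the
// dirty flag is set and ships the whole User as a JSON snapshot. A receiver would
// restore it with the reverse call, e.g. (assuming data.User is the same struct):
//
//	var u data.User
//	if err := json.Unmarshal(msg.Data, &u); err == nil {
//		// persist or cache the snapshot keyed by msg.Userid
//	}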
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if | User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.GetPhone() {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
}
}
msg.Rtype = rtype
msg.Amount = arg.GetAmount()
msg.Userid = userid
msg.Balance = rs.User.GetBank()
rs.Send(msg)
}
//银行赠送
func (rs *RoleActor) bank2give(msg1 interface{}) bool {
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank give failed: %v", err1)
return false
}
if response1, ok := res1.(*pb.BankGiven); ok {
if response1.Error == pb.OK {
return true
}
glog.Errorf("BankGiven err %#v", response1)
return false
}
return false
}
//银行重置密码, 银行开通
func (rs *RoleActor) bankCheck(arg *pb.CBank) pb.ErrCode {
msg1 := &pb.BankCheck{
Userid: rs.User.GetUserid(),
Phone: arg.GetPhone(),
Password: arg.GetPassword(),
Smscode: arg.GetSmscode(),
}
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank check failed: %v", err1)
return pb.OperateError
}
if response1, ok := res1.(*pb.BankChecked); ok {
if response1.Error == pb.OK {
rs.User.BankPhone = arg.GetPhone()
rs.User.BankPassword = arg.GetPassword()
return response1.Error
}
glog.Errorf("bankCheck err %#v", response1)
return response1.Error
}
return pb.OperateError
}
//.
//'任务
//任务信息,TODO next任务不显示和重置当日任务
func (rs *RoleActor) task() {
rs.taskInit()
msg := new(pb.STask)
list := config.GetOrderTasks()
m := make(map[int32]bool)
for _, v := range list {
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
if val.Prize {
continue
}
if val.Taskid != v.Taskid {
continue
}
}
if _, ok := m[v.Type]; ok {
continue
}
msg2 := &pb.Task{
Taskid: v.Taskid,
Type: v.Type,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
}
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
msg2.Num = val.Num
}
m[v.Type] = true
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//任务奖励领取
func (rs *RoleActor) taskPrize(taskType int32) {
rs.taskInit()
glog.Debugf("task prize type %d, task %#v", taskType, rs.User.Task)
msg := new(pb.STaskPrize)
if val, ok := rs.User.Task[utils.String(taskType)]; ok {
task := config.GetTask(val.Taskid)
if val.Num < task.Count || task.Taskid != val.Taskid {
msg.Error = pb.AwardFaild
rs.Send(msg)
glog.Errorf("task prize err %d, val %#v", taskType, val)
return
}
//奖励发放
rs.addCurrency(task.Diamond, task.Coin,
0, 0, int32(pb.LOG_TYPE46))
//消息提醒
record2, msg2 := handler.TaskNotice(task.Coin, task.Name, rs.User.GetUserid())
if record2 != nil {
rs.loggerPid.Tell(record2)
}
if msg2 != nil {
rs.Send(msg2)
}
val.Prize = true
rs.User.Task[utils.String(taskType)] = val
//响应消息
msg.Type = taskType
msg.Coin = task.Coin
msg.Diamond = task.Diamond
//添加新任务
rs.nextTask(taskType, task.Nextid, msg)
//日志记录
record := &pb.LogTask{
Userid: rs.User.GetUserid(),
Taskid: val.Taskid,
Type: taskType,
}
rs.loggerPid.Tell(record)
} else {
msg.Error = pb.AwardFaild
glog.Errorf("task prize err type %d", taskType)
}
rs.Send(msg)
}
func (rs *RoleActor) nextTask(taskType, nextid int32, msg *pb.STaskPrize) {
rs.taskInit()
//TODO 任务完成日志
msg2 := handler.TaskUpdateMsg(0, pb.TaskType(taskType),
rs.User.GetUserid())
msg2.Prize = true //移除标识
msg2.Nextid = nextid
rs.rolePid.Tell(msg2)
if nextid == 0 {
return
}
//存在下个任务
delete(rs.User.Task, utils.String(taskType)) //移除
//查找
task := config.GetTask(nextid)
if task.Taskid != nextid {
return
}
msg.Next = &pb.Task{
Taskid: task.Taskid,
Type: task.Type,
Name: task.Name,
Count: task.Count,
Coin: task.Coin,
Diamond: task.Diamond,
}
//添加新任务
taskInfo := data.TaskInfo{
Taskid: task.Taskid,
Utime: time.Now(),
}
rs.User.Task[utils.String(task.Type)] = taskInfo
msg3 := handler.TaskUpdateMsg(0, pb.TaskType(task.Type),
rs.User.GetUserid())
msg3.Taskid = task.Taskid
rs.rolePid.Tell(msg3)
}
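// Illustrative note (not part of the original file): tasks are chained through
// Nextid. Claiming a finished task removes its map entry and, when Nextid is
// non-zero, seeds the follow-up task under the same type key. With hypothetical
// ids, a chain behaves like:
//
//	1001 (count 5) -> prize -> 1002 (count 20) -> prize -> Nextid 0, chain ends
//
// so rs.User.Task[type] holds at most one active TaskInfo per task type.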
//更新任务数据
func (rs *RoleActor) taskUpdate(arg *pb.TaskUpdate) {
rs.taskInit()
taskTypeStr := utils.String(int32(arg.Type))
if val, ok := rs.User.Task[taskTypeStr]; ok {
if val.Prize {
return
}
//数值超出不再更新
task := config.GetTask(val.Taskid)
if val.Num >= task.Count {
return
}
val.Num += arg.Num
val.Utime = time.Now()
rs.User.Task[taskTypeStr] = val
rs.rolePid.Tell(arg)
} else {
list := config.GetOrderTasks()
for _, v := range list {
if v.Type != int32(arg.Type) {
continue
}
taskInfo := data.TaskInfo{
Taskid: v.Taskid,
Num: arg.Num,
Utime: time.Now(),
}
rs.User.Task[taskTypeStr] = taskInfo
rs.rolePid.Tell(arg)
break
}
}
}
func (rs *RoleActor) taskInit() {
if rs.User.Task == nil {
rs.User.Task = make(map[string]data.TaskInfo)
}
}
func (rs *RoleActor) luckyInit() {
if rs.User.Lucky == nil {
rs.User.Lucky = make(map[string]data.LuckyInfo)
}
}
//lucky信息
func (rs *RoleActor) lucky() {
rs.luckyInit()
msg := new(pb.SLucky)
list := config.GetLuckys()
for _, v := range list {
msg2 := &pb.Lucky{
Luckyid: v.Luckyid,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
Gtype: v.Gtype,
}
if val, ok := rs.User.Lucky[utils.String(v.Luckyid)]; ok {
msg2.Num = val.Num
}
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//更新lucky数据
func (rs *RoleActor) luckyUpdate(arg *pb.LuckyUpdate) {
rs.luckyInit()
luckyidStr := utils.String(int32(arg.GetLuckyid()))
lucky := config.GetLucky(arg.GetLuckyid())
if lucky.Luckyid != arg.GetLuckyid() {
return
}
if lucky.Luckyid == 0 {
return
}
if val, ok := rs.User.Lucky[luckyidStr]; ok {
//数值超出不再更新
if val.Num >= lucky.Count {
//return
}
val.Num += arg.Num
rs.User.Lucky[luckyidStr] = val
if val.Num == lucky.Count {
//奖励发放
//rs.addCurrency(lucky.Diamond, lucky.Coin, 0, 0, int32(pb.LOG_TYPE51))
//消息提醒
//record, msg2 := handler.LuckyNotice(lucky.Coin, lucky.Name, arg.Userid)
//if record != nil {
// rs.loggerPid.Tell(record)
//}
//if msg2 != nil {
// rs.Send(msg2)
//}
}
} else {
luckyInfo := data.LuckyInfo{
Luckyid: arg.GetLuckyid(),
Num: arg.Num,
}
rs.User.Lucky[luckyidStr] = luckyInfo
}
rs.rolePid.Tell(arg)
}
//.
//'签到
//更新连续登录奖励
func (rs *RoleActor) loginPrizeInit() {
//连续登录
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
//rs.User.LoginTime = utils.Stamp2Time(utils.TimestampToday() - 10)
handler.SetLoginPrize(rs.User)
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
rs.User.LoginTime = utils.BsonNow().Local()
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
//连续登录奖励处理
func (rs *RoleActor) loginPrize(arg *pb.CLoginPrize) {
msg := new(pb.SLoginPrize)
msg.Type = arg.Type
switch arg.Type {
case pb.LoginPrizeSelect:
msg.List = handler.LoginPrizeInfo(rs.User)
case pb.LoginPrizeDraw:
coin, diamond, ok := handler.GetLoginPrize(arg.Day, rs.User)
msg.Error = ok
if ok == pb.OK {
//奖励发放
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE47))
msg.List = handler.LoginPrizeInfo(rs.User)
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
}
rs.Send(msg)
}
//.
//设置个性签名
func (rs *RoleActor) setSign(arg *pb.CSignature) {
msg := new(pb.SSignature)
if len(arg.GetContent()) > 1024 {
msg.Error = pb.SignTooLong
rs.Send(msg)
return
}
msg.Userid = rs.User.GetUserid()
msg.Content = arg.GetContent()
rs.User.SetSign(arg.GetContent())
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
rs.Send(msg)
}
//设置经纬度
func (rs *RoleActor) setLatLng(arg *pb.CLatLng) {
msg := new(pb.SLatLng)
rs.User.Lat = arg.GetLat()
rs.User.Lng = arg.GetLng()
rs.User.Address = arg.GetAddress()
rs.Send(msg)
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
}
//join activity
func (rs *RoleActor) joinActivity(arg *pb.CJoinActivity, ctx actor.Context) {
if handler.IsNotAgent(rs.User) {
rsp := new(pb.SJoinActivity)
rsp.Error = pb.NotAgent
rs.Send(rsp)
return
}
arg.Selfid = rs.User.GetUserid()
act := config.GetActivity(arg.GetActid())
if act.Id != arg.GetActid() {
msg := new(pb.SJoinActivity)
msg.Error = pb.ActidError
rs.Send(msg)
return
}
rs.dbmsPid.Request(arg, ctx.Self())
}
// vim: set foldmethod=marker foldmarker=//',//.:
| rs. | identifier_name |
rs_handler_user.go | package main
import (
"time"
"gohappy/data"
"gohappy/game/config"
"gohappy/game/handler"
"gohappy/glog"
"gohappy/pb"
"utils"
"github.com/AsynkronIT/protoactor-go/actor"
)
//玩家数据请求处理
func (rs *RoleActor) handlerUser(msg interface{}, ctx actor.Context) {
switch msg.(type) {
case *pb.CPing:
arg := msg.(*pb.CPing)
//glog.Debugf("CPing %#v", arg)
rsp := handler.Ping(arg)
rs.Send(rsp)
case *pb.CNotice:
arg := msg.(*pb.CNotice)
glog.Debugf("CNotice %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SNotice:
arg := msg.(*pb.SNotice)
glog.Debugf("SNotice %#v", arg)
handler.PackNotice(arg)
rs.Send(arg)
case *pb.CActivity:
arg := msg.(*pb.CActivity)
glog.Debugf("CActivity %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SActivity:
arg := msg.(*pb.SActivity)
glog.Debugf("SActivity %#v", arg)
handler.PackActivity(arg)
//glog.Debugf("SActivity %#v, userid %s", arg, rs.User.GetUserid())
rs.Send(arg)
case *pb.CJoinActivity:
arg := msg.(*pb.CJoinActivity)
glog.Debugf("CJoinActivity %#v", arg) | rsp := handler.GetCurrency(arg, rs.User)
rs.Send(rsp)
case *pb.CBuy:
arg := msg.(*pb.CBuy)
glog.Debugf("CBuy %#v", arg)
//优化
rsp, diamond, coin := handler.Buy(arg, rs.User)
//同步兑换
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE18))
//响应
rs.Send(rsp)
record, msg2 := handler.BuyNotice(coin, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
case *pb.CShop:
arg := msg.(*pb.CShop)
glog.Debugf("CShop %#v", arg)
//响应
rsp := handler.Shop(arg, rs.User)
rs.Send(rsp)
case *pb.BankGive:
arg := msg.(*pb.BankGive)
glog.Debugf("BankGive %#v", arg)
//rs.addBank(arg.Coin, arg.Type, arg.From)
rs.addCurrency(0, arg.GetCoin(), 0, 0, arg.GetType())
if rs.gamePid != nil {
rs.gamePid.Tell(arg)
}
case *pb.CBank:
arg := msg.(*pb.CBank)
glog.Debugf("CBank %#v", arg)
rs.bank(arg)
case *pb.CRank:
arg := msg.(*pb.CRank)
glog.Debugf("CRank %#v", arg)
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.CBankLog:
arg := msg.(*pb.CBankLog)
glog.Debugf("CBankLog %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.TaskUpdate:
arg := msg.(*pb.TaskUpdate)
glog.Debugf("TaskUpdate %#v", arg)
rs.taskUpdate(arg)
case *pb.CTask:
arg := msg.(*pb.CTask)
glog.Debugf("CTask %#v", arg)
rs.task()
case *pb.LuckyUpdate:
arg := msg.(*pb.LuckyUpdate)
glog.Debugf("LuckyUpdate %#v", arg)
rs.luckyUpdate(arg)
case *pb.CLucky:
arg := msg.(*pb.CLucky)
glog.Debugf("CLucky %#v", arg)
rs.lucky()
case *pb.CTaskPrize:
arg := msg.(*pb.CTaskPrize)
glog.Debugf("CTaskPrize %#v", arg)
rs.taskPrize(arg.Type)
case *pb.CLoginPrize:
arg := msg.(*pb.CLoginPrize)
glog.Debugf("CLoginPrize %#v", arg)
rs.loginPrize(arg)
case *pb.CSignature:
arg := msg.(*pb.CSignature)
glog.Debugf("CSignature %#v", arg)
rs.setSign(arg)
case *pb.CLatLng:
arg := msg.(*pb.CLatLng)
glog.Debugf("CLatLng %#v", arg)
rs.setLatLng(arg)
case *pb.CRoomRecord:
arg := msg.(*pb.CRoomRecord)
glog.Debugf("CRoomRecord %#v", arg)
msg1 := &pb.GetRoomRecord{
Gtype: arg.Gtype,
Page: arg.Page,
Userid: rs.User.GetUserid(),
}
rs.dbmsPid.Request(msg1, ctx.Self())
case *pb.CUserData:
arg := msg.(*pb.CUserData)
glog.Debugf("CUserData %#v", arg)
userid := arg.GetUserid()
if userid == "" {
userid = rs.User.GetUserid()
}
if userid != rs.User.GetUserid() {
msg1 := new(pb.GetUserData)
msg1.Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.GetPhone() {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
}
}
msg.Rtype = rtype
msg.Amount = arg.GetAmount()
msg.Userid = userid
msg.Balance = rs.User.GetBank()
rs.Send(msg)
}
//银行赠送
func (rs *RoleActor) bank2give(msg1 interface{}) bool {
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank give failed: %v", err1)
return false
}
if response1, ok := res1.(*pb.BankGiven); ok {
if response1.Error == pb.OK {
return true
}
glog.Errorf("BankGiven err %#v", response1)
return false
}
return false
}
//银行重置密码, 银行开通
func (rs *RoleActor) bankCheck(arg *pb.CBank) pb.ErrCode {
msg1 := &pb.BankCheck{
Userid: rs.User.GetUserid(),
Phone: arg.GetPhone(),
Password: arg.GetPassword(),
Smscode: arg.GetSmscode(),
}
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank check failed: %v", err1)
return pb.OperateError
}
if response1, ok := res1.(*pb.BankChecked); ok {
if response1.Error == pb.OK {
rs.User.BankPhone = arg.GetPhone()
rs.User.BankPassword = arg.GetPassword()
return response1.Error
}
glog.Errorf("bankCheck err %#v", response1)
return response1.Error
}
return pb.OperateError
}
//.
//'任务
//任务信息,TODO next任务不显示和重置当日任务
func (rs *RoleActor) task() {
rs.taskInit()
msg := new(pb.STask)
list := config.GetOrderTasks()
m := make(map[int32]bool)
for _, v := range list {
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
if val.Prize {
continue
}
if val.Taskid != v.Taskid {
continue
}
}
if _, ok := m[v.Type]; ok {
continue
}
msg2 := &pb.Task{
Taskid: v.Taskid,
Type: v.Type,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
}
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
msg2.Num = val.Num
}
m[v.Type] = true
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//任务奖励领取
func (rs *RoleActor) taskPrize(taskType int32) {
rs.taskInit()
glog.Debugf("task prize type %d, task %#v", taskType, rs.User.Task)
msg := new(pb.STaskPrize)
if val, ok := rs.User.Task[utils.String(taskType)]; ok {
task := config.GetTask(val.Taskid)
if val.Num < task.Count || task.Taskid != val.Taskid {
msg.Error = pb.AwardFaild
rs.Send(msg)
glog.Errorf("task prize err %d, val %#v", taskType, val)
return
}
//奖励发放
rs.addCurrency(task.Diamond, task.Coin,
0, 0, int32(pb.LOG_TYPE46))
//消息提醒
record2, msg2 := handler.TaskNotice(task.Coin, task.Name, rs.User.GetUserid())
if record2 != nil {
rs.loggerPid.Tell(record2)
}
if msg2 != nil {
rs.Send(msg2)
}
val.Prize = true
rs.User.Task[utils.String(taskType)] = val
//响应消息
msg.Type = taskType
msg.Coin = task.Coin
msg.Diamond = task.Diamond
//添加新任务
rs.nextTask(taskType, task.Nextid, msg)
//日志记录
record := &pb.LogTask{
Userid: rs.User.GetUserid(),
Taskid: val.Taskid,
Type: taskType,
}
rs.loggerPid.Tell(record)
} else {
msg.Error = pb.AwardFaild
glog.Errorf("task prize err type %d", taskType)
}
rs.Send(msg)
}
func (rs *RoleActor) nextTask(taskType, nextid int32, msg *pb.STaskPrize) {
rs.taskInit()
//TODO 任务完成日志
msg2 := handler.TaskUpdateMsg(0, pb.TaskType(taskType),
rs.User.GetUserid())
msg2.Prize = true //移除标识
msg2.Nextid = nextid
rs.rolePid.Tell(msg2)
if nextid == 0 {
return
}
//存在下个任务
delete(rs.User.Task, utils.String(taskType)) //移除
//查找
task := config.GetTask(nextid)
if task.Taskid != nextid {
return
}
msg.Next = &pb.Task{
Taskid: task.Taskid,
Type: task.Type,
Name: task.Name,
Count: task.Count,
Coin: task.Coin,
Diamond: task.Diamond,
}
//添加新任务
taskInfo := data.TaskInfo{
Taskid: task.Taskid,
Utime: time.Now(),
}
rs.User.Task[utils.String(task.Type)] = taskInfo
msg3 := handler.TaskUpdateMsg(0, pb.TaskType(task.Type),
rs.User.GetUserid())
msg3.Taskid = task.Taskid
rs.rolePid.Tell(msg3)
}
//更新任务数据
func (rs *RoleActor) taskUpdate(arg *pb.TaskUpdate) {
rs.taskInit()
taskTypeStr := utils.String(int32(arg.Type))
if val, ok := rs.User.Task[taskTypeStr]; ok {
if val.Prize {
return
}
//数值超出不再更新
task := config.GetTask(val.Taskid)
if val.Num >= task.Count {
return
}
val.Num += arg.Num
val.Utime = time.Now()
rs.User.Task[taskTypeStr] = val
rs.rolePid.Tell(arg)
} else {
list := config.GetOrderTasks()
for _, v := range list {
if v.Type != int32(arg.Type) {
continue
}
taskInfo := data.TaskInfo{
Taskid: v.Taskid,
Num: arg.Num,
Utime: time.Now(),
}
rs.User.Task[taskTypeStr] = taskInfo
rs.rolePid.Tell(arg)
break
}
}
}
func (rs *RoleActor) taskInit() {
if rs.User.Task == nil {
rs.User.Task = make(map[string]data.TaskInfo)
}
}
func (rs *RoleActor) luckyInit() {
if rs.User.Lucky == nil {
rs.User.Lucky = make(map[string]data.LuckyInfo)
}
}
//lucky信息
func (rs *RoleActor) lucky() {
rs.luckyInit()
msg := new(pb.SLucky)
list := config.GetLuckys()
for _, v := range list {
msg2 := &pb.Lucky{
Luckyid: v.Luckyid,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
Gtype: v.Gtype,
}
if val, ok := rs.User.Lucky[utils.String(v.Luckyid)]; ok {
msg2.Num = val.Num
}
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
//更新lucky数据
func (rs *RoleActor) luckyUpdate(arg *pb.LuckyUpdate) {
rs.luckyInit()
luckyidStr := utils.String(int32(arg.GetLuckyid()))
lucky := config.GetLucky(arg.GetLuckyid())
if lucky.Luckyid != arg.GetLuckyid() {
return
}
if lucky.Luckyid == 0 {
return
}
if val, ok := rs.User.Lucky[luckyidStr]; ok {
//数值超出不再更新
if val.Num >= lucky.Count {
//return
}
val.Num += arg.Num
rs.User.Lucky[luckyidStr] = val
if val.Num == lucky.Count {
//奖励发放
//rs.addCurrency(lucky.Diamond, lucky.Coin, 0, 0, int32(pb.LOG_TYPE51))
//消息提醒
//record, msg2 := handler.LuckyNotice(lucky.Coin, lucky.Name, arg.Userid)
//if record != nil {
// rs.loggerPid.Tell(record)
//}
//if msg2 != nil {
// rs.Send(msg2)
//}
}
} else {
luckyInfo := data.LuckyInfo{
Luckyid: arg.GetLuckyid(),
Num: arg.Num,
}
rs.User.Lucky[luckyidStr] = luckyInfo
}
rs.rolePid.Tell(arg)
}
//.
//'签到
//更新连续登录奖励
func (rs *RoleActor) loginPrizeInit() {
//连续登录
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
//rs.User.LoginTime = utils.Stamp2Time(utils.TimestampToday() - 10)
handler.SetLoginPrize(rs.User)
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
rs.User.LoginTime = utils.BsonNow().Local()
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
//连续登录奖励处理
func (rs *RoleActor) loginPrize(arg *pb.CLoginPrize) {
msg := new(pb.SLoginPrize)
msg.Type = arg.Type
switch arg.Type {
case pb.LoginPrizeSelect:
msg.List = handler.LoginPrizeInfo(rs.User)
case pb.LoginPrizeDraw:
coin, diamond, ok := handler.GetLoginPrize(arg.Day, rs.User)
msg.Error = ok
if ok == pb.OK {
//奖励发放
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE47))
msg.List = handler.LoginPrizeInfo(rs.User)
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
}
rs.Send(msg)
}
//.
//设置个性签名
func (rs *RoleActor) setSign(arg *pb.CSignature) {
msg := new(pb.SSignature)
if len(arg.GetContent()) > 1024 {
msg.Error = pb.SignTooLong
rs.Send(msg)
return
}
msg.Userid = rs.User.GetUserid()
msg.Content = arg.GetContent()
rs.User.SetSign(arg.GetContent())
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
rs.Send(msg)
}
//设置经纬度
func (rs *RoleActor) setLatLng(arg *pb.CLatLng) {
msg := new(pb.SLatLng)
rs.User.Lat = arg.GetLat()
rs.User.Lng = arg.GetLng()
rs.User.Address = arg.GetAddress()
rs.Send(msg)
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
}
//join activity
func (rs *RoleActor) joinActivity(arg *pb.CJoinActivity, ctx actor.Context) {
if handler.IsNotAgent(rs.User) {
rsp := new(pb.SJoinActivity)
rsp.Error = pb.NotAgent
rs.Send(rsp)
return
}
arg.Selfid = rs.User.GetUserid()
act := config.GetActivity(arg.GetActid())
if act.Id != arg.GetActid() {
msg := new(pb.SJoinActivity)
msg.Error = pb.ActidError
rs.Send(msg)
return
}
rs.dbmsPid.Request(arg, ctx.Self())
}
// vim: set foldmethod=marker foldmarker=//',//.: | rs.joinActivity(arg, ctx)
case *pb.CGetCurrency:
arg := msg.(*pb.CGetCurrency)
glog.Debugf("CGetCurrency %#v", arg)
//响应 | random_line_split |
rs_handler_user.go | package main
import (
"time"
"gohappy/data"
"gohappy/game/config"
"gohappy/game/handler"
"gohappy/glog"
"gohappy/pb"
"utils"
"github.com/AsynkronIT/protoactor-go/actor"
)
//玩家数据请求处理
func (rs *RoleActor) handlerUser(msg interface{}, ctx actor.Context) {
switch msg.(type) {
case *pb.CPing:
arg := msg.(*pb.CPing)
//glog.Debugf("CPing %#v", arg)
rsp := handler.Ping(arg)
rs.Send(rsp)
case *pb.CNotice:
arg := msg.(*pb.CNotice)
glog.Debugf("CNotice %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SNotice:
arg := msg.(*pb.SNotice)
glog.Debugf("SNotice %#v", arg)
handler.PackNotice(arg)
rs.Send(arg)
case *pb.CActivity:
arg := msg.(*pb.CActivity)
glog.Debugf("CActivity %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.SActivity:
arg := msg.(*pb.SActivity)
glog.Debugf("SActivity %#v", arg)
handler.PackActivity(arg)
//glog.Debugf("SActivity %#v, userid %s", arg, rs.User.GetUserid())
rs.Send(arg)
case *pb.CJoinActivity:
arg := msg.(*pb.CJoinActivity)
glog.Debugf("CJoinActivity %#v", arg)
rs.joinActivity(arg, ctx)
case *pb.CGetCurrency:
arg := msg.(*pb.CGetCurrency)
glog.Debugf("CGetCurrency %#v", arg)
//响应
rsp := handler.GetCurrency(arg, rs.User)
rs.Send(rsp)
case *pb.CBuy:
arg := msg.(*pb.CBuy)
glog.Debugf("CBuy %#v", arg)
//优化
rsp, diamond, coin := handler.Buy(arg, rs.User)
//同步兑换
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE18))
//响应
rs.Send(rsp)
record, msg2 := handler.BuyNotice(coin, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
case *pb.CShop:
arg := msg.(*pb.CShop)
glog.Debugf("CShop %#v", arg)
//响应
rsp := handler.Shop(arg, rs.User)
rs.Send(rsp)
case *pb.BankGive:
arg := msg.(*pb.BankGive)
glog.Debugf("BankGive %#v", arg)
//rs.addBank(arg.Coin, arg.Type, arg.From)
rs.addCurrency(0, arg.GetCoin(), 0, 0, arg.GetType())
if rs.gamePid != nil {
rs.gamePid.Tell(arg)
}
case *pb.CBank:
arg := msg.(*pb.CBank)
glog.Debugf("CBank %#v", arg)
rs.bank(arg)
case *pb.CRank:
arg := msg.(*pb.CRank)
glog.Debugf("CRank %#v", arg)
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.CBankLog:
arg := msg.(*pb.CBankLog)
glog.Debugf("CBankLog %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.TaskUpdate:
arg := msg.(*pb.TaskUpdate)
glog.Debugf("TaskUpdate %#v", arg)
rs.taskUpdate(arg)
case *pb.CTask:
arg := msg.(*pb.CTask)
glog.Debugf("CTask %#v", arg)
rs.task()
case *pb.LuckyUpdate:
arg := msg.(*pb.LuckyUpdate)
glog.Debugf("LuckyUpdate %#v", arg)
rs.luckyUpdate(arg)
case *pb.CLucky:
arg := msg.(*pb.CLucky)
glog.Debugf("CLucky %#v", arg)
rs.lucky()
case *pb.CTaskPrize:
arg := msg.(*pb.CTaskPrize)
glog.Debugf("CTaskPrize %#v", arg)
rs.taskPrize(arg.Type)
case *pb.CLoginPrize:
arg := msg.(*pb.CLoginPrize)
glog.Debugf("CLoginPrize %#v", arg)
rs.loginPrize(arg)
case *pb.CSignature:
arg := msg.(*pb.CSignature)
glog.Debugf("CSignature %#v", arg)
rs.setSign(arg)
case *pb.CLatLng:
arg := msg.(*pb.CLatLng)
glog.Debugf("CLatLng %#v", arg)
rs.setLatLng(arg)
case *pb.CRoomRecord:
arg := msg.(*pb.CRoomRecord)
glog.Debugf("CRoomRecord %#v", arg)
msg1 := &pb.GetRoomRecord{
Gtype: arg.Gtype,
Page: arg.Page,
Userid: rs.User.GetUserid(),
}
rs.dbmsPid.Request(msg1, ctx.Self())
case *pb.CUserData:
arg := msg.(*pb.CUserData)
glog.Debugf("CUserData %#v", arg)
userid := arg.GetUserid()
if userid == "" {
userid = rs.User.GetUserid()
}
if userid != rs.User.GetUserid() {
msg1 := new(pb.GetUserData)
msg1.Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO add room data to the response
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
// spend diamonds
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
// grant currency rewards
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
// log record
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
// sync currency changes promptly
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
// message
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO robots do not write logs
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
// log
//TODO write logs centrally in dbms
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
// sync data
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { // only sync when there are changes
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'bank
// bank grant
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
// log record
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
// sync bank changes promptly
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
// 1: deposit, 2: withdraw, 3: gift
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: // deposit
if rs.User.BankPhone = | n); ok {
if response1.Error == pb.OK {
return true
}
glog.Errorf("BankGiven err %#v", response1)
return false
}
return false
}
// bank password reset, bank account opening
func (rs *RoleActor) bankCheck(arg *pb.CBank) pb.ErrCode {
msg1 := &pb.BankCheck{
Userid: rs.User.GetUserid(),
Phone: arg.GetPhone(),
Password: arg.GetPassword(),
Smscode: arg.GetSmscode(),
}
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank check failed: %v", err1)
return pb.OperateError
}
if response1, ok := res1.(*pb.BankChecked); ok {
if response1.Error == pb.OK {
rs.User.BankPhone = arg.GetPhone()
rs.User.BankPassword = arg.GetPassword()
return response1.Error
}
glog.Errorf("bankCheck err %#v", response1)
return response1.Error
}
return pb.OperateError
}
//.
//'tasks
// task info, TODO: hide the next task and reset the daily tasks
func (rs *RoleActor) task() {
rs.taskInit()
msg := new(pb.STask)
list := config.GetOrderTasks()
m := make(map[int32]bool)
for _, v := range list {
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
if val.Prize {
continue
}
if val.Taskid != v.Taskid {
continue
}
}
if _, ok := m[v.Type]; ok {
continue
}
msg2 := &pb.Task{
Taskid: v.Taskid,
Type: v.Type,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
}
if val, ok := rs.User.Task[utils.String(v.Type)]; ok {
msg2.Num = val.Num
}
m[v.Type] = true
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
// claim task rewards
func (rs *RoleActor) taskPrize(taskType int32) {
rs.taskInit()
glog.Debugf("task prize type %d, task %#v", taskType, rs.User.Task)
msg := new(pb.STaskPrize)
if val, ok := rs.User.Task[utils.String(taskType)]; ok {
task := config.GetTask(val.Taskid)
if val.Num < task.Count || task.Taskid != val.Taskid {
msg.Error = pb.AwardFaild
rs.Send(msg)
glog.Errorf("task prize err %d, val %#v", taskType, val)
return
}
// grant rewards
rs.addCurrency(task.Diamond, task.Coin,
0, 0, int32(pb.LOG_TYPE46))
// notification
record2, msg2 := handler.TaskNotice(task.Coin, task.Name, rs.User.GetUserid())
if record2 != nil {
rs.loggerPid.Tell(record2)
}
if msg2 != nil {
rs.Send(msg2)
}
val.Prize = true
rs.User.Task[utils.String(taskType)] = val
// response message
msg.Type = taskType
msg.Coin = task.Coin
msg.Diamond = task.Diamond
// add the next task
rs.nextTask(taskType, task.Nextid, msg)
// log record
record := &pb.LogTask{
Userid: rs.User.GetUserid(),
Taskid: val.Taskid,
Type: taskType,
}
rs.loggerPid.Tell(record)
} else {
msg.Error = pb.AwardFaild
glog.Errorf("task prize err type %d", taskType)
}
rs.Send(msg)
}
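// nextTask advances the task chain after a prize is claimed: it tells the role
// service to drop the finished task and, when a follow-up task exists, registers
// it for the user and attaches it to the prize response.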
func (rs *RoleActor) nextTask(taskType, nextid int32, msg *pb.STaskPrize) {
rs.taskInit()
//TODO task completion log
msg2 := handler.TaskUpdateMsg(0, pb.TaskType(taskType),
rs.User.GetUserid())
msg2.Prize = true // removal marker
msg2.Nextid = nextid
rs.rolePid.Tell(msg2)
if nextid == 0 {
return
}
// a next task exists
delete(rs.User.Task, utils.String(taskType)) // remove the old one
// look up the next task
task := config.GetTask(nextid)
if task.Taskid != nextid {
return
}
msg.Next = &pb.Task{
Taskid: task.Taskid,
Type: task.Type,
Name: task.Name,
Count: task.Count,
Coin: task.Coin,
Diamond: task.Diamond,
}
// add the new task
taskInfo := data.TaskInfo{
Taskid: task.Taskid,
Utime: time.Now(),
}
rs.User.Task[utils.String(task.Type)] = taskInfo
msg3 := handler.TaskUpdateMsg(0, pb.TaskType(task.Type),
rs.User.GetUserid())
msg3.Taskid = task.Taskid
rs.rolePid.Tell(msg3)
}
// update task data
func (rs *RoleActor) taskUpdate(arg *pb.TaskUpdate) {
rs.taskInit()
taskTypeStr := utils.String(int32(arg.Type))
if val, ok := rs.User.Task[taskTypeStr]; ok {
if val.Prize {
return
}
// stop updating once the count exceeds the target
task := config.GetTask(val.Taskid)
if val.Num >= task.Count {
return
}
val.Num += arg.Num
val.Utime = time.Now()
rs.User.Task[taskTypeStr] = val
rs.rolePid.Tell(arg)
} else {
list := config.GetOrderTasks()
for _, v := range list {
if v.Type != int32(arg.Type) {
continue
}
taskInfo := data.TaskInfo{
Taskid: v.Taskid,
Num: arg.Num,
Utime: time.Now(),
}
rs.User.Task[taskTypeStr] = taskInfo
rs.rolePid.Tell(arg)
break
}
}
}
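// taskInit lazily creates the user's task map.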
func (rs *RoleActor) taskInit() {
if rs.User.Task == nil {
rs.User.Task = make(map[string]data.TaskInfo)
}
}
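// luckyInit lazily creates the user's lucky map.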
func (rs *RoleActor) luckyInit() {
if rs.User.Lucky == nil {
rs.User.Lucky = make(map[string]data.LuckyInfo)
}
}
// lucky info
func (rs *RoleActor) lucky() {
rs.luckyInit()
msg := new(pb.SLucky)
list := config.GetLuckys()
for _, v := range list {
msg2 := &pb.Lucky{
Luckyid: v.Luckyid,
Name: v.Name,
Count: v.Count,
Coin: v.Coin,
Diamond: v.Diamond,
Gtype: v.Gtype,
}
if val, ok := rs.User.Lucky[utils.String(v.Luckyid)]; ok {
msg2.Num = val.Num
}
msg.List = append(msg.List, msg2)
}
rs.Send(msg)
}
// update lucky data
func (rs *RoleActor) luckyUpdate(arg *pb.LuckyUpdate) {
rs.luckyInit()
luckyidStr := utils.String(int32(arg.GetLuckyid()))
lucky := config.GetLucky(arg.GetLuckyid())
if lucky.Luckyid != arg.GetLuckyid() {
return
}
if lucky.Luckyid == 0 {
return
}
if val, ok := rs.User.Lucky[luckyidStr]; ok {
// stop updating once the count exceeds the target
if val.Num >= lucky.Count {
//return
}
val.Num += arg.Num
rs.User.Lucky[luckyidStr] = val
if val.Num == lucky.Count {
// grant rewards
//rs.addCurrency(lucky.Diamond, lucky.Coin, 0, 0, int32(pb.LOG_TYPE51))
// notification
//record, msg2 := handler.LuckyNotice(lucky.Coin, lucky.Name, arg.Userid)
//if record != nil {
// rs.loggerPid.Tell(record)
//}
//if msg2 != nil {
// rs.Send(msg2)
//}
}
} else {
luckyInfo := data.LuckyInfo{
Luckyid: arg.GetLuckyid(),
Num: arg.Num,
}
rs.User.Lucky[luckyidStr] = luckyInfo
}
rs.rolePid.Tell(arg)
}
//.
//'sign-in
// update consecutive-login rewards
func (rs *RoleActor) loginPrizeInit() {
// consecutive logins
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
//rs.User.LoginTime = utils.Stamp2Time(utils.TimestampToday() - 10)
handler.SetLoginPrize(rs.User)
glog.Debugf("userid %s, LoginTime %s", rs.User.GetUserid(),
utils.Time2Str(rs.User.LoginTime.Local()))
glog.Debugf("userid %s, LoginTimes %d, LoginPrize %d",
rs.User.GetUserid(), rs.User.LoginTimes, rs.User.LoginPrize)
rs.User.LoginTime = utils.BsonNow().Local()
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
// handle consecutive-login rewards
func (rs *RoleActor) loginPrize(arg *pb.CLoginPrize) {
msg := new(pb.SLoginPrize)
msg.Type = arg.Type
switch arg.Type {
case pb.LoginPrizeSelect:
msg.List = handler.LoginPrizeInfo(rs.User)
case pb.LoginPrizeDraw:
coin, diamond, ok := handler.GetLoginPrize(arg.Day, rs.User)
msg.Error = ok
if ok == pb.OK {
// grant rewards
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE47))
msg.List = handler.LoginPrizeInfo(rs.User)
msg := handler.LoginPrizeUpdateMsg(rs.User)
rs.rolePid.Tell(msg)
}
}
rs.Send(msg)
}
//.
// set personal signature
func (rs *RoleActor) setSign(arg *pb.CSignature) {
msg := new(pb.SSignature)
if len(arg.GetContent()) > 1024 {
msg.Error = pb.SignTooLong
rs.Send(msg)
return
}
msg.Userid = rs.User.GetUserid()
msg.Content = arg.GetContent()
rs.User.SetSign(arg.GetContent())
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
rs.Send(msg)
}
// set latitude and longitude
func (rs *RoleActor) setLatLng(arg *pb.CLatLng) {
msg := new(pb.SLatLng)
rs.User.Lat = arg.GetLat()
rs.User.Lng = arg.GetLng()
rs.User.Address = arg.GetAddress()
rs.Send(msg)
arg.Userid = rs.User.GetUserid()
rs.rolePid.Tell(arg)
}
//join activity
func (rs *RoleActor) joinActivity(arg *pb.CJoinActivity, ctx actor.Context) {
if handler.IsNotAgent(rs.User) {
rsp := new(pb.SJoinActivity)
rsp.Error = pb.NotAgent
rs.Send(rsp)
return
}
arg.Selfid = rs.User.GetUserid()
act := config.GetActivity(arg.GetActid())
if act.Id != arg.GetActid() {
msg := new(pb.SJoinActivity)
msg.Error = pb.ActidError
rs.Send(msg)
return
}
rs.dbmsPid.Request(arg, ctx.Self())
}
// vim: set foldmethod=marker foldmarker=//',//.:
| = "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: // withdraw
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: // gift
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE15))
// recharge notification
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: // query
msg.Phone = rs.User.BankPhone
case pb.BankOpen: // open account
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
// grant rewards
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
// notification
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: // reset password
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.GetPhone() {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
}
}
msg.Rtype = rtype
msg.Amount = arg.GetAmount()
msg.Userid = userid
msg.Balance = rs.User.GetBank()
rs.Send(msg)
}
// bank gift
func (rs *RoleActor) bank2give(msg1 interface{}) bool {
timeout := 3 * time.Second
res1, err1 := rs.rolePid.RequestFuture(msg1, timeout).Result()
if err1 != nil {
glog.Errorf("bank give failed: %v", err1)
return false
}
if response1, ok := res1.(*pb.BankGive | identifier_body |
ppo_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import logging
from collections import deque, defaultdict
from typing import Dict, List, Any
import json
import random
import glob
import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from numpy.linalg import norm
from habitat import Config, logger
from ss_baselines.common.utils import observations_to_image
from ss_baselines.common.base_trainer import BaseRLTrainer
from ss_baselines.common.baseline_registry import baseline_registry
from ss_baselines.common.env_utils import construct_envs
from ss_baselines.common.environments import get_env_class
from ss_baselines.common.rollout_storage import RolloutStorage
from ss_baselines.common.tensorboard_utils import TensorboardWriter
from ss_baselines.common.utils import (
batch_obs,
generate_video,
linear_decay,
plot_top_down_map,
resize_observation,
NpEncoder
)
from ss_baselines.av_nav.ppo.policy import AudioNavBaselinePolicy
from ss_baselines.av_nav.ppo.ppo import PPO
from ss_baselines.savi.ppo.slurm_utils import (
EXIT,
REQUEUE,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
class DataParallelPassthrough(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
@baseline_registry.register_trainer(name="av_nav_ppo")
class PPOTrainer(BaseRLTrainer):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
self._static_smt_encoder = False
self._encoder = None
def _setup_actor_critic_agent(self, ppo_cfg: Config, observation_space=None) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
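# optionally warm-start the visual, goal and action encoders from a pretrained checkpoint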
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
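# search_dict pulls the sub-state-dict belonging to one named encoder out of a full checkpoint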
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
|
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
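# _collect_rollout_step samples one action per parallel env, steps the envs and
# stores observations, rewards and done-masks into the rollout buffer.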
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
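# _update_agent bootstraps the value of the last observation, computes (GAE) returns
# and runs the PPO update on the collected rollout.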
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
start_update = 0
prev_time = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
interrupted_state = load_interrupted_state(model_dir=self.config.MODEL_DIR)
if interrupted_state is not None:
self.agent.load_state_dict(interrupted_state["state_dict"])
self.agent.optimizer.load_state_dict(
interrupted_state["optimizer_state"]
)
lr_scheduler.load_state_dict(interrupted_state["lr_scheduler_state"])
requeue_stats = interrupted_state["requeue_stats"]
env_time = requeue_stats["env_time"]
pth_time = requeue_stats["pth_time"]
count_steps = requeue_stats["count_steps"]
count_checkpoints = requeue_stats["count_checkpoints"]
start_update = requeue_stats["start_update"]
prev_time = requeue_stats["prev_time"]
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(start_update, self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
if EXIT.is_set():
self.envs.close()
if REQUEUE.is_set():
requeue_stats = dict(
env_time=env_time,
pth_time=pth_time,
count_steps=count_steps,
count_checkpoints=count_checkpoints,
start_update=update,
prev_time=(time.time() - t_start) + prev_time,
)
save_interrupted_state(
dict(
state_dict=self.agent.state_dict(),
optimizer_state=self.agent.optimizer.state_dict(),
lr_scheduler_state=lr_scheduler.state_dict(),
config=self.config,
requeue_stats=requeue_stats,
),
model_dir=self.config.MODEL_DIR
)
requeue_job()
return
for step in range(ppo_cfg.num_steps):
delta_pth_time, delta_env_time, delta_steps = self._collect_rollout_step(
rollouts,
current_episode_reward,
running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
delta_pth_time, value_loss, action_loss, dist_entropy = self._update_agent(
ppo_cfg, rollouts
)
pth_time += delta_pth_time
# append the latest running episode stats to the sliding window before computing deltas
for k, v in running_episode_stats.items():
window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"Metrics/reward", deltas["reward"] / deltas["count"], count_steps
)
# Check to see if there are any metrics
# that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
# writer.add_scalars("metrics", metrics, count_steps)
for metric, value in metrics.items():
writer.add_scalar(f"Metrics/{metric}", value, count_steps)
writer.add_scalar("Policy/value_loss", value_loss, count_steps)
writer.add_scalar("Policy/policy_loss", action_loss, count_steps)
writer.add_scalar("Policy/entropy_loss", dist_entropy, count_steps)
writer.add_scalar('Policy/learning_rate', lr_scheduler.get_lr()[0], count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(f"ckpt.{count_checkpoints}.pth")
count_checkpoints += 1
self.envs.close()
| checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update | identifier_body |
ppo_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import logging
from collections import deque, defaultdict
from typing import Dict, List, Any
import json
import random
import glob
import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from numpy.linalg import norm
from habitat import Config, logger
from ss_baselines.common.utils import observations_to_image
from ss_baselines.common.base_trainer import BaseRLTrainer
from ss_baselines.common.baseline_registry import baseline_registry
from ss_baselines.common.env_utils import construct_envs
from ss_baselines.common.environments import get_env_class
from ss_baselines.common.rollout_storage import RolloutStorage
from ss_baselines.common.tensorboard_utils import TensorboardWriter
from ss_baselines.common.utils import (
batch_obs,
generate_video,
linear_decay,
plot_top_down_map,
resize_observation,
NpEncoder
)
from ss_baselines.av_nav.ppo.policy import AudioNavBaselinePolicy
from ss_baselines.av_nav.ppo.ppo import PPO
from ss_baselines.savi.ppo.slurm_utils import (
EXIT,
REQUEUE,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
class DataParallelPassthrough(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
@baseline_registry.register_trainer(name="av_nav_ppo")
class PPOTrainer(BaseRLTrainer):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
self._static_smt_encoder = False
self._encoder = None
def _setup_actor_critic_agent(self, ppo_cfg: Config, observation_space=None) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict( | count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
start_update = 0
prev_time = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
interrupted_state = load_interrupted_state(model_dir=self.config.MODEL_DIR)
if interrupted_state is not None:
self.agent.load_state_dict(interrupted_state["state_dict"])
self.agent.optimizer.load_state_dict(
interrupted_state["optimizer_state"]
)
lr_scheduler.load_state_dict(interrupted_state["lr_scheduler_state"])
requeue_stats = interrupted_state["requeue_stats"]
env_time = requeue_stats["env_time"]
pth_time = requeue_stats["pth_time"]
count_steps = requeue_stats["count_steps"]
count_checkpoints = requeue_stats["count_checkpoints"]
start_update = requeue_stats["start_update"]
prev_time = requeue_stats["prev_time"]
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(start_update, self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
if EXIT.is_set():
self.envs.close()
if REQUEUE.is_set():
requeue_stats = dict(
env_time=env_time,
pth_time=pth_time,
count_steps=count_steps,
count_checkpoints=count_checkpoints,
start_update=update,
prev_time=(time.time() - t_start) + prev_time,
)
save_interrupted_state(
dict(
state_dict=self.agent.state_dict(),
optimizer_state=self.agent.optimizer.state_dict(),
lr_scheduler_state=lr_scheduler.state_dict(),
config=self.config,
requeue_stats=requeue_stats,
),
model_dir=self.config.MODEL_DIR
)
requeue_job()
return
for step in range(ppo_cfg.num_steps):
delta_pth_time, delta_env_time, delta_steps = self._collect_rollout_step(
rollouts,
current_episode_reward,
running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
delta_pth_time, value_loss, action_loss, dist_entropy = self._update_agent(
ppo_cfg, rollouts
)
pth_time += delta_pth_time
# append the latest running episode stats to the sliding window before computing deltas
for k, v in running_episode_stats.items():
window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"Metrics/reward", deltas["reward"] / deltas["count"], count_steps
)
# Check to see if there are any metrics
# that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
# writer.add_scalars("metrics", metrics, count_steps)
for metric, value in metrics.items():
writer.add_scalar(f"Metrics/{metric}", value, count_steps)
writer.add_scalar("Policy/value_loss", value_loss, count_steps)
writer.add_scalar("Policy/policy_loss", action_loss, count_steps)
writer.add_scalar("Policy/entropy_loss", dist_entropy, count_steps)
writer.add_scalar('Policy/learning_rate', lr_scheduler.get_lr()[0], count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(f"ckpt.{count_checkpoints}.pth")
count_checkpoints += 1
self.envs.close() | random_line_split |
|
ppo_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import logging
from collections import deque, defaultdict
from typing import Dict, List, Any
import json
import random
import glob
import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from numpy.linalg import norm
from habitat import Config, logger
from ss_baselines.common.utils import observations_to_image
from ss_baselines.common.base_trainer import BaseRLTrainer
from ss_baselines.common.baseline_registry import baseline_registry
from ss_baselines.common.env_utils import construct_envs
from ss_baselines.common.environments import get_env_class
from ss_baselines.common.rollout_storage import RolloutStorage
from ss_baselines.common.tensorboard_utils import TensorboardWriter
from ss_baselines.common.utils import (
batch_obs,
generate_video,
linear_decay,
plot_top_down_map,
resize_observation,
NpEncoder
)
from ss_baselines.av_nav.ppo.policy import AudioNavBaselinePolicy
from ss_baselines.av_nav.ppo.ppo import PPO
from ss_baselines.savi.ppo.slurm_utils import (
EXIT,
REQUEUE,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
class DataParallelPassthrough(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
@baseline_registry.register_trainer(name="av_nav_ppo")
class PPOTrainer(BaseRLTrainer):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
self._static_smt_encoder = False
self._encoder = None
def _setup_actor_critic_agent(self, ppo_cfg: Config, observation_space=None) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def | (
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
start_update = 0
prev_time = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
interrupted_state = load_interrupted_state(model_dir=self.config.MODEL_DIR)
if interrupted_state is not None:
self.agent.load_state_dict(interrupted_state["state_dict"])
self.agent.optimizer.load_state_dict(
interrupted_state["optimizer_state"]
)
lr_scheduler.load_state_dict(interrupted_state["lr_scheduler_state"])
requeue_stats = interrupted_state["requeue_stats"]
env_time = requeue_stats["env_time"]
pth_time = requeue_stats["pth_time"]
count_steps = requeue_stats["count_steps"]
count_checkpoints = requeue_stats["count_checkpoints"]
start_update = requeue_stats["start_update"]
prev_time = requeue_stats["prev_time"]
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(start_update, self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
if EXIT.is_set():
self.envs.close()
if REQUEUE.is_set():
requeue_stats = dict(
env_time=env_time,
pth_time=pth_time,
count_steps=count_steps,
count_checkpoints=count_checkpoints,
start_update=update,
prev_time=(time.time() - t_start) + prev_time,
)
save_interrupted_state(
dict(
state_dict=self.agent.state_dict(),
optimizer_state=self.agent.optimizer.state_dict(),
lr_scheduler_state=lr_scheduler.state_dict(),
config=self.config,
requeue_stats=requeue_stats,
),
model_dir=self.config.MODEL_DIR
)
requeue_job()
return
for step in range(ppo_cfg.num_steps):
delta_pth_time, delta_env_time, delta_steps = self._collect_rollout_step(
rollouts,
current_episode_reward,
running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
delta_pth_time, value_loss, action_loss, dist_entropy = self._update_agent(
ppo_cfg, rollouts
)
pth_time += delta_pth_time
# append the latest running episode stats to the sliding window before computing deltas
for k, v in running_episode_stats.items():
window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"Metrics/reward", deltas["reward"] / deltas["count"], count_steps
)
# Check to see if there are any metrics
# that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
# writer.add_scalars("metrics", metrics, count_steps)
for metric, value in metrics.items():
writer.add_scalar(f"Metrics/{metric}", value, count_steps)
writer.add_scalar("Policy/value_loss", value_loss, count_steps)
writer.add_scalar("Policy/policy_loss", action_loss, count_steps)
writer.add_scalar("Policy/entropy_loss", dist_entropy, count_steps)
writer.add_scalar('Policy/learning_rate', lr_scheduler.get_lr()[0], count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(f"ckpt.{count_checkpoints}.pth")
count_checkpoints += 1
self.envs.close()
| _extract_scalars_from_infos | identifier_name |
ppo_trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import logging
from collections import deque, defaultdict
from typing import Dict, List, Any
import json
import random
import glob
import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from numpy.linalg import norm
from habitat import Config, logger
from ss_baselines.common.utils import observations_to_image
from ss_baselines.common.base_trainer import BaseRLTrainer
from ss_baselines.common.baseline_registry import baseline_registry
from ss_baselines.common.env_utils import construct_envs
from ss_baselines.common.environments import get_env_class
from ss_baselines.common.rollout_storage import RolloutStorage
from ss_baselines.common.tensorboard_utils import TensorboardWriter
from ss_baselines.common.utils import (
batch_obs,
generate_video,
linear_decay,
plot_top_down_map,
resize_observation,
NpEncoder
)
from ss_baselines.av_nav.ppo.policy import AudioNavBaselinePolicy
from ss_baselines.av_nav.ppo.ppo import PPO
from ss_baselines.savi.ppo.slurm_utils import (
EXIT,
REQUEUE,
load_interrupted_state,
requeue_job,
save_interrupted_state,
)
class DataParallelPassthrough(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
@baseline_registry.register_trainer(name="av_nav_ppo")
class PPOTrainer(BaseRLTrainer):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
self._static_smt_encoder = False
self._encoder = None
def _setup_actor_critic_agent(self, ppo_cfg: Config, observation_space=None) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update
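    # Note: checkpoints are assumed to be named "ckpt.<id>.pth" (as written by
    # save_checkpoint below) and to live in a folder whose path contains no extra
    # "." characters; both the sorting key and the ckpt_id parsing above rely on
    # that naming convention.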
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
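    # Illustrative behaviour of the flattening above (hypothetical metric names):
    #   info = {"distance_to_goal": 2.5, "spl": 0.8, "collisions": {"count": 3.0}}
    #   _extract_scalars_from_info(info)
    #   -> {"distance_to_goal": 2.5, "spl": 0.8, "collisions.count": 3.0}
    # Blacklisted keys (e.g. "top_down_map") and string-valued entries are skipped.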
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
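        # masks[i] == 0.0 marks an environment whose episode just finished, so
        # (1 - masks) below selects exactly the finished episodes when folding the
        # accumulated reward and episode count into running_episode_stats.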
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
start_update = 0
prev_time = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
interrupted_state = load_interrupted_state(model_dir=self.config.MODEL_DIR)
if interrupted_state is not None:
self.agent.load_state_dict(interrupted_state["state_dict"])
self.agent.optimizer.load_state_dict(
interrupted_state["optimizer_state"]
)
lr_scheduler.load_state_dict(interrupted_state["lr_scheduler_state"])
requeue_stats = interrupted_state["requeue_stats"]
env_time = requeue_stats["env_time"]
pth_time = requeue_stats["pth_time"]
count_steps = requeue_stats["count_steps"]
count_checkpoints = requeue_stats["count_checkpoints"]
start_update = requeue_stats["start_update"]
prev_time = requeue_stats["prev_time"]
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(start_update, self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
if EXIT.is_set():
self.envs.close()
if REQUEUE.is_set():
requeue_stats = dict(
env_time=env_time,
pth_time=pth_time,
count_steps=count_steps,
count_checkpoints=count_checkpoints,
start_update=update,
prev_time=(time.time() - t_start) + prev_time,
)
save_interrupted_state(
dict(
state_dict=self.agent.state_dict(),
optimizer_state=self.agent.optimizer.state_dict(),
lr_scheduler_state=lr_scheduler.state_dict(),
config=self.config,
requeue_stats=requeue_stats,
),
model_dir=self.config.MODEL_DIR
)
requeue_job()
return
for step in range(ppo_cfg.num_steps):
delta_pth_time, delta_env_time, delta_steps = self._collect_rollout_step(
rollouts,
current_episode_reward,
running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
delta_pth_time, value_loss, action_loss, dist_entropy = self._update_agent(
ppo_cfg, rollouts
)
                pth_time += delta_pth_time
                # fold the running episode stats into the sliding window that the
                # per-window deltas below are computed from
                for k, v in running_episode_stats.items():
                    window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"Metrics/reward", deltas["reward"] / deltas["count"], count_steps
)
# Check to see if there are any metrics
# that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
# writer.add_scalars("metrics", metrics, count_steps)
|
writer.add_scalar("Policy/value_loss", value_loss, count_steps)
writer.add_scalar("Policy/policy_loss", action_loss, count_steps)
writer.add_scalar("Policy/entropy_loss", dist_entropy, count_steps)
writer.add_scalar('Policy/learning_rate', lr_scheduler.get_lr()[0], count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(f"ckpt.{count_checkpoints}.pth")
count_checkpoints += 1
self.envs.close()
| for metric, value in metrics.items():
writer.add_scalar(f"Metrics/{metric}", value, count_steps) | conditional_block |
imaging-multibeam.py | #!/usr/bin/env python
import os
import sys
import numpy
import math
import glob
import shutil
import lofar.parameterset
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp
from pyrap.tables import table
from utility import run_process
from utility import time_code
from utility import get_parset_subset
from utility import make_directory
from utility import copy_to_work_area
from utility import run_awimager
from utility import run_ndppp
from utility import run_calibrate_standalone
from utility import clear_calibrate_stand_alone_logs
from utility import find_bad_stations
from utility import strip_stations
from utility import limit_baselines
from utility import estimate_noise
from utility import make_mask
from utility import read_ms_list
# All temporary writes go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
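    # For example (hypothetical configuration), band_size = [20, 20, 22] would give
    # sbs_per_beam = 62, i.e. 62 MeasurementSets are expected per beam, and the same
    # number of calibrator subbands is checked for below.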
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
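            # The sky model is selected purely by pointing centre: REFERENCE_DIR is
            # converted from radians to degrees and formatted to two decimals, so a
            # (hypothetical) pointing of (123.40, 45.60) maps to the file
            # "123.40_45.60.skymodel" inside skymodel_dir.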
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
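    # At this point ms_target maps keys of the form "SAP00<beam>_band<band>" (for
    # example the key "SAP000_band0" for the first beam and band) to a dict holding
    # the input subbands, the matching calibrator subbands, the output MS and image
    # paths and the per-pointing sky model.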
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch) |
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms"],
noise_parset_name,
maxbl,
input_parset.getFloat("noise.box_size"),
scratch
)
print "Threshold for %s is %f Jy" % (target_info["output_ms"], target_info["threshold"])
# Make a mask for cleaning
aw_parset_name = get_parset_subset(input_parset, "image.parset", scratch)
with time_code("Making mask"):
for target_info in ms_target.values():
print "Making mask for %s" % target_info["output_ms"]
target_info["mask"] = make_mask(
target_info["bl_limit_ms"],
aw_parset_name,
target_info["skymodel"],
input_parset.getString("make_mask.executable"),
scratch,
awim_init=awim_init
)
with time_code("Making images"):
for target_info in ms_target.values():
print "Making image %s" % target_info["output_im"]
print run_awimager(aw_parset_name,
{
"ms": target_info["bl_limit_ms"],
"mask": target_info["mask"],
"threshold": "%fJy" % (target_info["threshold"],),
"image": target_info["output_im"],
"wmax": maxbl
},
initscript=awim_init
)
            print "Updating metadata in %s" % target_info["output_im"]
run_process(
"addImagingInfo",
"%s.restored.corr" % target_info["output_im"],
"", # No sky model specified
"0",
str(maxbl),
target_info["output_ms"]
)
print "Saving mask for %s to %s" % (target_info["output_im"], target_info["output_im"] + ".mask")
shutil.copytree(target_info["mask"], target_info["output_im"] + ".mask") | limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values()) | random_line_split |
imaging-multibeam.py | #!/usr/bin/env python
import os
import sys
import numpy
import math
import glob
import shutil
import lofar.parameterset
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp
from pyrap.tables import table
from utility import run_process
from utility import time_code
from utility import get_parset_subset
from utility import make_directory
from utility import copy_to_work_area
from utility import run_awimager
from utility import run_ndppp
from utility import run_calibrate_standalone
from utility import clear_calibrate_stand_alone_logs
from utility import find_bad_stations
from utility import strip_stations
from utility import limit_baselines
from utility import estimate_noise
from utility import make_mask
from utility import read_ms_list
# All temporary writes go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
|
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms"],
noise_parset_name,
maxbl,
input_parset.getFloat("noise.box_size"),
scratch
)
print "Threshold for %s is %f Jy" % (target_info["output_ms"], target_info["threshold"])
# Make a mask for cleaning
aw_parset_name = get_parset_subset(input_parset, "image.parset", scratch)
with time_code("Making mask"):
for target_info in ms_target.values():
print "Making mask for %s" % target_info["output_ms"]
target_info["mask"] = make_mask(
target_info["bl_limit_ms"],
aw_parset_name,
target_info["skymodel"],
input_parset.getString("make_mask.executable"),
scratch,
awim_init=awim_init
)
with time_code("Making images"):
for target_info in ms_target.values():
print "Making image %s" % target_info["output_im"]
print run_awimager(aw_parset_name,
{
"ms": target_info["bl_limit_ms"],
"mask": target_info["mask"],
"threshold": "%fJy" % (target_info["threshold"],),
"image": target_info["output_im"],
"wmax": maxbl
},
initscript=awim_init
)
            print "Updating metadata in %s" % target_info["output_im"]
run_process(
"addImagingInfo",
"%s.restored.corr" % target_info["output_im"],
"", # No sky model specified
"0",
str(maxbl),
target_info["output_ms"]
)
print "Saving mask for %s to %s" % (target_info["output_im"], target_info["output_im"] + ".mask")
shutil.copytree(target_info["mask"], target_info["output_im"] + ".mask")
| target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl) | identifier_body |
imaging-multibeam.py | #!/usr/bin/env python
import os
import sys
import numpy
import math
import glob
import shutil
import lofar.parameterset
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp
from pyrap.tables import table
from utility import run_process
from utility import time_code
from utility import get_parset_subset
from utility import make_directory
from utility import copy_to_work_area
from utility import run_awimager
from utility import run_ndppp
from utility import run_calibrate_standalone
from utility import clear_calibrate_stand_alone_logs
from utility import find_bad_stations
from utility import strip_stations
from utility import limit_baselines
from utility import estimate_noise
from utility import make_mask
from utility import read_ms_list
# All temporary writes go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def | (target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms"],
noise_parset_name,
maxbl,
input_parset.getFloat("noise.box_size"),
scratch
)
print "Threshold for %s is %f Jy" % (target_info["output_ms"], target_info["threshold"])
# Make a mask for cleaning
aw_parset_name = get_parset_subset(input_parset, "image.parset", scratch)
with time_code("Making mask"):
for target_info in ms_target.values():
print "Making mask for %s" % target_info["output_ms"]
target_info["mask"] = make_mask(
target_info["bl_limit_ms"],
aw_parset_name,
target_info["skymodel"],
input_parset.getString("make_mask.executable"),
scratch,
awim_init=awim_init
)
with time_code("Making images"):
for target_info in ms_target.values():
print "Making image %s" % target_info["output_im"]
print run_awimager(aw_parset_name,
{
"ms": target_info["bl_limit_ms"],
"mask": target_info["mask"],
"threshold": "%fJy" % (target_info["threshold"],),
"image": target_info["output_im"],
"wmax": maxbl
},
initscript=awim_init
)
            print "Updating metadata in %s" % target_info["output_im"]
run_process(
"addImagingInfo",
"%s.restored.corr" % target_info["output_im"],
"", # No sky model specified
"0",
str(maxbl),
target_info["output_ms"]
)
print "Saving mask for %s to %s" % (target_info["output_im"], target_info["output_im"] + ".mask")
shutil.copytree(target_info["mask"], target_info["output_im"] + ".mask")
| combine_ms | identifier_name |
imaging-multibeam.py | #!/usr/bin/env python
import os
import sys
import numpy
import math
import glob
import shutil
import lofar.parameterset
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp
from pyrap.tables import table
from utility import run_process
from utility import time_code
from utility import get_parset_subset
from utility import make_directory
from utility import copy_to_work_area
from utility import run_awimager
from utility import run_ndppp
from utility import run_calibrate_standalone
from utility import clear_calibrate_stand_alone_logs
from utility import find_bad_stations
from utility import strip_stations
from utility import limit_baselines
from utility import estimate_noise
from utility import make_mask
from utility import read_ms_list
# All temporary writes go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
|
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms"],
noise_parset_name,
maxbl,
input_parset.getFloat("noise.box_size"),
scratch
)
print "Threshold for %s is %f Jy" % (target_info["output_ms"], target_info["threshold"])
# Make a mask for cleaning
aw_parset_name = get_parset_subset(input_parset, "image.parset", scratch)
with time_code("Making mask"):
for target_info in ms_target.values():
print "Making mask for %s" % target_info["output_ms"]
target_info["mask"] = make_mask(
target_info["bl_limit_ms"],
aw_parset_name,
target_info["skymodel"],
input_parset.getString("make_mask.executable"),
scratch,
awim_init=awim_init
)
with time_code("Making images"):
for target_info in ms_target.values():
print "Making image %s" % target_info["output_im"]
print run_awimager(aw_parset_name,
{
"ms": target_info["bl_limit_ms"],
"mask": target_info["mask"],
"threshold": "%fJy" % (target_info["threshold"],),
"image": target_info["output_im"],
"wmax": maxbl
},
initscript=awim_init
)
            print "Updating metadata in %s" % target_info["output_im"]
run_process(
"addImagingInfo",
"%s.restored.corr" % target_info["output_im"],
"", # No sky model specified
"0",
str(maxbl),
target_info["output_ms"]
)
print "Saving mask for %s to %s" % (target_info["output_im"], target_info["output_im"] + ".mask")
shutil.copytree(target_info["mask"], target_info["output_im"] + ".mask")
| shutil.copy(logfile, target_info["output_dir"]) | conditional_block |
bressan_computerscience.py | # -*- coding: utf-8 -*-
"""BRESSAN_ComputerScience
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ueH8-8StvLf_jngtgChgeeIzwrVHLfZW
# "Foundations of Computer Science" course (F9101Q001)
## Final Project
**Matteo Bressan - 765957**
---
The current project refers to [this provided guideline](http://gianluca.dellavedova.org/foundationsCS/2019-project)
####0. **Common part - Libraries, configurations and files import**
"""
import pandas as pd
import numpy as np
from datetime import datetime
from calendar import isleap
from google.colab import drive
drive.mount('/content/drive')
loans_lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/loans_lenders.csv'
loans_lenders_import = pd.read_csv(loans_lenders_url)
loans_lenders_import.dtypes
loans_lenders_import.head(2)
loans_url = '/content/drive/My Drive/additional-kiva-snapshot/loans.csv'
loans_import = pd.read_csv(loans_url)
loans_import.dtypes
loans_import.head(2)
lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/lenders.csv'
lenders_import = pd.read_csv(lenders_url)
lenders_import.dtypes
lenders_import.head(5)
country_stats_url = '/content/drive/My Drive/additional-kiva-snapshot/country_stats.csv'
country_stats_import = pd.read_csv(country_stats_url)
country_stats_import.dtypes
country_stats_import.head(5)
"""## Questions
####1. **Normalize the loan_lenders table. In the normalized table, each row must have one loan_id and one lender.**
First of all, I split the _lenders_ column into a list of lender names
"""
loans_lenders_import['lenders'] = loans_lenders_import.lenders.apply(lambda x: x.split(','))
loans_lenders_import.head(2)
"""Then, I can explode the _lenders_ variable.
Please note: ".drop_duplicates()" is used to avoid duplicated lenders for the same loan_id, if present in the original _lenders_ array
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
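# Illustrative effect of the normalization above (hypothetical values):
#   before:  loan_id=483693  lenders=['muc888', 'sam4326']
#   after:   loan_id=483693  lenders='muc888'
#            loan_id=483693  lenders='sam4326'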
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with the _errors="coerce"_ option the system will set to NaT (missing) all values that cannot be converted.
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
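# Note: the resulting _duration_ column holds pandas Timedelta values; whenever either
# date is missing (or failed to parse and became NaT), the subtraction propagates NaT,
# which satisfies the "duration must be missing" requirement.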
"""####3. **Find the lenders that have funded at least twice.**"""
lender_fundings = loans_lenders.groupby('lenders').size().reset_index(name='fundings')
lender_fundings[lender_fundings['fundings'] >= 2]
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan pair the corresponding loan's details, so I need to join the two datasets. To avoid running out of RAM, I reduce the number of variables selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, each loan's amount is split evenly among its lenders (following the equal-contribution assumption stated above) before grouping by lender to obtain the overall amount of money lent"""
lender_loan_details['lender_share'] = lender_loan_details['loan_amount'] / lender_loan_details.groupby('loan_id')['lenders'].transform('count')
lender_loan_details.groupby('lenders')['lender_share'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ datasets by lender name, removing lenders without an associated country code
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join the obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
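# Each row of the grouped frame pairs a lender country with a borrower country;
# overall_founds is the summed amount of the loans connecting them, counted once for
# every lender country involved in a loan (amounts are not split between lenders here).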
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
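"""The country with the highest ratio can then be read off directly, for example:"""
lender_loan_country_group_stats1.nlargest(1, 'population_ratio')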
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
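"""As above, the country with the highest ratio can be read off directly:"""
lender_loan_country_group_stats2.nlargest(1, 'population_weighed_ratio')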
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits the amount across the years.
"""
def | (row):
start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp
# adding the days between the end date and the first day of the last year
dayss.append(((end_date - start_date).days) + 1)
# calculating the portion of value each period gets
if sum(dayss) > 0:
return [(x*value)/sum(dayss) for x in dayss]
else:
return value
else:
return value
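"""As a quick sanity check, the worked example from the question can be fed through the function defined above (the timestamps are created timezone-aware here so that tz_localize(None) behaves as it does on the real data):"""
example = pd.Series({'disburse_time': pd.Timestamp('2016-12-01', tz='UTC'),
                     'planned_expiration_time': pd.Timestamp('2018-01-30', tz='UTC'),
                     'loan_amount': 5000.0})
divide_value_by_period(example)  # expected: roughly [363.85, 4284.04, 352.11]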
"""Now, we can apply the funciton to the dataset, removing rows where one of the 2 dates are missing.
I also apply a check on overall duration, to remove issues (duration <= 0 days)
"""
time_loans = loans_import[loans_import.disburse_time.notnull() & loans_import.planned_expiration_time.notnull()]
time_loans = time_loans[time_loans.duration > pd.Timedelta(0,'D')]
time_loans['YearSplit'] = time_loans.apply(divide_value_by_period, axis=1)
time_loans.head(5) | divide_value_by_period | identifier_name |
bressan_computerscience.py | # -*- coding: utf-8 -*-
"""BRESSAN_ComputerScience
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ueH8-8StvLf_jngtgChgeeIzwrVHLfZW
# "Foundations of Computer Science" course (F9101Q001)
## Final Project
**Matteo Bressan - 765957**
---
The current project refers to [this provided guideline](http://gianluca.dellavedova.org/foundationsCS/2019-project)
####0. **Common part - Libraries, configurations and files import**
"""
import pandas as pd
import numpy as np
from datetime import datetime
from calendar import isleap
from google.colab import drive
drive.mount('/content/drive')
loans_lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/loans_lenders.csv'
loans_lenders_import = pd.read_csv(loans_lenders_url)
loans_lenders_import.dtypes
loans_lenders_import.head(2)
loans_url = '/content/drive/My Drive/additional-kiva-snapshot/loans.csv'
loans_import = pd.read_csv(loans_url)
loans_import.dtypes
loans_import.head(2)
lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/lenders.csv'
lenders_import = pd.read_csv(lenders_url)
lenders_import.dtypes
lenders_import.head(5)
country_stats_url = '/content/drive/My Drive/additional-kiva-snapshot/country_stats.csv'
country_stats_import = pd.read_csv(country_stats_url)
country_stats_import.dtypes
country_stats_import.head(5)
"""## Questions
####1. **Normalize the loan_lenders table. In the normalized table, each row must have one loan_id and one lender.**
First of all, I cast the _lenders_ variable as an array
"""
loans_lenders_import['lenders'] = loans_lenders_import.lenders.apply(lambda x: x.split(','))
loans_lenders_import.head(2)
"""Then, I can explode _lenders_ variable.
Please note: ".drop_duplicates()" is used to avoid duplicated lenders for load_in, if present in the original _lenders_ array
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with _errors="coerce"_ option the system will set to NaN all values that cannot be converted.
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
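"""If a plain number of days is preferred, the timedelta column can also be viewed as integers (shown here without overwriting _duration_, which is reused later):"""
loans_import['duration'].dt.days.head(5)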
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
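"""The same per-year figures can also be laid out as a country-by-year table, for example:"""
country_year_loans_amount.pivot_table(index='country_code', columns='disburse_year', values='overall_founds', aggfunc='sum').head(5)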
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of columns selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ datasets by lender name, removing lenders without an associated country code
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits the amount across the years.
"""
def divide_value_by_period(row):
|
"""Now, we can apply the funciton to the dataset, removing rows where one of the 2 dates are missing.
I also apply a check on overall duration, to remove issues (duration <= 0 days)
"""
time_loans = loans_import[loans_import.disburse_time.notnull() & loans_import.planned_expiration_time.notnull()]
time_loans = time_loans[time_loans.duration > pd.Timedelta(0,'D')]
time_loans['YearSplit'] = time_loans.apply(divide_value_by_period, axis=1)
time_loans.head(5) | start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp
# adding the days between the end date and the first day of the last year
dayss.append(((end_date - start_date).days) + 1)
# calculating the portion of value each period gets
if sum(dayss) > 0:
return [(x*value)/sum(dayss) for x in dayss]
else:
return value
else:
return value | identifier_body |
bressan_computerscience.py | # -*- coding: utf-8 -*-
"""BRESSAN_ComputerScience
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ueH8-8StvLf_jngtgChgeeIzwrVHLfZW
# "Foundations of Computer Science" course (F9101Q001)
## Final Project
**Matteo Bressan - 765957**
---
The current project refers to [this provided guideline](http://gianluca.dellavedova.org/foundationsCS/2019-project)
####0. **Common part - Libraries, configurations and files import**
"""
import pandas as pd
import numpy as np
from datetime import datetime
from calendar import isleap
from google.colab import drive
drive.mount('/content/drive')
loans_lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/loans_lenders.csv'
loans_lenders_import = pd.read_csv(loans_lenders_url)
loans_lenders_import.dtypes
loans_lenders_import.head(2)
loans_url = '/content/drive/My Drive/additional-kiva-snapshot/loans.csv'
loans_import = pd.read_csv(loans_url)
loans_import.dtypes
loans_import.head(2)
lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/lenders.csv'
lenders_import = pd.read_csv(lenders_url)
lenders_import.dtypes
lenders_import.head(5)
country_stats_url = '/content/drive/My Drive/additional-kiva-snapshot/country_stats.csv'
country_stats_import = pd.read_csv(country_stats_url)
country_stats_import.dtypes
country_stats_import.head(5)
"""## Questions
####1. **Normalize the loan_lenders table. In the normalized table, each row must have one loan_id and one lender.**
First of all, I cast the _lenders_ variable as an array
"""
loans_lenders_import['lenders'] = loans_lenders_import.lenders.apply(lambda x: x.split(','))
loans_lenders_import.head(2)
"""Then, I can explode _lenders_ variable.
Please note: ".drop_duplicates()" is used to avoid duplicated lenders for load_in, if present in the original _lenders_ array
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with _errors="coerce"_ option the system will set to NaN all values that cannot be converted.
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of columns selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ datasets by lender name, removing lenders without an associated country code
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code'] |
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
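"""The country with the highest ratio can then be read off directly, for example:"""
lender_loan_country_group_stats1.nlargest(1, 'population_ratio')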
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits the amount across the years.
"""
def divide_value_by_period(row):
start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp
# adding the days between the end date and the first day of the last year
dayss.append(((end_date - start_date).days) + 1)
# calculating the portion of value each period gets
if sum(dayss) > 0:
return [(x*value)/sum(dayss) for x in dayss]
else:
return value
else:
return value
"""Now, we can apply the funciton to the dataset, removing rows where one of the 2 dates are missing.
I also apply a check on overall duration, to remove issues (duration <= 0 days)
"""
time_loans = loans_import[loans_import.disburse_time.notnull() & loans_import.planned_expiration_time.notnull()]
time_loans = time_loans[time_loans.duration > pd.Timedelta(0,'D')]
time_loans['YearSplit'] = time_loans.apply(divide_value_by_period, axis=1)
time_loans.head(5) |
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5) | random_line_split |
bressan_computerscience.py | # -*- coding: utf-8 -*-
"""BRESSAN_ComputerScience
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ueH8-8StvLf_jngtgChgeeIzwrVHLfZW
# "Foundations of Computer Science" course (F9101Q001)
## Final Project
**Matteo Bressan - 765957**
---
The current project refers to [this provided guideline](http://gianluca.dellavedova.org/foundationsCS/2019-project)
####0. **Common part - Libraries, configurations and files import**
"""
import pandas as pd
import numpy as np
from datetime import datetime
from calendar import isleap
from google.colab import drive
drive.mount('/content/drive')
loans_lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/loans_lenders.csv'
loans_lenders_import = pd.read_csv(loans_lenders_url)
loans_lenders_import.dtypes
loans_lenders_import.head(2)
loans_url = '/content/drive/My Drive/additional-kiva-snapshot/loans.csv'
loans_import = pd.read_csv(loans_url)
loans_import.dtypes
loans_import.head(2)
lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/lenders.csv'
lenders_import = pd.read_csv(lenders_url)
lenders_import.dtypes
lenders_import.head(5)
country_stats_url = '/content/drive/My Drive/additional-kiva-snapshot/country_stats.csv'
country_stats_import = pd.read_csv(country_stats_url)
country_stats_import.dtypes
country_stats_import.head(5)
"""## Questions
####1. **Normalize the loan_lenders table. In the normalized table, each row must have one loan_id and one lender.**
First of all, I cast the _lenders_ variable as an array
"""
loans_lenders_import['lenders'] = loans_lenders_import.lenders.apply(lambda x: x.split(','))
loans_lenders_import.head(2)
"""Then, I can explode _lenders_ variable.
Please note: ".drop_duplicates()" is used to avoid duplicated lenders for load_in, if present in the original _lenders_ array
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with _errors="coerce"_ option the system will set to NaN all values that cannot be converted.
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
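"""The same per-year figures can also be laid out as a country-by-year table, for example:"""
country_year_loans_amount.pivot_table(index='country_code', columns='disburse_year', values='overall_founds', aggfunc='sum').head(5)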
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of columns selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ datasets by lender name, removing lenders without an associated country code
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits the amount across the years.
"""
def divide_value_by_period(row):
start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
|
else:
return value
"""Now, we can apply the funciton to the dataset, removing rows where one of the 2 dates are missing.
I also apply a check on overall duration, to remove issues (duration <= 0 days)
"""
time_loans = loans_import[loans_import.disburse_time.notnull() & loans_import.planned_expiration_time.notnull()]
time_loans = time_loans[time_loans.duration > pd.Timedelta(0,'D')]
time_loans['YearSplit'] = time_loans.apply(divide_value_by_period, axis=1)
time_loans.head(5) | dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp
# adding the days between the end date and the first day of the last year
dayss.append(((end_date - start_date).days) + 1)
# calculating the portion of value each period gets
if sum(dayss) > 0:
return [(x*value)/sum(dayss) for x in dayss]
else:
return value | conditional_block |
titanic-alpha-attempt.py | #!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be be calculated simply via the mean survive value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories, 1, 2 and 3. The three categories have an order (representing socioeconomic status) but although the categories are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is a nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 are given as fractions of a year (e.g. 0.42 is about 5 months).
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates decrease with age within (0, 25] and within (25, 100]. That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
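# In[ ]:
# A rough sketch of such a (child, man, woman) feature; illustrative only, it is not used in the models below.
def person_type(row):
    if row['Age'] < 16:
        return 'child'
    return 'man' if row['Sex'] == 'male' else 'woman'
train_df[['Age', 'Sex']].apply(person_type, axis=1).value_counts()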
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by family size
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival rate by family size
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
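# In[ ]:
# An optional sketch (not used in the models below): bin family size into Alone / Small / Large
# to capture the drop in survival for families of five or more.
family_bins = pd.cut(train_df['FamilySize'], [0, 1, 4, 20], labels=['Alone', 'Small', 'Large'])
train_df.groupby(family_bins)['Survived'].mean()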
# Deck
# ----
#
# Cabin might be conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def | (dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most occurred value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This is type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forrest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and ouput pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. This can be done by calculating the coefficient of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *Fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may be worth investigating whether any relationship between these attributes and survival can be detected, especially for *Fare*.
#
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
#
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
#
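# The overlapping-binning idea above might look roughly like the sketch below. This is only an
# illustration: the `AgesA`/`AgesB` prefixes are made-up names, and in practice these features would
# need to be built before the Age column is dropped, so the raw csv is re-read here.
# In[ ]:
raw_ages = pd.read_csv("../input/train.csv")['Age']
bins_a = pd.get_dummies(pd.cut(raw_ages, [0,4,15,25,35,45,65,100]), prefix='AgesA')
bins_b = pd.get_dummies(pd.cut(raw_ages, [10,20,30,40,55,100]), prefix='AgesB')
overlapping_age_features = pd.concat([bins_a, bins_b], axis=1)  # two overlapping binnings side by side
overlapping_age_features.head()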
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
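# As a rough sketch of the refitting suggestion (the parameter grid below is an arbitrary example,
# not a tuned recommendation), sklearn's GridSearchCV can search SVC hyper-parameters using the same
# 10-fold cross validation as above.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10], 'gamma': [0.01, 0.1, 1]}
grid = GridSearchCV(SVC(), param_grid, cv=10)
grid.fit(X_train, Y_train)
print(grid.best_params_, round(grid.best_score_ * 100, 2))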
#!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be calculated simply via the mean Survived value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories: 1, 2 and 3. The categories have an order (representing socioeconomic status), but although they are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over-16s is fairly noisy; it is possible that survival increases with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates increase *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
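# A minimal sketch of such a feature (the name `Person` is made up here and is not used in the
# pipeline below); passengers with a missing Age simply fall through to man/woman.
# In[ ]:
def to_person(row):
    if row['Age'] < 16:
        return 'child'
    return 'man' if row['Sex'] == 'male' else 'woman'
person = train_df.apply(to_person, axis=1)  # derived Series, train_df itself is left unchanged
train_df.groupby(person)['Survived'].mean()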
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by age group and sex
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by age group and sex
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
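# One common follow-up (sketch only, with illustrative bucket labels; not used in the pipeline below)
# is to collapse FamilySize into the coarse categories suggested by the plot above.
# In[ ]:
family_type = pd.cut(train_df['FamilySize'], bins=[0, 1, 4, 20],
                     labels=['alone', 'small', 'large'])
train_df.groupby(family_type)['Survived'].agg(['mean', 'size'])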
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from Cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
    na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
    na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
    return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most occurred value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and output pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions about feature creation and completion. This can be done by calculating the coefficients of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *Fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may be worth investigating whether any relationship between these attributes and survival can be detected, especially for *Fare*.
#
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
#
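# A sketch of the Title idea (illustrative only; Name has already been dropped from train_df above,
# so the raw csv is re-read here rather than pretending the column still exists).
# In[ ]:
names = pd.read_csv("../input/train.csv")['Name']
titles = names.str.extract(r' ([A-Za-z]+)\.', expand=False)  # e.g. 'Braund, Mr. Owen Harris' -> 'Mr'
titles.value_counts().head(8)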
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
#
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
#!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be calculated simply via the mean Survived value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories: 1, 2 and 3. The categories have an order (representing socioeconomic status), but although they are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over-16s is fairly noisy; it is possible that survival increases with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates increase *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by age group and sex
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by age group and sex
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from Cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most occurred value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and output pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions about feature creation and completion. This can be done by calculating the coefficients of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *Fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may be worth investigating whether any relationship between these attributes and survival can be detected, especially for *Fare*.
#
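# A sketch of the floored-Fare idea (Fare was dropped from the feature set above, so the raw column
# is re-read here; to actually use it, keep Fare out of the drop list earlier in the pipeline).
# In[ ]:
raw_fare = pd.read_csv("../input/train.csv")['Fare']
floored_fare = raw_fare.fillna(raw_fare.median()).astype(int)  # floor to whole dollars
floored_fare.describe()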
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
#
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
#
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
#!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be calculated simply via the mean Survived value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories: 1, 2 and 3. The categories have an order (representing socioeconomic status), but although they are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over-16s is fairly noisy; it is possible that survival increases with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates increase *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by age group and sex
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by age group and sex
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from Cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
|
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male; that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most frequent value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This type of learning is supervised learning, since the model is trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume conditional independence between features (Naive Bayes), and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
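#
# The next cell is an illustrative sketch only (it is not used by the rest of the pipeline): a linear model can only capture the sex-specific age effect if it is given explicit interaction features, for example the product of Female with each age-group dummy.
# In[ ]:
age_cols = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
# row-wise product: each age-group dummy multiplied by the Female indicator
interactions = train_df[age_cols].multiply(train_df['Female'], axis=0)
interactions.head()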
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and output pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (the label) and one or more independent variables (the features) by estimating probabilities using a logistic function, which is the cumulative distribution function of the logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions about feature creation and completion. This can be done by inspecting the coefficients of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score highly, but we prefer Random Forest because it is less prone to overfitting the training set than a single decision tree and is therefore more likely to perform well on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *Fare* is floored and to 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may be worth investigating whether any relationship between these attributes and survival can be detected, especially for *Fare*.
#
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
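#
# The next cell is an illustrative sketch only: extracting Title from Name. Name was dropped from train_df above, so the raw training data is re-read first; the file path follows the usual Kaggle layout and is an assumption here.
# In[ ]:
raw_train = pd.read_csv('../input/train.csv')   # assumed path
# the regex grabs the word immediately before a '.' (e.g. Mr, Mrs, Miss, Master)
titles = raw_train['Name'].str.extract(r' ([A-Za-z]+)\.', expand=False)
titles.value_counts().head(10)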
#
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
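#
# The next cell is an illustrative sketch only: a second, overlapping age binning alongside the original AgeGroup cut. It reuses raw_train from the sketch above, since Age was dropped from train_df.
# In[ ]:
alt_groups = pd.cut(raw_train['Age'], [10, 20, 30, 40, 55, 100])
pd.get_dummies(alt_groups, prefix='AltAges').head()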
#
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
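#
# The next cell is an illustrative sketch only: a small grid search over Random Forest hyper-parameters using sklearn's GridSearchCV; the parameter grid shown is an arbitrary example, not a recommendation.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {'n_estimators': [50, 100, 200], 'max_depth': [3, 5, None]}
grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=10)
grid.fit(X_train, Y_train)
grid.best_params_, round(grid.best_score_ * 100, 2)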
| relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq) | conditional_block |
contacts-details.component.ts | import { Component, OnInit, Input, forwardRef, ViewChild, OnDestroy, Inject, ChangeDetectionStrategy, ChangeDetectorRef, OnChanges, SimpleChanges } from '@angular/core';
import { FormControl, FormGroup, Validators, FormBuilder, NG_VALUE_ACCESSOR, ControlValueAccessor } from '@angular/forms';
import { HttpClient, HttpHeaders, HttpParams } from '@angular/common/http';
import { TimeSheetService, GlobalService, view, ClientService, StaffService, ListService, UploadService, contactGroups, days, gender, types, titles, caldStatuses, roles, ShareService } from '@services/index';
import * as _ from 'lodash';
import { mergeMap, takeUntil, concatMap, switchMap } from 'rxjs/operators';
import { EMPTY,Subject } from 'rxjs';
import { TitleCasePipe } from '@angular/common';
import { ProfileInterface} from '@modules/modules';
const noop = () => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges,ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes : Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
ngOnChanges(changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
// split a combined "postcode suburb" string back into its postcode and suburb parts
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 && (this.globalS.isEmpty(type) || this.globalS.isEmpty(name)) ) {
return false;
}
return true;
}
add() {
if (this.inputForm.controls['suburbcode'].dirty) {
var rs = this.inputForm.get('suburbcode').value;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0].trim() : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].trim() : "";
let state = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[1].replace(/,/g, '').trim() : "";
if (pcode !== "") {
this.inputForm.controls["postcode"].setValue(pcode);
this.inputForm.controls["suburb"].setValue(suburb);
this.inputForm.controls["state"].setValue(state);
}
}
if (this.inputForm.get('oni1').value) {
this.inputForm.controls['ecode'].setValue('PERSON1')
} else if (this.inputForm.get('oni2').value) {
this.inputForm.controls['ecode'].setValue('PERSON2')
}
this.timeS.postcontactskinstaffdetails(
this.inputForm.value,
this.user.id
).pipe(takeUntil(this.unsubscribe)).subscribe(data => {
this.globalS.sToast('Success', 'Contact Inserted');
this.handleCancel();
this.searchKin(this.user);
});
}
delete() {
this.timeS.deletecontactskin(this.kindetailsGroup.value.recordNumber).subscribe(data => { |
handleCancel() {
this.modalOpen = false;
this.inputForm.reset();
this.current = 0;
}
pre() {
this.current -= 1;
}
next() {
this.current += 1;
}
} | this.globalS.sToast('Success', 'Contact Deleted');
this.searchKin(this.user);
});
} | random_line_split |
contacts-details.component.ts | import { Component, OnInit, Input, forwardRef, ViewChild, OnDestroy, Inject, ChangeDetectionStrategy, ChangeDetectorRef, OnChanges, SimpleChanges } from '@angular/core';
import { FormControl, FormGroup, Validators, FormBuilder, NG_VALUE_ACCESSOR, ControlValueAccessor } from '@angular/forms';
import { HttpClient, HttpHeaders, HttpParams } from '@angular/common/http';
import { TimeSheetService, GlobalService, view, ClientService, StaffService, ListService, UploadService, contactGroups, days, gender, types, titles, caldStatuses, roles, ShareService } from '@services/index';
import * as _ from 'lodash';
import { mergeMap, takeUntil, concatMap, switchMap } from 'rxjs/operators';
import { EMPTY,Subject } from 'rxjs';
import { TitleCasePipe } from '@angular/common';
import { ProfileInterface} from '@modules/modules';
const noop = () => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges,ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes : Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
ngOnChanges(changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate() |
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
// split a combined "postcode suburb" string back into its postcode and suburb parts
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 && (this.globalS.isEmpty(type) || this.globalS.isEmpty(name)) ) {
return false;
}
return true;
}
add() {
if (this.inputForm.controls['suburbcode'].dirty) {
var rs = this.inputForm.get('suburbcode').value;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0].trim() : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].trim() : "";
let state = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[1].replace(/,/g, '').trim() : "";
if (pcode !== "") {
this.inputForm.controls["postcode"].setValue(pcode);
this.inputForm.controls["suburb"].setValue(suburb);
this.inputForm.controls["state"].setValue(state);
}
}
if (this.inputForm.get('oni1').value) {
this.inputForm.controls['ecode'].setValue('PERSON1')
} else if (this.inputForm.get('oni2').value) {
this.inputForm.controls['ecode'].setValue('PERSON2')
}
this.timeS.postcontactskinstaffdetails(
this.inputForm.value,
this.user.id
).pipe(takeUntil(this.unsubscribe)).subscribe(data => {
this.globalS.sToast('Success', 'Contact Inserted');
this.handleCancel();
this.searchKin(this.user);
});
}
delete() {
this.timeS.deletecontactskin(this.kindetailsGroup.value.recordNumber).subscribe(data => {
this.globalS.sToast('Success', 'Contact Deleted');
this.searchKin(this.user);
});
}
handleCancel() {
this.modalOpen = false;
this.inputForm.reset();
this.current = 0;
}
pre() {
this.current -= 1;
}
next() {
this.current += 1;
}
}
| {
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
} | identifier_body |