| file_name (string, 4-140 chars) | prefix (string, 0-12.1k chars) | suffix (string, 0-12k chars) | middle (string, 0-7.51k chars) | fim_type (string, 4 classes) |
|---|---|---|---|---|
storage_service.go | _, _ = ss.Engine.Get(&user)
userCache[tag] = user
}
list[i].UserId = user.Id
list[i].Tenant = user.UserName
list[i].CpuTotal = user.CpuAll
list[i].MemTotal = int(user.MemAll)
pod := models.Instance{Id: list[i].PodId}
_, _ = ss.Engine.Cols("name").Get(&pod)
list[i].PodName = pod.Name
sc := models.Sc{Id: list[i].ScId}
_, _ = ss.Engine.Cols("name").Get(&sc)
list[i].SCName = sc.Name
}
return
}
func (ss *storageService) CreateMysqlByPV(pvId int, storageMap map[string]interface{}, remark string, userId int, mysqlName string, qos *models.Qos) (err error) {
nameExist, _ := ss.Engine.Where("is_deploy = true").Exist(&models.ClusterInstance{Name: mysqlName})
if nameExist {
return errors.New("This name already exists ")
}
exist, _ := ss.Engine.Exist(&models.ClusterInstance{PvId: pvId})
if exist {
return errors.New("The MySQL created by the current pv already exists ")
}
dbPV := models.PersistentVolume{Id: pvId}
_, err = ss.Engine.Get(&dbPV)
if err != nil {
return
}
// Check whether the pv exists
err, pvSource := ss.cs.GetResources("pv", dbPV.Name, ss.cs.GetNameSpace(), meta1.GetOptions{})
if err != nil {
return fmt.Errorf("pv does not exist in k8s, err: %v", err)
}
// Query the deleted pod associated with this pv
pod := models.Instance{Id: dbPV.PodId}
existPod, err := ss.Engine.Unscoped().Cols("cluster_id").Get(&pod)
if !existPod {
return errors.New(fmt.Sprintf("Found pod is error: %v", err))
}
cluster := models.ClusterInstance{Id: pod.ClusterId}
existCluster, err := ss.Engine.Unscoped().Cols("user_id", "image_id", "storage", "user_tag", "org_tag", "secret").Get(&cluster)
if !existCluster {
return errors.New(fmt.Sprintf("Found cluster is error: %v", err))
}
limitMem, limitCpu := int(storageMap["mem"].(float64)), int(storageMap["cpu"].(float64))
enough, msg, _ := getUserResource(userId, limitCpu, limitMem, cluster.Storage, ss.Engine)
if !enough {
return errors.New(msg)
}
pvcName := mysqlName + "-pvc"
scName := mysqlName + "-sc"
svcName := mysqlName + "-svc"
mysqlImage := models.Images{Id: cluster.ImageId}
hasImage, err := ss.Engine.Get(&mysqlImage)
if !hasImage {
return errors.New(fmt.Sprintf("Found mysql image is error: %v", err))
}
var imageURL string
if mysqlImage.Status == "Invalid" {
imageURL = fmt.Sprintf("%v:%v", mysqlImage.ImageName, mysqlImage.Version)
} else {
imageURL = fmt.Sprintf("%v/%v:%v", getImageAddress(ss.Engine), mysqlImage.ImageName, mysqlImage.Version)
}
// Set the sc name on the pv
pv := (*pvSource).(*core1.PersistentVolume)
pv.Spec.ClaimRef = nil
pv.Spec.StorageClassName = scName
_, err = ss.cs.GetClientSet().CoreV1().PersistentVolumes().Update(*ss.cs.GetCtx(), pv, meta1.UpdateOptions{})
if err != nil {
return
}
pvcConfig := core1.PersistentVolumeClaim{
TypeMeta: meta1.TypeMeta{Kind: "PersistentVolumeClaim", APIVersion: "v1"},
ObjectMeta: meta1.ObjectMeta{Name: pvcName},
Spec: core1.PersistentVolumeClaimSpec{
StorageClassName: &scName,
AccessModes: []core1.PersistentVolumeAccessMode{core1.ReadWriteOnce},
Resources: core1.ResourceRequirements{
Requests: map[core1.ResourceName]resource.Quantity{
"storage": resource.MustParse(dbPV.Capacity),
},
},
},
}
err = ss.cs.CreateOption("pvc", ss.cs.GetNameSpace(), &pvcConfig, meta1.CreateOptions{})
if err != nil {
return
}
svcConfig := core1.Service{
TypeMeta: meta1.TypeMeta{Kind: "Service", APIVersion: "v1"},
ObjectMeta: meta1.ObjectMeta{Name: svcName},
Spec: core1.ServiceSpec{
Type: core1.ServiceTypeNodePort,
Ports: []core1.ServicePort{
{Name: "mysqlport", Port: 3306, TargetPort: intstr.FromInt(3306)},
{Name: "sidecar-ttyd", Port: 7681, TargetPort: intstr.FromInt(7681)},
},
Selector: map[string]string{"app": "mysql"},
},
}
err = ss.cs.CreateOption("service", ss.cs.GetNameSpace(), &svcConfig, meta1.CreateOptions{})
if err != nil {
return
}
secretMap := map[string]string{}
err = json.Unmarshal([]byte(cluster.Secret), &secretMap)
if err != nil {
return
}
mysqlConfig := appsv1.Deployment{
TypeMeta: meta1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: meta1.ObjectMeta{Name: mysqlName},
Spec: appsv1.DeploymentSpec{
Selector: &meta1.LabelSelector{MatchLabels: map[string]string{"app": "mysql"}},
Strategy: appsv1.DeploymentStrategy{Type: appsv1.RecreateDeploymentStrategyType},
Template: core1.PodTemplateSpec{
ObjectMeta: meta1.ObjectMeta{Labels: map[string]string{"app": "mysql"}},
Spec: core1.PodSpec{
Containers: []core1.Container{
{
Name: "mysql",
Image: imageURL,
Resources: core1.ResourceRequirements{
Limits: core1.ResourceList{
"memory": resource.MustParse(fmt.Sprintf("%vGi", limitMem)),
"cpu": resource.MustParse(strconv.Itoa(limitCpu)),
},
},
Env: []core1.EnvVar{{Name: "MYSQL_ROOT_PASSWORD", Value: secretMap["ROOT_PASSWORD"]}},
Ports: []core1.ContainerPort{{Name: "mysql", ContainerPort: 3306}},
VolumeMounts: []core1.VolumeMount{{Name: "mysql-persistent-storage", MountPath: "/var/lib/mysql"}},
},
{
Name: "sidecar",
Image: "10.45.10.107:8099/k8s/mysql-sidecar:ttyd",
Ports: []core1.ContainerPort{
{Name: "mysql", ContainerPort: 7681},
},
},
},
Volumes: []core1.Volume{
{Name: "mysql-persistent-storage", VolumeSource: core1.VolumeSource{
PersistentVolumeClaim: &core1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName},
}},
},
},
},
},
}
_, err = ss.cs.GetClientSet().AppsV1().Deployments(ss.cs.GetNameSpace()).Create(*ss.cs.GetCtx(), &mysqlConfig, meta1.CreateOptions{})
if err != nil {
return err
}
// Find the pod name
var podName string
for i := 0; i < 10; i++ {
err, podListSource := ss.cs.GetResources("pod", "", ss.cs.GetNameSpace(), meta1.ListOptions{LabelSelector: "app=mysql"})
if err != nil {
utils.LoggerError(err)
<-time.After(time.Second)
continue
}
podList := (*podListSource).(*core1.PodList)
for _, pod := range podList.Items {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue
}
if volume.PersistentVolumeClaim.ClaimName == pvcName {
podName = pod.Name
goto FindPodNameEnd
}
}
}
<-time.After(time.Second)
| {
list = make([]models.PersistentVolume, 0)
where := "name like ?"
args := []interface{}{"%" + key + "%"}
// AAAA is the root user and can view all PVs
if userTag != "AAAA" {
where += " AND user_tag = ?"
args = append(args, userTag)
}
err = ss.Engine.Where(where, args...).Limit(pageSize, pageSize*(page-1)).Desc("id").Find(&list)
if err != nil {
return
}
count, _ = ss.Engine.Where(where, args...).Count(&models.PersistentVolume{})
userCache := map[string]models.User{}
for i := range list {
tag := list[i].UserTag
user, ok := userCache[tag]
if !ok {
user = models.User{UserTag: tag} | identifier_body |
|
storage_service.go | _id = ?", id).Update(&scUser)
}
} else {
// <userId,index>
dbUsersM := map[int]int{}
for i := range dbUsers {
dbUsersM[dbUsers[i].UserId] = i
}
insertList := make([]models.ScUser, 0)
for i := range userStrList {
if len(userStrList[i]) == 0 {
continue
}
var userId, err = strconv.Atoi(userStrList[i])
if err != nil {
utils.LoggerError(err)
continue
}
if _, ok := dbUsersM[userId]; ok {
// Already present in the database: skip it and mark it as -1
dbUsersM[userId] = -1
continue
}
insertList = append(insertList, models.ScUser{ScId: id, UserId: userId})
}
for _, v := range dbUsersM {
if v != -1 {
clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).And("user_id = ?", dbUsers[v].UserId).Count(new(models.ClusterInstance))
if err != nil {
return err
}
if clusterCount > 0 {
u := models.User{}
_, _ = ss.Engine.ID(dbUsers[v].UserId).Cols("user_name").Get(&u)
return response.NewMsg(fmt.Sprintf("%v have already used this storage, this user cannot be deleted", u.UserName), fmt.Sprintf("%v已使用此存储,不能删除此用户", u.UserName))
}
}
}
if len(insertList) > 0 {
_, err := ss.Engine.Insert(&insertList)
if err != nil {
return err
}
}
for _, v := range dbUsersM {
if v != -1 {
_, _ = ss.Engine.ID(dbUsers[v].Id).Delete(new(models.ScUser))
}
}
}
}
_, err = ss.Engine.ID(id).Cols("assign_all").Update(&models.Sc{AssignAll: assignAll})
return err
}
func (ss *storageService) List(page int, pageSize int, key string, userId int, userTag string, isFilter bool) ([]models.ReturnSc, int64) {
scPv := make([]models.Sc, 0)
var count int64
if userId <= 0 && len(userTag) != 0 {
u := models.User{}
_, _ = ss.Engine.Where("user_tag = ?", userTag).Cols("id").Get(&u)
userId = u.Id
}
if userId > 0 {
scList := make([]models.ScUser, 0)
err := ss.Engine.Where("user_id = ?", userId).Find(&scList)
utils.LoggerError(err)
for _, v := range scList {
sc := models.Sc{Id: v.ScId}
_, err = ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Get(&sc)
utils.LoggerError(err)
if len(sc.Name) > 0 {
scPv = append(scPv, sc)
}
}
assignAllSc := make([]models.Sc, 0)
err = ss.Engine.Where("assign_all = true").Find(&assignAllSc)
utils.LoggerError(err)
if len(assignAllSc) > 0 {
scPv = append(scPv, assignAllSc...)
}
count = int64(len(scPv))
if utils.MustInt(page, pageSize) {
min := pageSize * (page - 1)
max := min + pageSize
scPv = scPv[min:utils.Min(max, len(scPv))]
}
} else {
err := ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Limit(pageSize, pageSize*(page-1)).Desc("id").Find(&scPv)
utils.LoggerError(err)
count, _ = ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Count(&models.Sc{})
}
scReturn := make([]models.ReturnSc, len(scPv))
for i, sc := range scPv {
pv := make([]models.PersistentVolume, 0)
if sc.ScType == models.ScTypeUnique {
err := ss.Engine.Where(" sc_id = ?", sc.Id).OrderBy("id").Find(&pv)
utils.LoggerError(err)
sc.NodeNum = len(pv)
}
cluster := make([]models.ClusterInstance, 0)
err := ss.Engine.Where("sc_name = ?", sc.Name).Omit("yaml_text").Find(&cluster)
utils.LoggerError(err)
var scUserRaw json.RawMessage
if sc.AssignAll {
scUserRaw = []byte("-1")
} else {
scUser := make([]models.ScUser, 0)
err = ss.Engine.Where("sc_id = ?", sc.Id).Find(&scUser)
utils.LoggerError(err)
for i, user := range scUser {
u := models.User{Id: user.UserId}
_, err = ss.Engine.Get(&u)
utils.LoggerError(err)
scUser[i].UserName = u.UserName
}
scUserRaw, _ = json.Marshal(scUser)
}
scReturn[i] = models.ReturnSc{Sc: sc, Children: pv, Cluster: cluster, ScUser: scUserRaw}
}
if isFilter {
for i, sc := range scReturn {
if sc.ScType == models.ScTypeUnique && len(sc.Cluster) > 0 {
scReturn = append(scReturn[0:i], scReturn[i+1:]...)
}
}
}
return scReturn, count
}
func (ss *storageService) Add(scName string, reclaimPolicy string, remark string, orgTag string, userTag string, userId int, scType string, nodeNum int, userIdStr string) (models.Sc, error) {
sc := models.Sc{
Name: scName,
ScType: scType,
NodeNum: nodeNum,
ReclaimPolicy: reclaimPolicy,
Describe: remark,
OrgTag: orgTag,
UserTag: userTag,
AssignAll: userIdStr == "-1",
}
namespace := ss.cs.GetNameSpace()
// Unique storage: create it in k8s
if scType == "unique-storage" {
reclaimPolicyCore := core1.PersistentVolumeReclaimPolicy(reclaimPolicy)
scConfig := storage1.StorageClass{
TypeMeta: meta1.TypeMeta{
Kind: "StorageClass",
APIVersion: "storage.k8s.io/v1",
},
ObjectMeta: meta1.ObjectMeta{
Name: scName,
},
Provisioner: "kubernetes.io/no-provisioner",
ReclaimPolicy: &reclaimPolicyCore,
}
err := ss.cs.CreateOption("sc", namespace, &scConfig, meta1.CreateOptions{})
if err != nil {
return sc, err
}
} else {
err, scAddr := ss.cs.GetResources("sc", scName, namespace, meta1.GetOptions{})
if err != nil {
return sc, err
}
if value, ok := (*scAddr).(*storage1.StorageClass); ok {
sc.ReclaimPolicy = string(*value.ReclaimPolicy)
}
}
_, err := ss.Engine.Insert(&sc)
if err != nil || sc.AssignAll {
return sc, err
}
userIds := strings.Split(userIdStr, ",")
su := make([]models.ScUser, 0)
for i := range userIds {
if len(userIds[i]) == 0 {
continue
}
id, err := strconv.Atoi(userIds[i])
if err != nil {
utils.LoggerError(err)
continue
}
su = append(su, models.ScUser{UserId: id, ScId: sc.Id})
}
if userId > 0 {
su = append(su, models.ScUser{UserId: userId, ScId: sc.Id})
}
if len(su) > 0 {
_, _ = ss.Engine.Insert(&su)
}
return sc, nil
}
func (ss *storageService) Update(id int, remake string, nodeNum int) error {
sc := models.Sc{
Id: id,
Describe: remake,
NodeNum: nodeNum,
}
_, err := ss.Engine.ID(sc.Id).Update(&sc)
return err
}
func (ss *storageService) Delete(id int) error {
if id <= 0 {
return errors.New("storage id must > 0")
}
sc := models.Sc{
Id: id,
}
exist, err := ss.Engine.Get(&sc)
if err != nil {
return err
}
if !exist {
return nil
}
clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).Count(new(models.ClusterInstance))
if err != nil {
return err
}
if clusterCount > 0 {
return response.NewMsg("This storage is occupie | d by the cluster | conditional_block |
|
lib.rs | lattice probe for inventory. Note that these responses are returned
/// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the
/// client is responsible for aggregating many of these replies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum InventoryResponse {
/// A single host probe response
Host(HostProfile),
/// A list of all registered actors within a host
Actors {
host: String,
actors: Vec<Claims<Actor>>,
},
/// A list of configuration bindings of actors originating from the given host
Bindings {
host: String,
bindings: Vec<Binding>,
},
/// A list of capability providers currently running within the given host
Capabilities {
host: String,
capabilities: Vec<HostedCapability>,
},
}
/// An overview of host information
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct HostProfile {
/// The public key (subject) of the host
pub id: String,
/// The host's labels
pub labels: HashMap<String, String>,
/// Host uptime in milliseconds
pub uptime_ms: u128,
}
/// Represents an instance of a capability, which is a binding name and
/// the capability descriptor
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HostedCapability {
pub binding_name: String,
pub descriptor: wascc_codec::capabilities::CapabilityDescriptor,
}
/// Represents a single configuration binding from an actor to a capability ID and binding
/// name, with the specified configuration values.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Binding {
pub actor: String,
pub capability_id: String,
pub binding_name: String,
pub configuration: HashMap<String, String>,
}
/// A client for interacting with the lattice
pub struct Client {
nc: nats::Connection,
namespace: Option<String>,
timeout: Duration,
}
impl Client {
/// Creates a new lattice client, connecting to the NATS server at the
/// given host with an optional set of credentials (JWT auth)
pub fn new(
host: &str,
credsfile: Option<PathBuf>,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc: get_connection(host, credsfile),
timeout: call_timeout,
namespace,
}
}
pub fn with_connection(
nc: nats::Connection,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> |
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
/// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender
/// half of a channel to receive the events
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// After collecting the results of a provider launch auction, a "winner" from among the hosts
/// can be selected and told to launch the given provider. The provider's bytes will be retrieved
/// from the OCI registry. This function does _not_ | {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
} | identifier_body |
lib.rs | call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
}
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
/// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender
/// half of a channel to receive the events
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// After collecting the results of a provider launch auction, a "winner" from among the hosts
/// can be selected and told to launch the given provider. The provider's bytes will be retrieved
/// from the OCI registry. This function does _not_ confirm successful launch, only receipt
/// of the launch request.
pub fn launch_provider_on_host(
&self,
provider_ref: &str,
host_id: &str,
binding_name: &str,
) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchProviderCommand {
provider_ref: provider_ref.to_string(),
binding_name: binding_name.to_string(),
};
let ack: ProviderLaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_provider_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and
/// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry.
/// This function does _not_ confirm successful launch, only that the target host acknowledged the request
/// to launch.
pub fn launch_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<LaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchCommand {
actor_id: actor_id.to_string(),
};
let ack: LaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates
/// a successful publication, and not necessarily a successful remote actor termination. Monitor the lattice
/// events to see if the actor was successfully terminated
pub fn stop_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let msg = TerminateCommand {
actor_id: actor_id.to_string(),
};
self.nc.publish(
&self.gen_terminate_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
)?;
let _ = self.nc.flush();
Ok(())
}
fn | gen_subject | identifier_name |
|
lib.rs | pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities";
pub const EVENTS: &str = "events";
const AUCTION_TIMEOUT_SECONDS: u64 = 5;
/// A response to a lattice probe for inventory. Note that these responses are returned
/// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the
/// client is responsible for aggregating many of these replies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum InventoryResponse {
/// A single host probe response
Host(HostProfile),
/// A list of all registered actors within a host
Actors {
host: String,
actors: Vec<Claims<Actor>>,
},
/// A list of configuration bindings of actors originating from the given host
Bindings {
host: String,
bindings: Vec<Binding>,
},
/// A list of capability providers currently running within the given host
Capabilities {
host: String,
capabilities: Vec<HostedCapability>,
},
}
/// An overview of host information
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct HostProfile {
/// The public key (subject) of the host
pub id: String,
/// The host's labels
pub labels: HashMap<String, String>,
/// Host uptime in milliseconds
pub uptime_ms: u128,
}
/// Represents an instance of a capability, which is a binding name and
/// the capability descriptor
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HostedCapability {
pub binding_name: String,
pub descriptor: wascc_codec::capabilities::CapabilityDescriptor,
}
/// Represents a single configuration binding from an actor to a capability ID and binding
/// name, with the specified configuration values.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Binding {
pub actor: String,
pub capability_id: String,
pub binding_name: String,
pub configuration: HashMap<String, String>,
}
/// A client for interacting with the lattice
pub struct Client {
nc: nats::Connection,
namespace: Option<String>,
timeout: Duration,
}
impl Client {
/// Creates a new lattice client, connecting to the NATS server at the
/// given host with an optional set of credentials (JWT auth)
pub fn new(
host: &str,
credsfile: Option<PathBuf>,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc: get_connection(host, credsfile),
timeout: call_timeout,
namespace,
}
}
pub fn with_connection(
nc: nats::Connection,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
}
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
/// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender
/// half of a channel to receive the events
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS |
pub const INVENTORY_ACTORS: &str = "inventory.actors";
pub const INVENTORY_HOSTS: &str = "inventory.hosts";
pub const INVENTORY_BINDINGS: &str = "inventory.bindings"; | random_line_split |
|
mod.rs | = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
}
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id, ..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else | ,
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for loggign
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut | {
EdgeStatus::Up
} | conditional_block |
mod.rs | = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
}
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id, ..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse { | cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for loggign
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, | sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![], | random_line_split |
mod.rs | !(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for loggign
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) {
if config_id != self.view.get_current_config_id() {
// TODO: Figure out why &String does not impl Value
// info!(
// target: "Failure notification from old config.",
// subject = subject,
// config = self.view.get_current_config_id(),
// old_config = config_id
// );
//
return;
}
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: subject.clone(),
edge_status: proto::EdgeStatus::Down,
config_id,
node_id: None,
ring_number: self
.view
.get_ring_numbers(&self.host_addr, &subject)
.expect("Unable to get ring number"),
metadata: None,
};
self.enqueue_alert(alert);
}
pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> {
if !self.alerts.is_empty()
&& (Instant::now() - self.last_enqueued_alert) > self.batch_window
{
let alerts = self.alerts.drain(..).collect();
Some(proto::BatchedAlertMessage {
sender: self.host_addr.clone(),
alerts,
})
} else {
None
}
}
pub fn enqueue_alert(&mut self, alert: proto::Alert) {
self.last_enqueued_alert = Instant::now();
self.alerts.push_back(alert);
}
/// This is invoked when the consensus module decides on a proposal
///
/// Any node that is not in the membership list will be added to the cluster,
/// and any node that is currently in the membership list, but not in the proposal
/// will be removed.
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) {
// TODO: Handle metadata updates
// TODO: Handle subscriptions
self.cancel_failure_detectors();
for node in &proposal {
if self.view.is_host_present(&node) {
self.view.ring_delete(&node);
} else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) {
self.view.ring_add(node.clone(), node_id);
} else {
panic!("Node not present in pre-join metadata")
}
}
let _current_config_id = self.view.get_current_config_id();
// clear data structures
self.cut_detector.clear();
self.announced_proposal = false;
if self.view.is_host_present(&self.host_addr) {
// TODO: inform edge failure detector about config change
} else {
// We need to gracefully exit by calling a user handler and invalidating the current
// session
unimplemented!("How do you manage a callback again?");
}
// TODO: Instantiate new consensus instance
// self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), )
self.respond_to_joiners(proposal);
}
fn | cancel_failure_detectors | identifier_name |
|
mod.rs | = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> |
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
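// Dispatch note (descriptive only): `step` is the single entry point for inbound protocol
// messages. Pre-join and join requests are handled locally, batched alerts feed the cut
// detector, consensus messages are forwarded to the paxos instance, and any replies are
// queued on `self.messages` rather than sent inline.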
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id, ..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
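// Rough flow (descriptive summary of the body below): drop alerts that fail
// `filter_alert_messages`, aggregate the survivors through the cut detector, append any
// edges the cut detector invalidates, and only announce a view-change proposal when the
// combined result is non-empty.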
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for logging
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut | {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
} | identifier_body |
font.rs | ("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
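/// (Illustrative only, assuming `my_font` was built as in the `builder` example above:
/// `let heading_font = my_font.clone_with_size(24.0);`)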
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn | (context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
// !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
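///
/// Walks the NFC-normalized text, advancing a caret per glyph with pair kerning, starting a
/// new line on `'\n'`, and wrapping whenever a glyph's bounding box would cross `width`
/// (a `width` of 0.0 disables wrapping).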
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
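///
/// Only a non-empty family name and flags that are set are forwarded, so an empty family
/// with all flags false yields an unconstrained query.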
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new font cache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new(Vec | create | identifier_name |
font.rs | ("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> |
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
// !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new fontcache instant.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new | {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
} | identifier_body |
font.rs | .finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
// !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new font cache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex:: | .field("size", &self.size) | random_line_split |
|
font.rs | ("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
// !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold |
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new font cache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new | {
property = property.bold();
} | conditional_block |
app.js | ')){
map = new google.maps.Map(document.getElementById('mapPlacement'), {
center: {lat: data.location.lat, lng: data.location.lng},
zoom: 15
})
$.ajax({
method: 'POST',
url: '/api/locations',
data: data,
success: showRestaurants,
error: noRestaurants
})
} else {
activeUser.location = data.location;
console.log(activeUser);
$.ajax({
method: 'POST',
url: '/api/locations',
data: activeUser,
success: appendRestaurants,
error: noRestaurants
})
}
}
function noLocation(data){
console.log('could not find location ', data)
}
// looks at each restaurant sent from Yelp
function showRestaurants(data){
console.log('you found restaurants! ', data);
data.forEach(function(restaurant){
var location = {
lat: restaurant.coordinates.latitude,
lng: restaurant.coordinates.longitude
}
// this is the content that goes on the card associated with each restaurant in the map
var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>'
addMarker(location, content)
})
}
// places a marker on the map for each restaurant
function addMarker(position, content){
var myLatlng, marker, infowindow,contentString;
// places each marker
marker = new google.maps.Marker({
position: position,
map: map
});
// fills in data for the card that appears when clicking on any marker
contentString = content;
infowindow = new google.maps.InfoWindow({
content: contentString
});
// listen for click to open the window when a marker is clicked on
marker.addListener('click', function() {
// open the restaurant info when the marker is clicked on
infowindow.open(map, marker);
});
}
function noRestaurants(data){
console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... NO SANDWICH FOR YOU!!', data);
}
//Detects clicking and dragging on the map, shows the button to search
$('.hero-map').mousedown(function(){
if ($('.hero-map').mousemove(function(){
})){
$('.change-location').show(600);
}
})
// Listener for searching where the user currently is
$('.current-location').on('click', '#current-location', findLocation)
function findLocation (){
console.log('I know where you live!')
$.ajax({
method: 'POST',
url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg',
success: createMap,
error: noLocation
})
}
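// Shape note (inferred from how createMap reads it, not from the API docs): the geolocation
// response is expected to carry { location: { lat: <number>, lng: <number> } }, the same shape
// built by hand for the moved-map search below.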
// Listener for searching where the map is currently centered
$('.change-location').on('click', '#change-location', function(){
console.log('Searching in the new map location');
var movedMapLocation = {
location: {
lat: map.getCenter().lat(),
lng: map.getCenter().lng()
}
}
createMap(movedMapLocation);
})
// button listener to hide the map area once it's open
$('.map-section').on('click', '#hide-map-button', function(){
$('#hero-map').hide();
$('.find-hero-button').show();
})
// this is the end of the map area
$('.business-search').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
activeUser.term = $(this).serializeArray()[0].value
console.log(activeUser, $(this).serializeArray())
findLocation()
})
function appendRestaurants(restaurants){
console.log(restaurants)
$('#business-submit-form').removeClass('hidden')
$('.restaurant-list').html('')
restaurants.forEach(function (restaurant){
$('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name}))
})
}
$('.business-submit').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
var restaurant = $(this).serializeArray()[0].value;
$('.business-forms').addClass('hidden');
$('#review-form').removeClass('hidden');
$('#review-form').prepend('<h2>'+restaurant+'</h2>');
$('#restaurant-input').val(restaurant)
console.log(restaurant);
})
// this is what spits out each review onto the page.
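// Shape note (inferred from the fields read below, not from a schema): each review object is
// expected to look roughly like
// { reviewContent: '...', stars: 3, recommend: true, gif: '<url>', _id: '<id>', username: '<name>' }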
function appendReviews(allReviews) {
var reviewHtml;
// for each review:
allReviews.forEach(function(reviewData){
// create HTML for individual review
var reviewInfo = {
reviewContent: reviewData.reviewContent,
reviewStars: reviewData.stars,
// ternary checking whether reviewData.recommend is true or false - if true return Yes, if false return No
reviewRecommend: reviewData.recommend ? "Yes" : "No",
reviewGif: reviewData.gif,
reviewId: reviewData._id,
author: reviewData.username
};
console.log(reviewData.username)
if (activeUser.reviews.indexOf(reviewData._id)>=0){
reviewHtml = templateReviewButtons(reviewInfo)
} else {
reviewHtml = templateReview(reviewInfo)
}
if(activeUser._id){
$('.login').hide()
$('.sign-up').hide()
} else {
$('.create').hide()
$('.logout').hide()
}
// add review to top of review area
$('.appendReviews').prepend(reviewHtml);
});
// listener for pressing the edit review. Directs to edit page.
$('.reviewIndividual').on('click', '#edit-button', function(){
localStorage.setItem('classes', $(this).attr("class").split(' ')[0]);
console.log('the edit button was pressed! Review Id is ' + localStorage.getItem('classes'));
window.location.href="../edit";
})
// listener for the create review button. Directs to create page.
$('#create-button').on('click', function(){
console.log('the create button was pressed!');
window.location.href="../create";
})
$('.edit-review').on('submit', function(event) {
console.log('edit review submit clicked');
event.preventDefault();
$.ajax({
method: 'PUT',
url: '/api/reviews/' + localStorage.getItem("classes"),
data: $(this).serializeArray(),
success: newReviewSuccess,
error: newReviewError
})
})
// click event for pressing the delete review button. hits the delete route with Id from review
$('.reviewIndividual').on('click', '#delete-button', function(){
// sets variable to be the first class associated with this button (which is the id of the review)
var classes = $(this).attr("class").split(' ')[0];
console.log('the delete button was pressed! Review Id is ' + classes);
$.ajax({
method: 'DELETE',
url: '/api/reviews/' + classes,
success: deleteReview,
error: deleteFailure
})
location.reload();
})
// this is the end of append reviews function
};
function editReview(data){
    console.log('Trying to edit the review below', data);
    templateReview({
        reviewContent: data.reviewContent2
    });
    console.log('The review was edited', data);
    // redirect home once the edit has been handled (this line was previously unreachable after a return)
    window.location.href = "../";
}
function heroChat() {
smackTalk = setInterval(function(){
$('.batwich-chat').empty();
$('.hero-chat').empty();
var chance = Math.round(Math.random());
if (chance) {
$('.hero-chat').hide();
$('.batwich-chat').show(400);
$('.batwich-chat').html(batwichSmack[Math.round(Math.random() * (batwichSmack.length - 1))]);
} else {
$('.batwich-chat').hide();
$('.hero-chat').show(400);
$('.hero-chat').html(heroSmack[Math.round(Math.random() * (heroSmack.length - 1))]);
}
}, 5500);
}
heroChat();
// This is the end of on ready function
})
function newReviewSuccess(review){
console.log('ajax call on review successful. Review: ', review);
window.location.href="../"
}
function newReviewError(error){
console.log('ajax call on review dun messed up. Error: ', error);
}
function yelpSuccess(restaurant){
console.log(restaurant)
}
function yelpError (error){
console.log('ajax call on yelp dun messed up. Error: ', error);
}
function yelpCallback (data){
console.log('this is the yelp callback', data)
}
function noAppend (err){
console.log('the reviews did not append', err)
}
function newGifSearchError(error){
console.log('ajax call on gif search went bad, boss. Error: ', error);
}
function deleteReview(data){
console.log('delete review triggered!', data);
}
function deleteFailure(error){
console.log('The delete went bad. Did you delete the right thing? Did you delete everything?', error);
}
function editFailure(error){
console.log('Oh, no! We have failed to edit! Things remained the same, and you hated that stuff! Error: ', error);
}
function | saveUser | identifier_name |
|
app.js | console.log('The DOM body is ready')
console.log('Body parser parsing that body!');
$('.batwich-chat').hide();
$('.hero-chat').hide();
//*****************
//*****************
//Gif Handlebars templates
var sourceOne = $('#selectableGif-template2').html(),
templateGif = Handlebars.compile(sourceOne),
sourceThree = $('#gif-choice').html(),
templateGifChoice = Handlebars.compile(sourceThree),
// Review Handlebars template
$reviewsList = $('#review-form'),
sourceTwo = $("#review-template").html(),
templateReview = Handlebars.compile(sourceTwo);
//Restaurant Handlebars templates
sourceRestaurant = $('#restaurant-template').html(),
templateRestaurant = Handlebars.compile(sourceRestaurant),
sourceTwoButtons = $('#review-template-buttons').html(),
templateReviewButtons = Handlebars.compile(sourceTwoButtons);
// this is what submits the form to add a review in
$('.new-review').on('submit', function(event) {
console.log('submit clicked');
event.preventDefault();
$.ajax({
method: 'POST',
url: '/api/reviews',
data: $(this).serializeArray(),
success: newReviewSuccess,
error: newReviewError
})
})
// this is what searches giphy for images
$('.form-gif').on('submit', function(event){
console.log('gif submit clicked');
event.preventDefault();
$.ajax({
method: 'GET',
url: giphyApi,
data: $(this).serializeArray(),
success: newGifSearchSuccess,
error: newGifSearchError
})
})
// this is what handles clicking on a gif
$('.gifSelectionField2').on('click', '.gifBox', function(event){
$('.gifSelectionField2').empty();
var pickedGifHtml = templateGifChoice({ userChosenGif: this.src});
$('.selected-gif').empty();
$('.selected-gif').append(pickedGifHtml);
})
// this is what populates selectable gifs
function newGifSearchSuccess(json){
console.log('ajax call for gif successful. Gif: ', json);
// empty space to prevent gifs from multiple searches showing at the same time
$('.gifSelectionField2').empty();
json.data.forEach(function(gif){
var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url});
$(".gifSelectionField2").append(giphyHtml);
});
}
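// Shape note (inferred from the loop above): only json.data[i].images.fixed_height_small.url
// is read from each Giphy result.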
// when the page loads this will trigger and run the append review function. gets all reviews from the reviews endpoint
$.ajax({
method: 'GET',
url: '/api/reviews',
success: appendReviews,
error: noAppend
})
//when page loads, save active user to a variable
$.ajax({
method: 'GET',
url: '/api/user/active',
success: saveUser,
// error: noAppend
})
// this is the area that deals with the map | // listener for find hero button. hides button to search again until map is moved.
$('.map-section').on('click', '#map-button', function(){
console.log('map button pressed');
$('#hero-map').show();
$('.find-hero-button').hide();
// set default location as Hell, MI
var defaultLocation = {
location: {
lat: 42.4347,
lng: -83.9850
}
}
// create the map using the default location
createMap(defaultLocation);
})
// creates a google map using location info
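// Branch note (descriptive only): when #mapPlacement exists we are on the map page, so the map
// is drawn and each Yelp result gets a marker; otherwise the same location search is posted with
// the active user's data and the results are appended as a selectable restaurant list instead.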
function createMap(data){
console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng);
$('.change-location').hide();
if (document.getElementById('mapPlacement')){
map = new google.maps.Map(document.getElementById('mapPlacement'), {
center: {lat: data.location.lat, lng: data.location.lng},
zoom: 15
})
$.ajax({
method: 'POST',
url: '/api/locations',
data: data,
success: showRestaurants,
error: noRestaurants
})
} else {
activeUser.location = data.location;
console.log(activeUser);
$.ajax({
method: 'POST',
url: '/api/locations',
data: activeUser,
success: appendRestaurants,
error: noRestaurants
})
}
}
function noLocation(data){
console.log('could not find location ', data)
}
// looks at each restaurant sent from Yelp
function showRestaurants(data){
console.log('you found restaurants! ', data);
data.forEach(function(restaurant){
var location = {
lat: restaurant.coordinates.latitude,
lng: restaurant.coordinates.longitude
}
// this is the content that goes on the card associated with each restaurant in the map
var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>'
addMarker(location, content)
})
}
// places a marker on the map for each restaurant
function addMarker(position, content){
var myLatlng, marker, infowindow,contentString;
// places each marker
marker = new google.maps.Marker({
position: position,
map: map
});
// fills in data for the card that appears when clicking on any marker
contentString = content;
infowindow = new google.maps.InfoWindow({
content: contentString
});
// listen for click to open the window when a marker is clicked on
marker.addListener('click', function() {
// open the restaurant info when the marker is clicked on
infowindow.open(map, marker);
});
}
function noRestaurants(data){
console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... NO SANDWICH FOR YOU!!', data);
}
//Detects clicking and dragging on the map, shows the button to search
$('.hero-map').mousedown(function(){
if ($('.hero-map').mousemove(function(){
})){
$('.change-location').show(600);
}
})
// Listener for searching where the user currently is
$('.current-location').on('click', '#current-location', findLocation)
function findLocation (){
console.log('I know where you live!')
$.ajax({
method: 'POST',
url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg',
success: createMap,
error: noLocation
})
}
// Listener for searching where the map is currently centered
$('.change-location').on('click', '#change-location', function(){
console.log('Searching in the new map location');
var movedMapLocation = {
location: {
lat: map.getCenter().lat(),
lng: map.getCenter().lng()
}
}
createMap(movedMapLocation);
})
// button listener to hide the map area once it's open
$('.map-section').on('click', '#hide-map-button', function(){
$('#hero-map').hide();
$('.find-hero-button').show();
})
// this is the end of the map area
$('.business-search').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
activeUser.term = $(this).serializeArray()[0].value
console.log(activeUser, $(this).serializeArray())
findLocation()
})
function appendRestaurants(restaurants){
console.log(restaurants)
$('#business-submit-form').removeClass('hidden')
$('.restaurant-list').html('')
restaurants.forEach(function (restaurant){
$('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name}))
})
}
$('.business-submit').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
var restaurant = $(this).serializeArray()[0].value;
$('.business-forms').addClass('hidden');
$('#review-form').removeClass('hidden');
$('#review-form').prepend('<h2>'+restaurant+'</h2>');
$('#restaurant-input').val(restaurant)
console.log(restaurant);
})
// this is what spits out each review onto the page.
function appendReviews(allReviews) {
var reviewHtml;
// for each review:
allReviews.forEach(function(reviewData){
// create HTML for individual review
var reviewInfo = {
reviewContent: reviewData.reviewContent,
reviewStars: reviewData.stars,
// ternary checking whether reviewData.recommend is true or false - if true return Yes, if false return No
reviewRecommend: reviewData.recommend ? "Yes" : "No",
reviewGif: reviewData.gif,
reviewId: reviewData._id,
author: reviewData.username
};
console.log(reviewData.username)
if (activeUser.reviews.indexOf(reviewData._id)>=0){
reviewHtml = templateReviewButtons(reviewInfo)
} else {
reviewHtml = templateReview(reviewInfo)
}
if(activeUser._id){
$('.login').hide()
$('.sign-up').hide()
} else {
$('.create').hide()
$('.logout').hide | //hide map area when page loads
$('#hero-map').hide();
| random_line_split |
app.js | .log('The DOM body is ready')
console.log('Body parser parsing that body!');
$('.batwich-chat').hide();
$('.hero-chat').hide();
//*****************
//*****************
//Gif Handlebars templates
var sourceOne = $('#selectableGif-template2').html(),
templateGif = Handlebars.compile(sourceOne),
sourceThree = $('#gif-choice').html(),
templateGifChoice = Handlebars.compile(sourceThree),
// Review Handlebars template
$reviewsList = $('#review-form'),
sourceTwo = $("#review-template").html(),
templateReview = Handlebars.compile(sourceTwo);
//Restaurant Handlebars templates
sourceRestaurant = $('#restaurant-template').html(),
templateRestaurant = Handlebars.compile(sourceRestaurant),
sourceTwoButtons = $('#review-template-buttons').html(),
templateReviewButtons = Handlebars.compile(sourceTwoButtons);
// this is what submits the form to add a review in
$('.new-review').on('submit', function(event) {
console.log('submit clicked');
event.preventDefault();
$.ajax({
method: 'POST',
url: '/api/reviews',
data: $(this).serializeArray(),
success: newReviewSuccess,
error: newReviewError
})
})
// this is what searches giphy for images
$('.form-gif').on('submit', function(event){
console.log('gif submit clicked');
event.preventDefault();
$.ajax({
method: 'GET',
url: giphyApi,
data: $(this).serializeArray(),
success: newGifSearchSuccess,
error: newGifSearchError
})
})
// this is what handles clicking on a gif
$('.gifSelectionField2').on('click', '.gifBox', function(event){
$('.gifSelectionField2').empty();
var pickedGifHtml = templateGifChoice({ userChosenGif: this.src});
$('.selected-gif').empty();
$('.selected-gif').append(pickedGifHtml);
})
// this is what populates selectable gifs
function newGifSearchSuccess(json){
console.log('ajax call for gif successful. Gif: ', json);
// empty space to prevent gifs from multiple searches showing at the same time
$('.gifSelectionField2').empty();
json.data.forEach(function(gif){
var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url});
$(".gifSelectionField2").append(giphyHtml);
});
}
// when the page loads this will trigger and run the append review function. gets all reviews from the reviews endpoint
$.ajax({
method: 'GET',
url: '/api/reviews',
success: appendReviews,
error: noAppend
})
//when page loads, save active user to a variable
$.ajax({
method: 'GET',
url: '/api/user/active',
success: saveUser,
// error: noAppend
})
// this is the area that deals with the map
//hide map area when page loads
$('#hero-map').hide();
// listener for find hero button. hides button to search again until map is moved.
$('.map-section').on('click', '#map-button', function(){
console.log('map button pressed');
$('#hero-map').show();
$('.find-hero-button').hide();
// set default location as Hell, MI
var defaultLocation = {
location: {
lat: 42.4347,
lng: -83.9850
}
}
// create the map using the default location
createMap(defaultLocation);
})
// creates a google map using location info
function createMap(data){
console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng);
$('.change-location').hide();
if (document.getElementById('mapPlacement')) | else {
activeUser.location = data.location;
console.log(activeUser);
$.ajax({
method: 'POST',
url: '/api/locations',
data: activeUser,
success: appendRestaurants,
error: noRestaurants
})
}
}
function noLocation(data){
console.log('could not find location ', data)
}
// looks at each restaurant sent from Yelp
function showRestaurants(data){
console.log('you found restaurants! ', data);
data.forEach(function(restaurant){
var location = {
lat: restaurant.coordinates.latitude,
lng: restaurant.coordinates.longitude
}
// this is the content that goes on the card associated with each restaurant in the map
var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>'
addMarker(location, content)
})
}
// places a marker on the map for each restaurant
function addMarker(position, content){
var myLatlng, marker, infowindow,contentString;
// places each marker
marker = new google.maps.Marker({
position: position,
map: map
});
// fills in data for the card that appears when clicking on any marker
contentString = content;
infowindow = new google.maps.InfoWindow({
content: contentString
});
// listen for click to open the window when a marker is clicked on
marker.addListener('click', function() {
// open the restaurant info when the marker is clicked on
infowindow.open(map, marker);
});
}
function noRestaurants(data){
console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... NO SANDWICH FOR YOU!!', data);
}
//Detects clicking and dragging on the map, shows the button to search
$('.hero-map').mousedown(function(){
if ($('.hero-map').mousemove(function(){
})){
$('.change-location').show(600);
}
})
// Listener for searching where the user currently is
$('.current-location').on('click', '#current-location', findLocation)
function findLocation (){
console.log('I know where you live!')
$.ajax({
method: 'POST',
url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg',
success: createMap,
error: noLocation
})
}
// Listener for searching where the map is currently centered
$('.change-location').on('click', '#change-location', function(){
console.log('Searching in the new map location');
var movedMapLocation = {
location: {
lat: map.getCenter().lat(),
lng: map.getCenter().lng()
}
}
createMap(movedMapLocation);
})
// button listener to hide the map area once it's open
$('.map-section').on('click', '#hide-map-button', function(){
$('#hero-map').hide();
$('.find-hero-button').show();
})
// this is the end of the map area
$('.business-search').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
activeUser.term = $(this).serializeArray()[0].value
console.log(activeUser, $(this).serializeArray())
findLocation()
})
function appendRestaurants(restaurants){
console.log(restaurants)
$('#business-submit-form').removeClass('hidden')
$('.restaurant-list').html('')
restaurants.forEach(function (restaurant){
$('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name}))
})
}
$('.business-submit').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
var restaurant = $(this).serializeArray()[0].value;
$('.business-forms').addClass('hidden');
$('#review-form').removeClass('hidden');
$('#review-form').prepend('<h2>'+restaurant+'</h2>');
$('#restaurant-input').val(restaurant)
console.log(restaurant);
})
// this is what spits out each review onto the page.
function appendReviews(allReviews) {
var reviewHtml;
// for each review:
allReviews.forEach(function(reviewData){
// create HTML for individual review
var reviewInfo = {
reviewContent: reviewData.reviewContent,
reviewStars: reviewData.stars,
// ternary checking whether reviewData.recommend is true or false - if true return Yes, if false return No
reviewRecommend: reviewData.recommend ? "Yes" : "No",
reviewGif: reviewData.gif,
reviewId: reviewData._id,
author: reviewData.username
};
console.log(reviewData.username)
if (activeUser.reviews.indexOf(reviewData._id)>=0){
reviewHtml = templateReviewButtons(reviewInfo)
} else {
reviewHtml = templateReview(reviewInfo)
}
if(activeUser._id){
$('.login').hide()
$('.sign-up').hide()
} else {
$('.create').hide()
$('.logout'). | {
map = new google.maps.Map(document.getElementById('mapPlacement'), {
center: {lat: data.location.lat, lng: data.location.lng},
zoom: 15
})
$.ajax({
method: 'POST',
url: '/api/locations',
data: data,
success: showRestaurants,
error: noRestaurants
})
} | conditional_block |
app.js | GifHtml);
})
// this is what populates selectable gifs
function newGifSearchSuccess(json){
console.log('ajax call for gif successful. Gif: ', json);
    // empty the container so gifs from earlier searches don't show at the same time
$('.gifSelectionField2').empty();
json.data.forEach(function(gif){
var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url});
$(".gifSelectionField2").append(giphyHtml);
});
}
// when the page loads this triggers and runs the appendReviews function. gets reviews from the reviews endpoint
$.ajax({
method: 'GET',
url: '/api/reviews',
success: appendReviews,
error: noAppend
})
//when page loads, save active user to a variable
$.ajax({
method: 'GET',
url: '/api/user/active',
success: saveUser,
// error: noAppend
})
// this is the area that deals with the map
//hide map area when page loads
$('#hero-map').hide();
// listener for find hero button. hides button to search again until map is moved.
$('.map-section').on('click', '#map-button', function(){
console.log('map button pressed');
$('#hero-map').show();
$('.find-hero-button').hide();
        // set the default location to Hell, MI
var defaultLocation = {
location: {
lat: 42.4347,
lng: -83.9850
}
}
        // create the map using the default location
createMap(defaultLocation);
})
// creates a google map using location info
function createMap(data){
console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng);
$('.change-location').hide();
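    // if the map element is on the page, render the map and drop markers;
    // otherwise store the location on the active user and list results instead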
if (document.getElementById('mapPlacement')){
map = new google.maps.Map(document.getElementById('mapPlacement'), {
center: {lat: data.location.lat, lng: data.location.lng},
zoom: 15
})
$.ajax({
method: 'POST',
url: '/api/locations',
data: data,
success: showRestaurants,
error: noRestaurants
})
} else {
activeUser.location = data.location;
console.log(activeUser);
$.ajax({
method: 'POST',
url: '/api/locations',
data: activeUser,
success: appendRestaurants,
error: noRestaurants
})
}
}
function noLocation(data){
console.log('could not find location ', data)
}
    // looks at each restaurant sent from Yelp
function showRestaurants(data){
console.log('you found restaurants! ', data);
data.forEach(function(restaurant){
var location = {
lat: restaurant.coordinates.latitude,
lng: restaurant.coordinates.longitude
}
// this is the content that goes on the card associated with each restaurant in the map
var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>'
addMarker(location, content)
})
}
    // places a marker on the map for each restaurant
function addMarker(position, content){
        var myLatlng, marker, infowindow, contentString;
// places each marker
marker = new google.maps.Marker({
position: position,
map: map
});
// fills in data for the card that appears when clicking on any marker
contentString = content;
infowindow = new google.maps.InfoWindow({
content: contentString
});
// listen for click to open the window when a marker is clicked on
marker.addListener('click', function() {
            // open the restaurant info window when the marker is clicked
infowindow.open(map, marker);
});
}
function noRestaurants(data){
console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... NO SANDWICH FOR YOU!!', data);
}
//Detects clicking and dragging on the map, shows the button to search
$('.hero-map').mousedown(function(){
    // reveal the search button once the user starts dragging the map
    $('.hero-map').one('mousemove', function(){
        $('.change-location').show(600);
    });
})
// Listener for searching where the user currently is
$('.current-location').on('click', '#current-location', findLocation)
function findLocation (){
console.log('I know where you live!')
$.ajax({
method: 'POST',
url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg',
success: createMap,
error: noLocation
})
}
// Listener for searching where the map is currently centered
$('.change-location').on('click', '#change-location', function(){
console.log('Searching in the new map location');
var movedMapLocation = {
location: {
lat: map.getCenter().lat(),
lng: map.getCenter().lng()
}
}
createMap(movedMapLocation);
})
// button listener to hide the map area once it's open
$('.map-section').on('click', '#hide-map-button', function(){
$('#hero-map').hide();
$('.find-hero-button').show();
})
// this is the end of the map area
$('.business-search').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
activeUser.term = $(this).serializeArray()[0].value
console.log(activeUser, $(this).serializeArray())
findLocation()
})
function appendRestaurants(restaurants){
console.log(restaurants)
$('#business-submit-form').removeClass('hidden')
$('.restaurant-list').html('')
restaurants.forEach(function (restaurant){
$('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name}))
})
}
$('.business-submit').on('submit', function(event) {
event.preventDefault();
console.log('submit clicked');
var restaurant = $(this).serializeArray()[0].value;
$('.business-forms').addClass('hidden');
$('#review-form').removeClass('hidden');
$('#review-form').prepend('<h2>'+restaurant+'</h2>');
$('#restaurant-input').val(restaurant)
console.log(restaurant);
})
// this is what spits out each review onto the page.
function appendReviews(allReviews) {
var reviewHtml;
// for each review:
allReviews.forEach(function(reviewData){
// create HTML for individual review
var reviewInfo = {
reviewContent: reviewData.reviewContent,
reviewStars: reviewData.stars,
                // ternary checking whether reviewData.recommend is true or false - if true return Yes, if false return No
reviewRecommend: reviewData.recommend ? "Yes" : "No",
reviewGif: reviewData.gif,
reviewId: reviewData._id,
author: reviewData.username
};
console.log(reviewData.username)
if (activeUser.reviews.indexOf(reviewData._id)>=0){
reviewHtml = templateReviewButtons(reviewInfo)
} else {
reviewHtml = templateReview(reviewInfo)
}
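            // toggle the nav controls depending on whether a user is currently signed in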
if(activeUser._id){
$('.login').hide()
$('.sign-up').hide()
} else {
$('.create').hide()
$('.logout').hide()
}
// add review to top of review area
$('.appendReviews').prepend(reviewHtml);
});
// listener for pressing the edit review. Directs to edit page.
$('.reviewIndividual').on('click', '#edit-button', function(){
            var classes = $(this).attr("class").split(' ')[0];
            localStorage.setItem('classes', classes);
            console.log('the edit button was pressed! Review Id is ' + classes);
window.location.href="../edit";
})
// listener for the create review button. Directs to create page.
$('#create-button').on('click', function(){
console.log('the create button was pressed!');
window.location.href="../create";
})
$('.edit-review').on('submit', function(event) {
console.log('edit review submit clicked');
event.preventDefault();
$.ajax({
method: 'PUT',
url: '/api/reviews/' + localStorage.getItem("classes"),
data: $(this).serializeArray(),
success: newReviewSuccess,
error: newReviewError
})
})
// click event for pressing the delete review button. hits the delete route with Id from review
$('.reviewIndividual').on('click', '#delete-button', function(){
// sets variable to be the first class associated with this button (which is the id of the review)
var classes = $(this).attr("class").split(' ')[0];
console.log('the delete button was pressed! Review Id is ' + classes);
$.ajax({
method: 'DELETE',
url: '/api/reviews/' + classes,
success: deleteReview,
error: deleteFailure
})
location.reload();
})
// this is the end of append reviews function
};
function editReview(data) | {
console.log('Trying to edit the review below', data);
templateReview({
reviewContent: data.reviewContent2
})
console.log('The review was edited', data);
        window.location.href="../"
        return templateReview;
} | identifier_body |
|
type_checker.rs | );
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?;
if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn | (&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
}
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
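    // Check an eliminator expression: type-check the scrutinee, dispatch on its type
    // (currently only bool is supported), then substitute the scrutinee back into the
    // resulting term and type.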
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
        // Substitute bar(val_tm) into the elimination term and type, which
        // live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
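    // Check a bool elimination: the motive is a type over a context extended by a bool
    // variable, and each case body is checked against the motive instantiated at true
    // and false respectively.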
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
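    // Look up a named definition in the context stack, applying the recorded weakening
    // morphisms so the returned term and type are valid in the current context.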
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
| refl | identifier_name |
type_checker.rs | _info);
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?; | if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
}
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
        // Substitute bar(val_tm) into the elimination term and type, which
        // live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
| random_line_split |
|
type_checker.rs | );
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?;
if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> |
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
        // Substitute bar(val_tm) into the elimination term and type, which
        // live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
| {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
} | identifier_body |
app.component.local.ts | { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers";
import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history";
import { createHeaders, getParameterByName } from "./util";
import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor";
import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure";
import { ResponseStatusBarComponent } from "./response-status-bar.component";
import { GenericDialogComponent } from "./generic-message-dialog.component";
import { getString } from "./localization-helpers";
declare let mwf, ga, moment;
@Component({
selector: 'api-explorer',
providers: [GraphService],
templateUrl: './app.component.html',
styles: [`
#explorer-main {
padding-left: 12px;
}
sidebar {
padding: 0px;
}
`]
})
export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit {
ngAfterViewInit(): void {
// Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers
if (typeof $ !== "undefined") {
$("#response-viewer-labels .ms-Pivot-link").on('click', () => {
insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders)
});
}
parseMetadata(this.GraphService, "v1.0");
parseMetadata(this.GraphService, "beta");
}
static svc:GraphService;
static messageBarContent:MessageBarContent;
static lastApiCallHeaders: Headers;
static _changeDetectionRef:ChangeDetectorRef;
static message:Message;
constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) {
super();
AppComponent.svc = GraphService;
AppComponent._changeDetectionRef = chRef;
}
ngOnInit() {
for (let key in AppComponent.Options) {
if (key in window)
AppComponent.Options[key] = window[key];
}
AppComponent.Options.GraphVersions.push("Other");
initAuth(AppComponent.Options, this.GraphService, this.chRef);
initFabricComponents();
mwf.ComponentFactory.create([{
'component': mwf.Drawer,
}])
moment.locale(AppComponent.Options.Language);
}
static Options: ExplorerOptions = {
ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40",
Language: "en-US",
DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite",
AuthUrl: "https://login.microsoftonline.com",
GraphUrl: "https://graph.microsoft.com",
GraphVersions: GraphApiVersions,
PathToBuildDir: ""
};
static explorerValues:ExplorerValues = {
endpointUrl: AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`,
selectedOption: getParameterByName("method") as RequestType || "GET",
selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0",
authentication: {
user: {}
},
showImage: false,
requestInProgress: false,
headers: [],
postBody: ""
};
static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage();
static addRequestToHistory(request:GraphApiCall) {
AppComponent.requestHistory.splice(0, 0, request); //add history object to the array
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static removeRequestFromHistory(request:GraphApiCall) {
const idx = AppComponent.requestHistory.indexOf(request);
if (idx > -1) {
AppComponent.requestHistory.splice(idx, 1);
} else {
console.error("Trying to remove history item that doesn't exist")
}
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static setMessage(message:Message) {
AppComponent.message = message;
setTimeout(() => {GenericDialogComponent.showDialog();});
}
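    // Run the query currently described in the explorer UI, using the authenticated
    // service when a valid token is present and the anonymous endpoint otherwise.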
static executeExplorerQuery(fromSample?:boolean) {
// #hack. When clicking on an autocomplete option, the model isn't updated
if (fromSample != true)
AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val();
let query:GraphApiCall = {
requestUrl: AppComponent.explorerValues.endpointUrl,
method: AppComponent.explorerValues.selectedOption,
requestSentAt: new Date(),
headers: AppComponent.explorerValues.headers,
postBody: getRequestBodyEditor().getSession().getValue()
};
checkHasValidAuthToken();
let graphRequest:Promise<Response>;
if (isAuthenticated()) {
graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers));
} else {
graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers));
}
this.explorerValues.requestInProgress = true;
graphRequest.then((res) => {
handleSuccessfulQueryResponse(res, query);
}).catch((res) => {
handleUnsuccessfulQueryResponse(res, query);
});
}
static clearResponse() {
// clear response preview and headers
getAceEditorFromElId("response-header-viewer").getSession().setValue("");
getJsonViewer().getSession().setValue("")
this.explorerValues.showImage = false;
ResponseStatusBarComponent.clearMessage()
}
}
function isSuccessful(query:GraphApiCall) {
return query.statusCode >= 200 && query.statusCode < 300;
}
function createTextSummary(query:GraphApiCall) {
let text = "";
if (isSuccessful(query)) {
text += getString(AppComponent.Options, "Success");
} else {
text += getString(AppComponent.Options, "Failure");
}
text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}`
text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`;
if (query.statusCode == 401 || query.statusCode == 403) {
text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>`
}
return text;
}
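// Shared handling for successful and failed responses: record duration and status,
// add the call to history, surface a summary in the message bar, and report analytics.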
function commonResponseHandler(res:Response, query:GraphApiCall) {
AppComponent.clearResponse();
// common ops for successful and unsuccessful
AppComponent.explorerValues.requestInProgress = false;
AppComponent.lastApiCallHeaders = res.headers;
let {status, headers} = res;
query.duration = (new Date()).getTime() - query.requestSentAt.getTime();
query.statusCode = status;
AppComponent.addRequestToHistory(query);
AppComponent.messageBarContent = {
text: createTextSummary(query),
backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error",
icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge"
}
let dataPoints:any[] = [query.statusCode]
let urlGraph = constructGraphLinksFromFullPath(query.requestUrl);
if (urlGraph && urlGraph.length > 0) {
let cleanedUrl = urlGraph.map((link) => link.type).join("/");
dataPoints.push(cleanedUrl);
} else {
dataPoints.push("UnknownUrl");
}
dataPoints.push(isAuthenticated() ? "authenticated" : "demo");
if (typeof ga !== 'undefined') {
ga('send', {
hitType: 'event',
eventCategory: 'GraphExplorer',
eventAction: 'ExecuteQuery',
eventLabel: dataPoints.join(",")
});
}
}
function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) {
commonResponseHandler(res, query);
let {status, headers} = res;
let resultBody = res.text();
AppComponent.explorerValues.showImage = false;
if (isImageResponse(headers)) {
        let method = isAuthenticated() ? AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;
handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse);
} else if (isHtmlResponse(headers)) {
insertHeadersIntoResponseViewer(headers);
handleHtmlResponse(resultBody);
} else if (isXmlResponse(resultBody)) {
insertHeadersIntoResponseViewer(headers);
handleXmlResponse(resultBody);
} else {
insertHeadersIntoResponseViewer(headers);
if (res.text() != "")
handleJsonResponse(res.json());
}
}
function handleUnsuccessfulQueryResponse(res:Response, query:GraphApiCall) | {
commonResponseHandler(res, query);
insertHeadersIntoResponseViewer(res.headers);
let errorText;
try {
errorText = res.json();
handleJsonResponse(errorText);
return;
} catch(e) {
errorText = res.text();
}
if (errorText.indexOf("<!DOCTYPE html>") != -1) {
handleHtmlResponse(errorText);
} else {
showResults(errorText, "text")
}
} | identifier_body |
|
app.component.local.ts | Ref, DoCheck, AfterViewInit } from '@angular/core';
import { Response, Headers } from '@angular/http';
import { ExplorerOptions, RequestType, ExplorerValues, GraphApiCall, GraphRequestHeader, Message, SampleQuery, MessageBarContent, GraphApiVersions, GraphApiVersion } from "./base";
import { GraphExplorerComponent } from "./GraphExplorerComponent";
import { initAuth, checkHasValidAuthToken, isAuthenticated } from "./auth";
import { initFabricComponents } from "./fabric-components";
import { GraphService } from "./graph-service";
import { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers";
import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history";
import { createHeaders, getParameterByName } from "./util";
import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor";
import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure";
import { ResponseStatusBarComponent } from "./response-status-bar.component";
import { GenericDialogComponent } from "./generic-message-dialog.component";
import { getString } from "./localization-helpers";
declare let mwf, ga, moment;
@Component({
selector: 'api-explorer',
providers: [GraphService],
templateUrl: './app.component.html',
styles: [`
#explorer-main {
padding-left: 12px;
}
sidebar {
padding: 0px;
}
`]
})
export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit {
ngAfterViewInit(): void {
// Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers
if (typeof $ !== "undefined") {
$("#response-viewer-labels .ms-Pivot-link").on('click', () => {
insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders)
});
}
parseMetadata(this.GraphService, "v1.0");
parseMetadata(this.GraphService, "beta");
}
static svc:GraphService;
static messageBarContent:MessageBarContent;
static lastApiCallHeaders: Headers;
static _changeDetectionRef:ChangeDetectorRef;
static message:Message;
constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) {
super();
AppComponent.svc = GraphService;
AppComponent._changeDetectionRef = chRef;
}
ngOnInit() {
for (let key in AppComponent.Options) {
if (key in window)
AppComponent.Options[key] = window[key];
}
AppComponent.Options.GraphVersions.push("Other");
initAuth(AppComponent.Options, this.GraphService, this.chRef);
initFabricComponents();
mwf.ComponentFactory.create([{
'component': mwf.Drawer,
}])
moment.locale(AppComponent.Options.Language);
}
static Options: ExplorerOptions = {
ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40",
Language: "en-US",
DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite",
AuthUrl: "https://login.microsoftonline.com",
GraphUrl: "https://graph.microsoft.com",
GraphVersions: GraphApiVersions,
PathToBuildDir: ""
};
static explorerValues:ExplorerValues = {
endpointUrl: AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`,
selectedOption: getParameterByName("method") as RequestType || "GET",
selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0",
authentication: {
user: {}
},
showImage: false,
requestInProgress: false,
headers: [],
postBody: ""
};
static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage();
static addRequestToHistory(request:GraphApiCall) {
AppComponent.requestHistory.splice(0, 0, request); //add history object to the array
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static removeRequestFromHistory(request:GraphApiCall) {
const idx = AppComponent.requestHistory.indexOf(request);
if (idx > -1) {
AppComponent.requestHistory.splice(idx, 1);
} else {
console.error("Trying to remove history item that doesn't exist")
}
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static setMessage(message:Message) {
AppComponent.message = message;
setTimeout(() => {GenericDialogComponent.showDialog();});
}
static executeExplorerQuery(fromSample?:boolean) {
// #hack. When clicking on an autocomplete option, the model isn't updated
if (fromSample != true)
AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val();
let query:GraphApiCall = {
requestUrl: AppComponent.explorerValues.endpointUrl,
method: AppComponent.explorerValues.selectedOption,
requestSentAt: new Date(),
headers: AppComponent.explorerValues.headers,
postBody: getRequestBodyEditor().getSession().getValue()
};
checkHasValidAuthToken();
let graphRequest:Promise<Response>;
if (isAuthenticated()) {
graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers));
} else {
graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers));
}
this.explorerValues.requestInProgress = true;
graphRequest.then((res) => {
handleSuccessfulQueryResponse(res, query);
}).catch((res) => {
handleUnsuccessfulQueryResponse(res, query);
});
}
static clearResponse() {
// clear response preview and headers
getAceEditorFromElId("response-header-viewer").getSession().setValue("");
getJsonViewer().getSession().setValue("")
this.explorerValues.showImage = false;
ResponseStatusBarComponent.clearMessage()
}
}
function isSuccessful(query:GraphApiCall) {
return query.statusCode >= 200 && query.statusCode < 300;
}
function | (query:GraphApiCall) {
let text = "";
if (isSuccessful(query)) {
text += getString(AppComponent.Options, "Success");
} else {
text += getString(AppComponent.Options, "Failure");
}
text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}`
text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`;
if (query.statusCode == 401 || query.statusCode == 403) {
text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>`
}
return text;
}
function commonResponseHandler(res:Response, query:GraphApiCall) {
AppComponent.clearResponse();
// common ops for successful and unsuccessful
AppComponent.explorerValues.requestInProgress = false;
AppComponent.lastApiCallHeaders = res.headers;
let {status, headers} = res;
query.duration = (new Date()).getTime() - query.requestSentAt.getTime();
query.statusCode = status;
AppComponent.addRequestToHistory(query);
AppComponent.messageBarContent = {
text: createTextSummary(query),
backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error",
icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge"
}
let dataPoints:any[] = [query.statusCode]
let urlGraph = constructGraphLinksFromFullPath(query.requestUrl);
if (urlGraph && urlGraph.length > 0) {
let cleanedUrl = urlGraph.map((link) => link.type).join("/");
dataPoints.push(cleanedUrl);
} else {
dataPoints.push("UnknownUrl");
}
dataPoints.push(isAuthenticated() ? "authenticated" : "demo");
if (typeof ga !== 'undefined') {
ga('send', {
hitType: 'event',
eventCategory: 'GraphExplorer',
eventAction: 'ExecuteQuery',
eventLabel: dataPoints.join(",")
});
}
}
function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) {
commonResponseHandler(res, query);
let {status, headers} = res;
let resultBody = res.text();
AppComponent.explorerValues.showImage = false;
if (isImageResponse(headers)) {
        let method = isAuthenticated() ? AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;
handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse);
} else if (isHtmlResponse(headers)) {
insertHeadersIntoResponseViewer(headers);
handleHtmlResponse(resultBody);
} else if (isXmlResponse(resultBody)) {
insertHeadersIntoResponseViewer(headers);
handleXmlResponse(resultBody);
} else {
insertHeadersIntoResponseViewer(headers);
if (res.text() != "")
handleJsonResponse(res.json());
}
}
function | createTextSummary | identifier_name |
app.component.local.ts | Ref, DoCheck, AfterViewInit } from '@angular/core';
import { Response, Headers } from '@angular/http';
import { ExplorerOptions, RequestType, ExplorerValues, GraphApiCall, GraphRequestHeader, Message, SampleQuery, MessageBarContent, GraphApiVersions, GraphApiVersion } from "./base";
import { GraphExplorerComponent } from "./GraphExplorerComponent";
import { initAuth, checkHasValidAuthToken, isAuthenticated } from "./auth";
import { initFabricComponents } from "./fabric-components";
import { GraphService } from "./graph-service";
import { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers";
import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history";
import { createHeaders, getParameterByName } from "./util";
import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor";
import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure";
import { ResponseStatusBarComponent } from "./response-status-bar.component";
import { GenericDialogComponent } from "./generic-message-dialog.component";
import { getString } from "./localization-helpers";
declare let mwf, ga, moment;
@Component({
selector: 'api-explorer',
providers: [GraphService],
templateUrl: './app.component.html',
styles: [`
#explorer-main {
padding-left: 12px;
}
sidebar { |
`]
})
export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit {
ngAfterViewInit(): void {
// Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers
if (typeof $ !== "undefined") {
$("#response-viewer-labels .ms-Pivot-link").on('click', () => {
insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders)
});
}
parseMetadata(this.GraphService, "v1.0");
parseMetadata(this.GraphService, "beta");
}
static svc:GraphService;
static messageBarContent:MessageBarContent;
static lastApiCallHeaders: Headers;
static _changeDetectionRef:ChangeDetectorRef;
static message:Message;
constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) {
super();
AppComponent.svc = GraphService;
AppComponent._changeDetectionRef = chRef;
}
ngOnInit() {
for (let key in AppComponent.Options) {
if (key in window)
AppComponent.Options[key] = window[key];
}
AppComponent.Options.GraphVersions.push("Other");
initAuth(AppComponent.Options, this.GraphService, this.chRef);
initFabricComponents();
mwf.ComponentFactory.create([{
'component': mwf.Drawer,
}])
moment.locale(AppComponent.Options.Language);
}
static Options: ExplorerOptions = {
ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40",
Language: "en-US",
DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite",
AuthUrl: "https://login.microsoftonline.com",
GraphUrl: "https://graph.microsoft.com",
GraphVersions: GraphApiVersions,
PathToBuildDir: ""
};
static explorerValues:ExplorerValues = {
endpointUrl: AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`,
selectedOption: getParameterByName("method") as RequestType || "GET",
selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0",
authentication: {
user: {}
},
showImage: false,
requestInProgress: false,
headers: [],
postBody: ""
};
static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage();
static addRequestToHistory(request:GraphApiCall) {
AppComponent.requestHistory.splice(0, 0, request); //add history object to the array
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static removeRequestFromHistory(request:GraphApiCall) {
const idx = AppComponent.requestHistory.indexOf(request);
if (idx > -1) {
AppComponent.requestHistory.splice(idx, 1);
} else {
console.error("Trying to remove history item that doesn't exist")
}
saveHistoryToLocalStorage(AppComponent.requestHistory);
}
static setMessage(message:Message) {
AppComponent.message = message;
setTimeout(() => {GenericDialogComponent.showDialog();});
}
static executeExplorerQuery(fromSample?:boolean) {
// #hack. When clicking on an autocomplete option, the model isn't updated
if (fromSample != true)
AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val();
let query:GraphApiCall = {
requestUrl: AppComponent.explorerValues.endpointUrl,
method: AppComponent.explorerValues.selectedOption,
requestSentAt: new Date(),
headers: AppComponent.explorerValues.headers,
postBody: getRequestBodyEditor().getSession().getValue()
};
checkHasValidAuthToken();
let graphRequest:Promise<Response>;
if (isAuthenticated()) {
graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers));
} else {
graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers));
}
this.explorerValues.requestInProgress = true;
graphRequest.then((res) => {
handleSuccessfulQueryResponse(res, query);
}).catch((res) => {
handleUnsuccessfulQueryResponse(res, query);
});
}
static clearResponse() {
// clear response preview and headers
getAceEditorFromElId("response-header-viewer").getSession().setValue("");
getJsonViewer().getSession().setValue("")
this.explorerValues.showImage = false;
ResponseStatusBarComponent.clearMessage()
}
}
function isSuccessful(query:GraphApiCall) {
return query.statusCode >= 200 && query.statusCode < 300;
}
function createTextSummary(query:GraphApiCall) {
let text = "";
if (isSuccessful(query)) {
text += getString(AppComponent.Options, "Success");
} else {
text += getString(AppComponent.Options, "Failure");
}
text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}`
text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`;
if (query.statusCode == 401 || query.statusCode == 403) {
text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>`
}
return text;
}
function commonResponseHandler(res:Response, query:GraphApiCall) {
AppComponent.clearResponse();
// common ops for successful and unsuccessful
AppComponent.explorerValues.requestInProgress = false;
AppComponent.lastApiCallHeaders = res.headers;
let {status, headers} = res;
query.duration = (new Date()).getTime() - query.requestSentAt.getTime();
query.statusCode = status;
AppComponent.addRequestToHistory(query);
AppComponent.messageBarContent = {
text: createTextSummary(query),
backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error",
icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge"
}
let dataPoints:any[] = [query.statusCode]
let urlGraph = constructGraphLinksFromFullPath(query.requestUrl);
if (urlGraph && urlGraph.length > 0) {
let cleanedUrl = urlGraph.map((link) => link.type).join("/");
dataPoints.push(cleanedUrl);
} else {
dataPoints.push("UnknownUrl");
}
dataPoints.push(isAuthenticated() ? "authenticated" : "demo");
if (typeof ga !== 'undefined') {
ga('send', {
hitType: 'event',
eventCategory: 'GraphExplorer',
eventAction: 'ExecuteQuery',
eventLabel: dataPoints.join(",")
});
}
}
function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) {
commonResponseHandler(res, query);
let {status, headers} = res;
let resultBody = res.text();
AppComponent.explorerValues.showImage = false;
if (isImageResponse(headers)) {
        let method = isAuthenticated() ? AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;
handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse);
} else if (isHtmlResponse(headers)) {
insertHeadersIntoResponseViewer(headers);
handleHtmlResponse(resultBody);
} else if (isXmlResponse(resultBody)) {
insertHeadersIntoResponseViewer(headers);
handleXmlResponse(resultBody);
} else {
insertHeadersIntoResponseViewer(headers);
if (res.text() != "")
handleJsonResponse(res.json());
}
}
function | padding: 0px;
} | random_line_split |
__init__.py | _json_path)
def _setup_env():
"""Setup the local working environment."""
env.home_path = os.path.expanduser('~')
env.env_path = os.getenv('WORKON_HOME')
if not env.env_path:
warn("You should set the WORKON_HOME environment variable to" \
" the root directory for your virtual environments.")
env.env_path = env.sites_path
env.project_path = join(env.sites_path, env.project_name)
env.ve_path = join(env.env_path, env.project_name)
env.activate_path = join(env.ve_path, 'bin', 'activate')
def _s3cmd_put(src_path, bucket):
"""Copy local directory to S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s put' \
' --rexclude ".*/\.[^/]*$"' \
' --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' -r %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
def _s3cmd_sync(src_path, bucket):
"""Sync local directory with S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s sync' \
' --rexclude ".*/\.[^/]*$"' \
' --delete-removed --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' --no-preserve' \
' %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
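# Example (hypothetical paths): _s3cmd_put('website/build', 'stg.example.com') uploads the
# directory once, while _s3cmd_sync additionally removes bucket files deleted locally.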
############################################################
# JS libraries
############################################################
if _config:
# Set env.cdn_path = path to cdn repository
env.cdn_path = abspath(join(_config['root_path'],
'cdn.knightlab.com', 'app', 'libs', _config['name']))
def _make_zip(file_path):
notice('Creating zip file: %s' % file_path)
with zipfile.ZipFile(file_path, 'w', zipfile.ZIP_DEFLATED) as f_zip:
for r in _config['stage']:
static.add_zip_files(f_zip, _config, [{
"src": r['src'],
"dst": _config['name'], "regex": r['regex']}])
@task
def build():
"""Build lib version"""
_setup_env()
# Get build config
if not 'build' in _config:
abort('Could not find "build" in config file')
# Check version
if not 'version' in _config:
_config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
warn('Using development version value "%(version)s"' % _config)
notice('Building version %(version)s...' % _config)
# Clean build directory
clean(_config['build_path'])
# Build it
for key, param in _config['build'].iteritems():
getattr(static, key)(_config, param)
@task
def stage():
"""Build/commit/tag/push lib version, copy to local cdn repo"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Ask user for a new version
_config['version'] = git.prompt_tag('Enter a new version number',
unique=True)
# Build version
# use execute to allow for other implementations of 'build'
execute('build')
# Commit/push/tag
with lcd(env.project_path):
with settings(warn_only=True):
local('git add build')
# support builds where there's no change; sometimes comes up when
# reusing a tag because of an unexpected problem
with settings(warn_only=True):
                msg = local('git commit -m "Release %(version)s"' % _config, capture=True)
if 'nothing to commit' in msg:
warn(msg)
warn('continuing anyway')
elif not msg.startswith('[master'):
abort("Unexpected result: %s" % msg)
local('git push')
git.push_tag(_config['version'])
# Copy to local CDN repository
cdn_path = join(env.cdn_path, _config['version'])
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_dev():
"""
Build lib and copy to local cdn repository as 'dev' version
        No tagging/committing/etc.
"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Build version
build()
# Copy to local CDN repository
cdn_path = join(env.cdn_path, 'dev')
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_latest():
"""Copy lib version to latest within local cdn repo"""
_setup_env()
if 'version' in _config:
version = _config['version']
else:
version = git.prompt_tag('Which version to stage as "latest"?')
notice('stage_latest: %s' % version)
# Make sure version has been staged
version_cdn_path = join(env.cdn_path, version)
if not os.path.exists(version_cdn_path):
abort("Version '%s' has not been staged" % version)
# Stage version as latest
latest_cdn_path = join(env.cdn_path, 'latest')
clean(latest_cdn_path)
static.copy(_config, [{
"src": version_cdn_path, "dst": latest_cdn_path}])
@task
def untag():
"""Delete a tag (in case of error)"""
version = git.prompt_tag('Which tag to delete?')
if not version:
abort('No available version tag')
git.delete_tag(version)
############################################################
# Static websites deployed to S3
############################################################
if _config and 'deploy' in _config:
@task
def undeploy(env_type):
"""Delete website from S3 bucket. Specify stg|prd as argument."""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
            abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
|
bucket = _config['deploy'][env_type]['bucket']
warn('YOU ARE ABOUT TO DELETE EVERYTHING IN %s' % bucket)
if not do(prompt("Are you ABSOLUTELY sure you want to do this? (y/n): ").strip()):
abort('Aborting.')
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s del -r --force s3://%s/' \
% (env.s3cmd_cfg, bucket))
@task
def render(env_type):
"""Render templates (deploy except for actual sync with S3)"""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
if 'usemin_context' in _config['deploy'][env_type]:
usemin_context = _config['deploy'][env_type]['usemin_context']
else:
usemin_context = None
template_path = join(_config['project_path'], 'website', 'templates')
deploy | abort('Could not find "bucket" in deploy.%s" in config file' % env_type) | conditional_block |
__init__.py | _json_path)
def _setup_env():
"""Setup the local working environment."""
env.home_path = os.path.expanduser('~')
env.env_path = os.getenv('WORKON_HOME')
if not env.env_path:
warn("You should set the WORKON_HOME environment variable to" \
" the root directory for your virtual environments.")
env.env_path = env.sites_path
env.project_path = join(env.sites_path, env.project_name)
env.ve_path = join(env.env_path, env.project_name)
env.activate_path = join(env.ve_path, 'bin', 'activate')
def _s3cmd_put(src_path, bucket):
"""Copy local directory to S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s put' \
' --rexclude ".*/\.[^/]*$"' \
' --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' -r %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
def _s3cmd_sync(src_path, bucket):
"""Sync local directory with S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s sync' \
' --rexclude ".*/\.[^/]*$"' \
' --delete-removed --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' --no-preserve' \
' %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
############################################################
# JS libraries
############################################################
if _config:
# Set env.cdn_path = path to cdn repository
env.cdn_path = abspath(join(_config['root_path'],
'cdn.knightlab.com', 'app', 'libs', _config['name']))
def _make_zip(file_path):
notice('Creating zip file: %s' % file_path)
with zipfile.ZipFile(file_path, 'w', zipfile.ZIP_DEFLATED) as f_zip:
for r in _config['stage']:
static.add_zip_files(f_zip, _config, [{
"src": r['src'],
"dst": _config['name'], "regex": r['regex']}])
@task
def build():
|
@task
def stage():
"""Build/commit/tag/push lib version, copy to local cdn repo"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Ask user for a new version
_config['version'] = git.prompt_tag('Enter a new version number',
unique=True)
# Build version
# use execute to allow for other implementations of 'build'
execute('build')
# Commit/push/tag
with lcd(env.project_path):
with settings(warn_only=True):
local('git add build')
# support builds where there's no change; sometimes comes up when
# reusing a tag because of an unexpected problem
with settings(warn_only=True):
msg = local('git commit -m "Release %(version)s"' % _config,capture=True)
if 'nothing to commit' in msg:
warn(msg)
warn('continuing anyway')
elif not msg.startswith('[master'):
abort("Unexpected result: %s" % msg)
local('git push')
git.push_tag(_config['version'])
# Copy to local CDN repository
cdn_path = join(env.cdn_path, _config['version'])
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_dev():
"""
Build lib and copy to local cdn repository as 'dev' version
No tagging/committing/etc/
"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Build version
build()
# Copy to local CDN repository
cdn_path = join(env.cdn_path, 'dev')
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_latest():
"""Copy lib version to latest within local cdn repo"""
_setup_env()
if 'version' in _config:
version = _config['version']
else:
version = git.prompt_tag('Which version to stage as "latest"?')
notice('stage_latest: %s' % version)
# Make sure version has been staged
version_cdn_path = join(env.cdn_path, version)
if not os.path.exists(version_cdn_path):
abort("Version '%s' has not been staged" % version)
# Stage version as latest
latest_cdn_path = join(env.cdn_path, 'latest')
clean(latest_cdn_path)
static.copy(_config, [{
"src": version_cdn_path, "dst": latest_cdn_path}])
@task
def untag():
"""Delete a tag (in case of error)"""
version = git.prompt_tag('Which tag to delete?')
if not version:
abort('No available version tag')
git.delete_tag(version)
############################################################
# Static websites deployed to S3
############################################################
if _config and 'deploy' in _config:
@task
def undeploy(env_type):
"""Delete website from S3 bucket. Specify stg|prd as argument."""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
bucket = _config['deploy'][env_type]['bucket']
warn('YOU ARE ABOUT TO DELETE EVERYTHING IN %s' % bucket)
if not do(prompt("Are you ABSOLUTELY sure you want to do this? (y/n): ").strip()):
abort('Aborting.')
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s del -r --force s3://%s/' \
% (env.s3cmd_cfg, bucket))
@task
def render(env_type):
"""Render templates (deploy except for actual sync with S3)"""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
if 'usemin_context' in _config['deploy'][env_type]:
usemin_context = _config['deploy'][env_type]['usemin_context']
else:
usemin_context = None
template_path = join(_config['project_path'], 'website', 'templates')
deploy | """Build lib version"""
_setup_env()
# Get build config
if not 'build' in _config:
abort('Could not find "build" in config file')
# Check version
if not 'version' in _config:
_config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
warn('Using development version value "%(version)s"' % _config)
notice('Building version %(version)s...' % _config)
# Clean build directory
clean(_config['build_path'])
# Build it
for key, param in _config['build'].iteritems():
getattr(static, key)(_config, param) | identifier_body |
__init__.py | _json_path)
def _setup_env():
"""Setup the local working environment."""
env.home_path = os.path.expanduser('~')
env.env_path = os.getenv('WORKON_HOME')
if not env.env_path:
warn("You should set the WORKON_HOME environment variable to" \
" the root directory for your virtual environments.")
env.env_path = env.sites_path
env.project_path = join(env.sites_path, env.project_name)
env.ve_path = join(env.env_path, env.project_name)
env.activate_path = join(env.ve_path, 'bin', 'activate')
def _s3cmd_put(src_path, bucket):
"""Copy local directory to S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s put' \
' --rexclude ".*/\.[^/]*$"' \
' --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' -r %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
def _s3cmd_sync(src_path, bucket):
"""Sync local directory with S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s sync' \
' --rexclude ".*/\.[^/]*$"' \
' --delete-removed --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' --no-preserve' \
' %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
############################################################
# JS libraries
############################################################
if _config:
# Set env.cdn_path = path to cdn repository
env.cdn_path = abspath(join(_config['root_path'],
'cdn.knightlab.com', 'app', 'libs', _config['name']))
def _make_zip(file_path):
notice('Creating zip file: %s' % file_path)
with zipfile.ZipFile(file_path, 'w', zipfile.ZIP_DEFLATED) as f_zip:
for r in _config['stage']:
static.add_zip_files(f_zip, _config, [{
"src": r['src'],
"dst": _config['name'], "regex": r['regex']}])
@task
def build():
"""Build lib version"""
_setup_env()
# Get build config
if not 'build' in _config:
abort('Could not find "build" in config file')
# Check version
if not 'version' in _config:
_config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
warn('Using development version value "%(version)s"' % _config)
notice('Building version %(version)s...' % _config)
# Clean build directory
clean(_config['build_path'])
# Build it
for key, param in _config['build'].iteritems():
getattr(static, key)(_config, param)
@task
def stage():
"""Build/commit/tag/push lib version, copy to local cdn repo"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Ask user for a new version
_config['version'] = git.prompt_tag('Enter a new version number',
unique=True)
# Build version
# use execute to allow for other implementations of 'build'
execute('build')
# Commit/push/tag
with lcd(env.project_path):
with settings(warn_only=True):
local('git add build')
# support builds where there's no change; sometimes comes up when
# reusing a tag because of an unexpected problem
with settings(warn_only=True):
msg = local('git commit -m "Release %(version)s"' % _config,capture=True)
if 'nothing to commit' in msg:
warn(msg)
warn('continuing anyway')
elif not msg.startswith('[master'):
abort("Unexpected result: %s" % msg)
local('git push')
git.push_tag(_config['version'])
# Copy to local CDN repository
cdn_path = join(env.cdn_path, _config['version'])
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_dev():
"""
Build lib and copy to local cdn repository as 'dev' version
No tagging/committing/etc/
"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Build version
build()
# Copy to local CDN repository
cdn_path = join(env.cdn_path, 'dev')
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_latest():
"""Copy lib version to latest within local cdn repo"""
_setup_env()
if 'version' in _config:
version = _config['version']
else:
version = git.prompt_tag('Which version to stage as "latest"?')
notice('stage_latest: %s' % version)
# Make sure version has been staged
version_cdn_path = join(env.cdn_path, version)
if not os.path.exists(version_cdn_path):
abort("Version '%s' has not been staged" % version)
# Stage version as latest
latest_cdn_path = join(env.cdn_path, 'latest')
clean(latest_cdn_path)
static.copy(_config, [{
"src": version_cdn_path, "dst": latest_cdn_path}])
@task
def untag():
"""Delete a tag (in case of error)"""
version = git.prompt_tag('Which tag to delete?')
if not version:
abort('No available version tag')
git.delete_tag(version)
############################################################
# Static websites deployed to S3
############################################################
if _config and 'deploy' in _config:
@task
def | (env_type):
"""Delete website from S3 bucket. Specify stg|prd as argument."""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
bucket = _config['deploy'][env_type]['bucket']
warn('YOU ARE ABOUT TO DELETE EVERYTHING IN %s' % bucket)
if not do(prompt("Are you ABSOLUTELY sure you want to do this? (y/n): ").strip()):
abort('Aborting.')
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s del -r --force s3://%s/' \
% (env.s3cmd_cfg, bucket))
@task
def render(env_type):
"""Render templates (deploy except for actual sync with S3)"""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
if 'usemin_context' in _config['deploy'][env_type]:
usemin_context = _config['deploy'][env_type]['usemin_context']
else:
usemin_context = None
template_path = join(_config['project_path'], 'website', 'templates')
deploy | undeploy | identifier_name |
__init__.py | config_json_path)
def _setup_env():
"""Setup the local working environment."""
env.home_path = os.path.expanduser('~')
env.env_path = os.getenv('WORKON_HOME')
if not env.env_path:
warn("You should set the WORKON_HOME environment variable to" \
" the root directory for your virtual environments.")
env.env_path = env.sites_path
env.project_path = join(env.sites_path, env.project_name)
env.ve_path = join(env.env_path, env.project_name)
env.activate_path = join(env.ve_path, 'bin', 'activate')
def _s3cmd_put(src_path, bucket):
"""Copy local directory to S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s put' \
' --rexclude ".*/\.[^/]*$"' \
' --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' -r %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
def _s3cmd_sync(src_path, bucket):
"""Sync local directory with S3 bucket"""
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s sync' \
' --rexclude ".*/\.[^/]*$"' \
' --delete-removed --acl-public' \
' --add-header="Cache-Control:max-age=300"' \
' --no-preserve' \
' %s/ s3://%s/' \
% (env.s3cmd_cfg, src_path, bucket))
############################################################
# JS libraries
############################################################
if _config:
# Set env.cdn_path = path to cdn repository
env.cdn_path = abspath(join(_config['root_path'],
'cdn.knightlab.com', 'app', 'libs', _config['name']))
def _make_zip(file_path):
notice('Creating zip file: %s' % file_path)
with zipfile.ZipFile(file_path, 'w', zipfile.ZIP_DEFLATED) as f_zip:
for r in _config['stage']:
static.add_zip_files(f_zip, _config, [{
"src": r['src'],
"dst": _config['name'], "regex": r['regex']}])
@task
def build():
"""Build lib version"""
_setup_env()
# Get build config
if not 'build' in _config:
abort('Could not find "build" in config file')
# Check version
if not 'version' in _config:
_config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
warn('Using development version value "%(version)s"' % _config)
notice('Building version %(version)s...' % _config)
# Clean build directory
clean(_config['build_path'])
# Build it
for key, param in _config['build'].iteritems():
getattr(static, key)(_config, param)
@task
def stage():
"""Build/commit/tag/push lib version, copy to local cdn repo"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Ask user for a new version
_config['version'] = git.prompt_tag('Enter a new version number',
unique=True)
# Build version
# use execute to allow for other implementations of 'build'
execute('build')
# Commit/push/tag
with lcd(env.project_path):
with settings(warn_only=True):
local('git add build')
# support builds where there's no change; sometimes comes up when
# reusing a tag because of an unexpected problem
with settings(warn_only=True):
msg = local('git commit -m "Release %(version)s"' % _config,capture=True)
if 'nothing to commit' in msg:
warn(msg)
warn('continuing anyway')
elif not msg.startswith('[master'):
abort("Unexpected result: %s" % msg)
local('git push')
git.push_tag(_config['version'])
# Copy to local CDN repository
cdn_path = join(env.cdn_path, _config['version'])
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_dev():
"""
Build lib and copy to local cdn repository as 'dev' version
No tagging/committing/etc/
"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Build version
build()
# Copy to local CDN repository
cdn_path = join(env.cdn_path, 'dev')
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
@task
def stage_latest():
"""Copy lib version to latest within local cdn repo"""
_setup_env()
if 'version' in _config:
version = _config['version']
else:
version = git.prompt_tag('Which version to stage as "latest"?')
notice('stage_latest: %s' % version)
# Make sure version has been staged
version_cdn_path = join(env.cdn_path, version)
if not os.path.exists(version_cdn_path):
abort("Version '%s' has not been staged" % version)
# Stage version as latest
latest_cdn_path = join(env.cdn_path, 'latest')
clean(latest_cdn_path)
static.copy(_config, [{
"src": version_cdn_path, "dst": latest_cdn_path}])
@task
def untag():
"""Delete a tag (in case of error)"""
version = git.prompt_tag('Which tag to delete?')
if not version:
abort('No available version tag')
git.delete_tag(version)
| ############################################################
# Static websites deployed to S3
############################################################
if _config and 'deploy' in _config:
@task
def undeploy(env_type):
"""Delete website from S3 bucket. Specify stg|prd as argument."""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
bucket = _config['deploy'][env_type]['bucket']
warn('YOU ARE ABOUT TO DELETE EVERYTHING IN %s' % bucket)
if not do(prompt("Are you ABSOLUTELY sure you want to do this? (y/n): ").strip()):
abort('Aborting.')
with lcd(env.sites_path):
local('fablib/bin/s3cmd --config=%s del -r --force s3://%s/' \
% (env.s3cmd_cfg, bucket))
@task
def render(env_type):
"""Render templates (deploy except for actual sync with S3)"""
_setup_env()
# Activate local virtual environment (for render_templates+flask?)
local('. %s' % env.activate_path)
if not os.path.exists(env.s3cmd_cfg):
abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.")
if not env_type in _config['deploy']:
abort('Could not find "%s" in "deploy" in config file' % env_type)
if not "bucket" in _config['deploy'][env_type]:
abort('Could not find "bucket" in deploy.%s" in config file' % env_type)
if 'usemin_context' in _config['deploy'][env_type]:
usemin_context = _config['deploy'][env_type]['usemin_context']
else:
usemin_context = None
template_path = join(_config['project_path'], 'website', 'templates')
deploy | random_line_split |
|
secp256k1_recover.rs | bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
///
/// # Examples
///
/// This example demonstrates recovering a public key and using it to very a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
/// pub message: Vec<u8>,
/// pub signature: [u8; 64],
/// pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
/// entrypoint::ProgramResult,
/// keccak, msg,
/// program_error::ProgramError,
/// secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
/// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
/// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
/// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
/// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
/// instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
/// // The secp256k1 recovery operation accepts a cryptographically-hashed
/// // message only. Passing it anything else is insecure and allows signatures
/// // to be forged.
/// //
/// // This means that the code calling `secp256k1_recover` must perform the hash
/// // itself, and not assume that data passed to it has been properly hashed.
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(&instruction.message);
/// hasher.result()
/// };
///
/// // Reject high-s value signatures to prevent malleability.
/// // Solana does not do this itself.
/// // This may or may not be necessary depending on use case.
/// {
/// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// msg!("signature with high-s value");
/// return Err(ProgramError::InvalidArgument);
/// }
/// }
///
/// let recovered_pubkey = secp256k1_recover(
/// &message_hash.0,
/// instruction.recovery_id,
/// &instruction.signature,
/// )
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// // If we're using this function for signature verification then we
/// // need to check the pubkey is an expected value.
/// // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY {
/// return Err(ProgramError::InvalidArgument);
/// }
///
/// Ok(())
/// }
/// ```
///
/// The RPC client program:
///
/// ```no_run
/// # use solana_program::example_mocks::solana_rpc_client;
/// # use solana_program::example_mocks::solana_sdk;
/// use anyhow::Result;
/// use solana_rpc_client::rpc_client::RpcClient;
/// use solana_sdk::{
/// instruction::Instruction,
/// keccak,
/// pubkey::Pubkey,
/// signature::{Keypair, Signer},
/// transaction::Transaction,
/// };
/// # use borsh::{BorshDeserialize, BorshSerialize};
/// # #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn demo_secp256k1_recover(
/// payer_keypair: &Keypair,
/// secp256k1_secret_key: &libsecp256k1::SecretKey,
/// client: &RpcClient,
/// program_keypair: &Keypair,
/// ) -> Result<()> {
/// let message = b"hello world";
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(message);
/// hasher.result()
/// };
///
/// let secp_message = libsecp256k1::Message::parse(&message_hash.0);
/// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key);
///
/// let signature = signature.serialize();
///
/// let instr = DemoSecp256k1RecoverInstruction {
/// message: message.to_vec(),
/// signature,
/// recovery_id: recovery_id.serialize(),
/// };
/// let instr = Instruction::new_with_borsh(
/// program_keypair.pubkey(),
/// &instr,
/// vec![],
/// );
///
/// let blockhash = client.get_latest_blockhash()?;
/// let tx = Transaction::new_signed_with_payer(
/// &[instr],
/// Some(&payer_keypair.pubkey()),
/// &[payer_keypair],
/// blockhash,
/// ); | /// | random_line_split |
|
secp256k1_recover.rs | BF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
///
/// # Examples
///
/// This example demonstrates recovering a public key and using it to very a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
/// pub message: Vec<u8>,
/// pub signature: [u8; 64],
/// pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
/// entrypoint::ProgramResult,
/// keccak, msg,
/// program_error::ProgramError,
/// secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
/// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
/// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
/// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
/// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
/// instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
/// // The secp256k1 recovery operation accepts a cryptographically-hashed
/// // message only. Passing it anything else is insecure and allows signatures
/// // to be forged.
/// //
/// // This means that the code calling `secp256k1_recover` must perform the hash
/// // itself, and not assume that data passed to it has been properly hashed.
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(&instruction.message);
/// hasher.result()
/// };
///
/// // Reject high-s value signatures to prevent malleability.
/// // Solana does not do this itself.
/// // This may or may not be necessary depending on use case.
/// {
/// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// msg!("signature with high-s value");
/// return Err(ProgramError::InvalidArgument);
/// }
/// }
///
/// let recovered_pubkey = secp256k1_recover(
/// &message_hash.0,
/// instruction.recovery_id,
/// &instruction.signature,
/// )
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// // If we're using this function for signature verification then we
/// // need to check the pubkey is an expected value.
/// // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY {
/// return Err(ProgramError::InvalidArgument);
/// }
///
/// Ok(())
/// }
/// ```
///
/// The RPC client program:
///
/// ```no_run
/// # use solana_program::example_mocks::solana_rpc_client;
/// # use solana_program::example_mocks::solana_sdk;
/// use anyhow::Result;
/// use solana_rpc_client::rpc_client::RpcClient;
/// use solana_sdk::{
/// instruction::Instruction,
/// keccak,
/// pubkey::Pubkey,
/// signature::{Keypair, Signer},
/// transaction::Transaction,
/// };
/// # use borsh::{BorshDeserialize, BorshSerialize};
/// # #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn demo_secp256k1_recover(
/// payer_keypair: &Keypair,
/// secp256k1_secret_key: &libsecp256k1::SecretKey,
/// client: &RpcClient,
/// program_keypair: &Keypair,
/// ) -> Result<()> {
/// let message = b"hello world";
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(message);
/// hasher.result()
/// };
///
/// let secp_message = libsecp256k1::Message::parse(&message_hash.0);
/// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key);
///
/// let signature = signature.serialize();
///
/// let instr = DemoSecp256k1RecoverInstruction {
/// message: message.to_vec(),
/// signature,
/// recovery_id: recovery_id.serialize(),
/// };
/// let instr = Instruction::new_with_borsh(
/// program_keypair.pubkey(),
/// &instr,
/// vec![],
/// );
///
/// let blockhash = client.get_latest_blockhash()?;
/// let tx = Transaction::new_signed_with_payer(
/// &[instr],
/// Some(&payer_keypair.pubkey()),
/// &[payer_keypair],
/// blockhash,
/// );
///
/// client.send_and_confirm_transaction(&tx)?;
///
/// Ok(())
/// }
/// ```
pub fn | secp256k1_recover | identifier_name |
|
secp256k1_recover.rs |
pub fn to_bytes(self) -> [u8; 64] {
self.0
}
}
/// Recover the public key from a [secp256k1] ECDSA signature and
/// cryptographically-hashed message.
///
/// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
///
/// This function is specifically intended for efficiently implementing
/// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators.
/// It may be useful for other purposes.
///
/// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
///
/// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
/// signing, and allows the recovery process to be more efficent. Note that the
/// `recovery_id` here does not directly correspond to an Ethereum recovery ID
/// as used in `ecrecover`. This function accepts recovery IDs in the range of
/// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert
/// an Ethereum recovery ID to a value this function will accept subtract 27
/// from it, checking for underflow. In practice this function will not succeed
/// if given a recovery ID of 2 or 3, as these values represent an
/// "overflowing" signature, and this function returns an error when parsing
/// overflowing signatures.
///
/// [`keccak`]: crate::keccak
/// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub
///
/// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a
/// 64-byte secp256k1 public key. This public key corresponds to the secret key
/// that previously signed the message `hash` to produce the provided
/// `signature`.
///
/// While `secp256k1_recover` can be used to verify secp256k1 signatures by
/// comparing the recovered key against an expected key, Solana also provides
/// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and
/// can validate many signatures at once.
///
/// [sp]: crate::secp256k1_program
///
/// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`]
/// crate, which clients may also want to use.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
/// crytographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
/// hash.
///
/// Ethereum uses the [`keccak`] hash.
///
/// # Signature malleability
///
/// With the ECDSA signature algorithm it is possible for any party, given a
/// valid signature of some message, to create a second signature that is
/// equally valid. This is known as _signature malleability_. In many cases this
/// is not a concern, but in cases where applications rely on signatures to have
/// a unique representation this can be the source of bugs, potentially with
/// security implications.
///
/// **The solana `secp256k1_recover` function does not prevent signature
/// malleability**. This is in contrast to the Bitcoin secp256k1 library, which
/// does prevent malleability by default. Solana accepts signatures with `S`
/// values that are either in the _high order_ or in the _low order_, and it
/// is trivial to produce one from the other.
///
/// To prevent signature malleability, it is common for secp256k1 signature
/// validators to only accept signatures with low-order `S` values, and reject
/// signatures with high-order `S` values. The following code will accomplish
/// this:
///
/// ```rust
/// # use solana_program::program_error::ProgramError;
/// # let signature_bytes = [
/// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2,
/// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A,
/// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB,
/// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35,
/// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B,
/// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C,
/// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3,
/// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13,
/// # ];
/// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///
/// This has the downside that the program must link to the [`libsecp256k1`]
/// crate and parse the signature just for this check. Note that `libsecp256k1`
/// version 0.7.0 or greater is required for running on the Solana SBF target.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// For the most accurate description of signature malleability, and its
/// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin
/// Core secp256k1 library, the documentation of the [OpenZeppelin `recover`
/// method for Solidity][ozr], and [this description of the problem on
/// StackExchange][sxr].
///
/// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h
/// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes-
/// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116
///
/// # Errors
///
/// If `hash` is not 32 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1Re | {
Self(
<[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec))
.expect("Slice must be the same length as a Pubkey"),
)
} | identifier_body |
|
VismoController.js | = document.createElement("div");
shape.innerHTML = response;
}
else{
shape = document.createElement('object');
shape.setAttribute('codebase', 'http://www.adobe.com/svg/viewer/install/');
if(VismoUtils.browser.isIE)shape.setAttribute('classid', '15');
shape.setAttribute('style',"overflow:hidden;position:absolute;z-index:0;width:60px;height:120px;");
shape.setAttribute('type',"image/svg+xml");
var dataString = 'data:image/svg+xml,'+ response;
shape.setAttribute('data', dataString); // the "<svg>...</svg>" returned from Ajax call
jQuery(shape).css({width:60,height:120})
}
that.controlDiv.appendChild(shape);
jQuery(that.controlDiv).css({"background-image":"none"});
hidebuttons();
};
if(pan && zoom) callback(this.panzoomcontrolsSVG);
}
,getTransformation: function(){
return this.transformation;
}
,translate: function(x,y){
var t= this.getTransformation();
t.translate.x = x;
t.translate.y = y;
this.transform();
},
addMouseWheelZooming: function(){ /*not supported for internet explorer*/
var that = this;
this._addEnabledControl("mousewheelzooming");
this.crosshair = {lastdelta:false};
this.crosshair.pos = {x:0,y:0};
var t = this.getTransformation();
var mw = this.wrapper.onmousewheel;
var that = this;
var mm = this.wrapper.onmousemove;
var doingmw = false;
var mwactive = false;
var cancelMouseZoomCursor = function(){
if(VismoUtils.browser.isIE6)that.wrapper.style.cursor = "";
else jQuery(that.wrapper).removeClass("zooming");
}
jQuery(this.wrapper).mousedown(function(e){
mwactive = true;
if(VismoUtils.browser.isIE6)this.style.cursor = "crosshair";
else {
if(!jQuery(that.wrapper).hasClass("panning")){
jQuery(that.wrapper).addClass("zooming");
}
}
window.setTimeout(cancelMouseZoomCursor,2000);
});
jQuery(this.wrapper).mouseout(function(e){
var newTarget;
if(e.toElement) newTarget = e.toElement;
else newTarget = e.relatedTarget;
if(jQuery(newTarget,that.wrapper).length == 0){ //if not a child turn off
mwactive = false;
}
cancelMouseZoomCursor();
});
var domw = function(e){
if(!that.enabled) return;
/* thanks to http://adomas.org/javascript-mouse-wheel */
var delta = 0;
if(!that.goodToTransform(e)) {
doingmw = false;
return false;
}
var t = VismoClickingUtils.resolveTargetWithVismo(e);
if(t != that.wrapper && t.parentNode !=that.wrapper) return false;
if (e.wheelDelta) { /* IE/Opera. */
delta = e.wheelDelta/120;
/** In Opera 9, delta differs in sign as compared to IE.
*/
if (window.opera)
delta = -delta;
} else if (e.detail) { /** Mozilla case. */
/** In Mozilla, sign of delta is different than in IE.
* Also, delta is multiple of 3.
*/
delta = -e.detail/3;
}
var sensitivity = 0.4;
var transform = that.getTransformation();
var scale =transform.scale;
var origin = transform.origin;
var mousepos = VismoClickingUtils.getMouseFromEvent(e);
var w = parseInt(that.wrapper.style.width) / 2;
var h = parseInt(that.wrapper.style.height) / 2;
var translation = VismoTransformations.undoTransformation(mousepos.x,mousepos.y,that.transformation);
transform.translate= {x: -translation.x, y: -translation.y};
//{x: -mousepos.x + w,y: -mousepos.y + h};
transform.origin = {
x: mousepos.x,
y: mousepos.y
};
if(delta > that.crosshair.lastdelta + sensitivity || delta < that.crosshair.lastdelta - sensitivity){
var newx,newy;
if(delta > 0){
newx = parseFloat(scale.x) * 2;
newy = parseFloat(scale.y) * 2;
}
else{
newx = parseFloat(scale.x) / 2;
newy = parseFloat(scale.y) / 2;
}
if(newx > 0 && newy > 0){
scale.x = newx;
scale.y = newy;
that.setTransformation(transform);
}
}
that.crosshair.lastdelta = delta;
doingmw = false;
return false;
};
var onmousewheel = function(e){
if(!VismoUtils.browser.isIE){
jQuery(that.wrapper).addClass("zooming");
}
if(e.preventDefault){e.preventDefault();}
if (e && e.stopPropagation) {
e.stopPropagation();
}
e.cancelBubble=true;
if(!mwactive) return false;
if(!doingmw) {
var f = function(){
domw(e);
return false;
};
window.setTimeout(f,50);
doingmw = true;
}
return false;
};
|
var el = e.target;
//var el = e.srcElement;
if(!el) return;
while(el != element){
if(el == element) {
onmousewheel(e);
return false;
}
el = el.parentNode;
}
return;
};
window.onmousewheel = document.onmousewheel;
return;
}
else if (element.addEventListener){
element.onmousewheel = onmousewheel; //safari
element.addEventListener('DOMMouseScroll', onmousewheel, false);/** DOMMouseScroll is for mozilla. */
}
else if(element.attachEvent){
element.attachEvent("onmousewheel", onmousewheel); //safari
}
else{ //it's ie.. or something non-standardised. do nowt
//window.onmousewheel = document.onmousewheel = onmousewheel;
}
}
,disable: function(){
//console.log("disabled");
jQuery(".vismoControls",this.wrapper).css({display:"none"});
this.enabled = false;
}
,enable: function(){
//console.log("enabled");
this.enabled = true;
jQuery(".vismoControls",this.wrapper).css({display:""});
}
,goodToTransform: function(e){
var t = VismoClickingUtils.resolveTarget(e);
switch(t.tagName){
case "INPUT":
return false;
case "SELECT":
return false;
case "OPTION":
return false;
}
if(t && t.getAttribute("class") == "vismoControl") return false;
return true;
}
,addMousePanning: function(){
this._addEnabledControl("mousepanning");
var that = this;
var el = that.wrapper;
var md = el.onmousedown;
var mu = el.onmouseup;
var mm = el.onmousemove;
var panning_status = false;
//alert('here');
//jQuery(document).mouseup(function(e){alert("cool");}); //doesn't work?!
var intervalMS = 100;
if(VismoUtils.browser.isIE6){
intervalMS = 300;
}
var interval;
var cancelPanning = function(e){
if(interval)window.clearInterval(interval);
panning_status = false;
that.transform();
if(!VismoUtils.browser.isIE6){jQuery(that.wrapper).removeClass("panning");}
//style.cursor= that.defaultCursor;
that.wrapper.onmousemove = mm;
return false;
};
jQuery(that.controlDiv).mousedown(function(e){
cancelPanning();
});
var onmousemove = function(e){
if(e && e.shiftKey) {return false;}
if(mm){mm(e);}
if(!that.enabled) {return;}
if(!panning_status) {
return;
}
if(!VismoUtils.browser.isIE && !jQuery(that.wrapper).hasClass("panning")){
jQuery(that.wrapper).addClass("panning")
}
if(!that.goodToTransform(e)) {return;}
var pos = VismoClickingUtils.getMouseFromEventRelativeToElement(e,panning_status.clickpos.x,panning_status.clickpos.y,panning_status.elem);
if(!pos){return;}
var t = that.getTransformation();
//if(this.transformation) t = this.transformation;
var sc = t.scale;
/* work out deltas */
var xd =parseFloat(pos.x /sc.x);
var yd = parseFloat(pos.y / sc.y);
t.translate.x = panning_status.translate.x + xd;
| var element = this.wrapper;
if(VismoUtils.browser.isIE) {
document.onmousewheel = function(e){
if(!e)e = window.event; | random_line_split |
VismoController.js | document.createElement("div");
shape.innerHTML = response;
}
else{
shape = document.createElement('object');
shape.setAttribute('codebase', 'http://www.adobe.com/svg/viewer/install/');
if(VismoUtils.browser.isIE)shape.setAttribute('classid', '15');
shape.setAttribute('style',"overflow:hidden;position:absolute;z-index:0;width:60px;height:120px;");
shape.setAttribute('type',"image/svg+xml");
var dataString = 'data:image/svg+xml,'+ response;
shape.setAttribute('data', dataString); // the "<svg>...</svg>" returned from Ajax call
jQuery(shape).css({width:60,height:120})
}
that.controlDiv.appendChild(shape);
jQuery(that.controlDiv).css({"background-image":"none"});
hidebuttons();
};
if(pan && zoom) callback(this.panzoomcontrolsSVG);
}
,getTransformation: function(){
return this.transformation;
}
,translate: function(x,y){
var t= this.getTransformation();
t.translate.x = x;
t.translate.y = y;
this.transform();
},
addMouseWheelZooming: function(){ /*not supported for internet explorer*/
var that = this;
this._addEnabledControl("mousewheelzooming");
this.crosshair = {lastdelta:false};
this.crosshair.pos = {x:0,y:0};
var t = this.getTransformation();
var mw = this.wrapper.onmousewheel;
var that = this;
var mm = this.wrapper.onmousemove;
var doingmw = false;
var mwactive = false;
var cancelMouseZoomCursor = function(){
if(VismoUtils.browser.isIE6)that.wrapper.style.cursor = "";
else jQuery(that.wrapper).removeClass("zooming");
}
jQuery(this.wrapper).mousedown(function(e){
mwactive = true;
if(VismoUtils.browser.isIE6)this.style.cursor = "crosshair";
else {
if(!jQuery(that.wrapper).hasClass("panning")){
jQuery(that.wrapper).addClass("zooming");
}
}
window.setTimeout(cancelMouseZoomCursor,2000);
});
jQuery(this.wrapper).mouseout(function(e){
var newTarget;
if(e.toElement) newTarget = e.toElement;
else newTarget = e.relatedTarget;
if(jQuery(newTarget,that.wrapper).length == 0){ //if not a child turn off
mwactive = false;
}
cancelMouseZoomCursor();
});
var domw = function(e){
if(!that.enabled) return;
/* thanks to http://adomas.org/javascript-mouse-wheel */
var delta = 0;
if(!that.goodToTransform(e)) {
doingmw = false;
return false;
}
var t = VismoClickingUtils.resolveTargetWithVismo(e);
if(t != that.wrapper && t.parentNode !=that.wrapper) return false;
if (e.wheelDelta) { /* IE/Opera. */
delta = e.wheelDelta/120;
/** In Opera 9, delta differs in sign as compared to IE.
*/
if (window.opera)
delta = -delta;
} else if (e.detail) { /** Mozilla case. */
/** In Mozilla, sign of delta is different than in IE.
* Also, delta is multiple of 3.
*/
delta = -e.detail/3;
}
var sensitivity = 0.4;
var transform = that.getTransformation();
var scale =transform.scale;
var origin = transform.origin;
var mousepos = VismoClickingUtils.getMouseFromEvent(e);
var w = parseInt(that.wrapper.style.width) / 2;
var h = parseInt(that.wrapper.style.height) / 2;
var translation = VismoTransformations.undoTransformation(mousepos.x,mousepos.y,that.transformation);
transform.translate= {x: -translation.x, y: -translation.y};
//{x: -mousepos.x + w,y: -mousepos.y + h};
transform.origin = {
x: mousepos.x,
y: mousepos.y
};
if(delta > that.crosshair.lastdelta + sensitivity || delta < that.crosshair.lastdelta - sensitivity){
var newx,newy;
if(delta > 0){
newx = parseFloat(scale.x) * 2;
newy = parseFloat(scale.y) * 2;
}
else{
newx = parseFloat(scale.x) / 2;
newy = parseFloat(scale.y) / 2;
}
if(newx > 0 && newy > 0){
scale.x = newx;
scale.y = newy;
that.setTransformation(transform);
}
}
that.crosshair.lastdelta = delta;
doingmw = false;
return false;
};
var onmousewheel = function(e){
if(!VismoUtils.browser.isIE){
jQuery(that.wrapper).addClass("zooming");
}
if(e.preventDefault){e.preventDefault();}
if (e && e.stopPropagation) {
e.stopPropagation();
}
e.cancelBubble=true;
if(!mwactive) return false;
if(!doingmw) {
var f = function(){
domw(e);
return false;
};
window.setTimeout(f,50);
doingmw = true;
}
return false;
};
var element = this.wrapper;
if(VismoUtils.browser.isIE) {
document.onmousewheel = function(e){
if(!e)e = window.event;
var el = e.target;
//var el = e.srcElement;
if(!el) return;
while(el != element){
if(el == element) {
onmousewheel(e);
return false;
}
el = el.parentNode;
}
return;
};
window.onmousewheel = document.onmousewheel;
return;
}
else if (element.addEventListener){
element.onmousewheel = onmousewheel; //safari
element.addEventListener('DOMMouseScroll', onmousewheel, false);/** DOMMouseScroll is for mozilla. */
}
else if(element.attachEvent){
element.attachEvent("onmousewheel", onmousewheel); //safari
}
else{ //it's ie.. or something non-standardised. do nowt
//window.onmousewheel = document.onmousewheel = onmousewheel;
}
}
,disable: function(){
//console.log("disabled");
jQuery(".vismoControls",this.wrapper).css({display:"none"});
this.enabled = false;
}
,enable: function(){
//console.log("enabled");
this.enabled = true;
jQuery(".vismoControls",this.wrapper).css({display:""});
}
,goodToTransform: function(e){
var t = VismoClickingUtils.resolveTarget(e);
switch(t.tagName){
case "INPUT":
return false;
case "SELECT":
return false;
case "OPTION":
return false;
}
if(t && t.getAttribute("class") == "vismoControl") return false;
return true;
}
,addMousePanning: function(){
this._addEnabledControl("mousepanning");
var that = this;
var el = that.wrapper;
var md = el.onmousedown;
var mu = el.onmouseup;
var mm = el.onmousemove;
var panning_status = false;
//alert('here');
//jQuery(document).mouseup(function(e){alert("cool");}); //doesn't work?!
var intervalMS = 100;
if(VismoUtils.browser.isIE6){
intervalMS = 300;
}
var interval;
var cancelPanning = function(e){
if(interval)window.clearInterval(interval);
panning_status = false;
that.transform();
if(!VismoUtils.browser.isIE6){jQuery(that.wrapper).removeClass("panning");}
//style.cursor= that.defaultCursor;
that.wrapper.onmousemove = mm;
return false;
};
jQuery(that.controlDiv).mousedown(function(e){
cancelPanning();
});
var onmousemove = function(e){
if(e && e.shiftKey) {return false;}
if(mm){mm(e);}
if(!that.enabled) {return;}
if(!panning_status) |
if(!VismoUtils.browser.isIE && !jQuery(that.wrapper).hasClass("panning")){
jQuery(that.wrapper).addClass("panning")
}
if(!that.goodToTransform(e)) {return;}
var pos = VismoClickingUtils.getMouseFromEventRelativeToElement(e,panning_status.clickpos.x,panning_status.clickpos.y,panning_status.elem);
if(!pos){return;}
var t = that.getTransformation();
//if(this.transformation) t = this.transformation;
var sc = t.scale;
/* work out deltas */
var xd =parseFloat(pos.x /sc.x);
var yd = parseFloat(pos.y / sc.y);
t.translate.x = panning_status.translate.x + xd;
| {
return;
} | conditional_block |
model.py |
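# NOTE: the imports below are not part of the original excerpt; they reflect the
# dependencies this module appears to use. The commented-out project-local imports
# (Model, OrthoSegmModelConfig, Standardizer, _create_dir) are assumptions about
# where those helpers are defined.
import os
import json
from functools import wraps

import cv2
import numpy as np
import rasterio
import keras

# from .model_base import Model                    # assumed project-local base class
# from .config import OrthoSegmModelConfig         # assumed project-local config class
# from .standardizer import Standardizer           # assumed project-local preprocessing helper
# from .utils import _create_dir                   # assumed project-local directory helper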
def _find_weights(weights_dir, mode='last'):
"""Find weights path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(weights_dir))[-1]
weights_path = os.path.join(weights_dir, file_name)
elif mode == 'best':
raise NotImplementedError
else:
raise NotImplementedError
return weights_path
def _find_model(model_chkp_dir, mode='last'):
    """Find the model checkpoint path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(model_chkp_dir))[-1]
model_path = os.path.join(model_chkp_dir, file_name)
    elif mode == 'best':
        raise NotImplementedError
    else:
        raise NotImplementedError
    return model_path
def load_model(model_dir, mode='inference', config_path='auto', graph_path='auto',
weights_path='auto', model_path='auto', custom_objects=None):
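    """Restore a SegmentationModel from `model_dir`.

    Paths left as 'auto' are resolved against the standard sub-directories of
    `model_dir` (config.json, graph.json, weights/, models/). In 'inference'
    mode the full checkpoint is preferred, with the saved graph plus weights
    used as a fallback.
    """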
if config_path == 'auto':
config_path = os.path.join(model_dir, 'config.json')
if graph_path == 'auto':
graph_path = os.path.join(model_dir, 'graph.json')
if weights_path == 'auto':
weights_dir = os.path.join(model_dir, 'weights')
weights_path = _find_weights(weights_dir)
if model_path == 'auto':
model_chkp_dir = os.path.join(model_dir, 'models')
model_path = _find_model(model_chkp_dir)
# load configuration file
config = OrthoSegmModelConfig.load_config(config_path)
# load model graph file
with open(graph_path, 'r') as f:
graph = json.load(f)
    if mode == 'train':
        model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=True)
    elif mode == 'inference':
        # prefer restoring the full checkpoint; fall back to rebuilding the
        # architecture from the saved graph and loading the weights separately
        try:
            model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False)
        except Exception:
            model = keras.models.model_from_json(json.dumps(graph))
            model.load_weights(weights_path)
    else:
        raise ValueError("mode must be 'train' or 'inference', got '{}'".format(mode))
segmentation_model = SegmentationModel(model_dir)
segmentation_model.build(model, config)
return segmentation_model
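
# Illustrative usage sketch (not part of the original module; the directory
# path and the input batch are placeholders):
#
#   seg_model = load_model('/path/to/model_dir', mode='inference')
#   predictions = seg_model.predict(batch)  # attribute access is delegated to the keras model
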
class SegmentationModel(Model):
    """Wrapper around a keras segmentation model and its on-disk directory layout
    (config, graph, weights, checkpoints), with GSD-aware image and mask loading.
    """
def __init__(self, model_dir):
self.config = None
self.model = None
self._built = False
self.model_dir = _create_dir(model_dir)
self.log_dir = _create_dir(model_dir, 'log')
self.weights_dir = _create_dir(model_dir, 'weights')
        self.models_dir = _create_dir(model_dir, 'models')  # full model checkpoints; distinct from self.model above
# input standardization pipeline function
self._input_standart = None
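    # delegate unknown attribute access (e.g. predict, summary) to the wrapped keras model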
def __getattr__(self, attr):
return getattr(self.model, attr)
def build(self, model, config):
self.model = model
self.config = config
        # save the model configuration (written only once per model directory)
config_path = os.path.join(self.model_dir, 'config.json')
if not os.path.exists(config_path):
self.config.save(config_path, indent=2)
# save graph of model
graph_path = os.path.join(self.model_dir, 'graph.json')
model_graph = json.loads(model.to_json())
with open(graph_path, 'w') as f:
json.dump(model_graph, f, indent=2)
st = Standardizer(**self.config.STANDARDISING_PARAMS)
self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS)
self._built = True
def built(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._built:
return func(self, *args, **kwargs)
else:
raise RuntimeError('Your model is not built! Please provide keras model and config.')
return wrapped
@built
def _get_gsd(self):
gsd = self.config.GSD
if np.isscalar(gsd):
gsd = (gsd, gsd)
gsd_x = gsd[0]
gsd_y = gsd[1]
return gsd_x, gsd_y
@built
def _load_image(self, path, target_size=None, return_transform=False, return_crs=True):
dataset_element_name = os.path.basename(path)
path = os.path.normpath(path)
channels = self.config.CHANNELS
target_gsd_x, target_gsd_y = self._get_gsd()
# local variables for tracking the finest-resolution channel (smallest GSD) and its size across iterations
transform = None
crs = None
min_gsd_x = 10e5
min_gsd_y = 10e5
gsd_x = min_gsd_x
gsd_y = min_gsd_y
max_h = 0
max_w = 0
image_ids = ['20170304', '20170404']
channels_list = []
for image_id in image_ids:
channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in
channels]
for channel_name in channels_:
try:
# open image(channel) file
# use 'r+' mode to support on windows >__<
# (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow)
with rasterio.open(channel_name, 'r+') as img_obj:
# read metadata from image(channel) file
tm = list(img_obj.transform)
gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
crs = img_obj.crs
# remember best gsd and h and w for future resizing
if gsd_x * gsd_y < min_gsd_x * min_gsd_y:
transform = tm
min_gsd_x = gsd_x
min_gsd_y = gsd_y
max_h = img_obj.height
max_w = img_obj.width
# read channels
img = img_obj.read()
img = np.squeeze(img)
channels_list.append(img)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(channel_name)))
raise Exception('No channels!')
# define width and heights of our images for our model gsd
w = int(max_w * gsd_x / target_gsd_x)
h = int(max_h * gsd_y / target_gsd_y)
if target_size:
w = target_size[1]
h = target_size[0]
channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list]
image = np.array(channels_list)
image = np.rollaxis(image, 0, 3)
if return_transform:
if return_crs:
return image, transform, crs
else:
return image, transform
return image
@built
def _load_masks(self, path):
path = os.path.normpath(path)
classes = self.config.CLASSES
mask_id = os.path.basename(path)
masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes]
masks_list = []
for m, cls in zip(masks, classes):
try:
with rasterio.open(m, 'r') as mask_obj:
mask = mask_obj.read()
mask = np.squeeze(mask)
masks_list.append(mask)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(m)))
raise Exception('No mask for class {}!'.format(cls))
masks = np.array(masks_list)
masks = np.rollaxis(masks, 0, 3)
# if target_size:
# cv2.resize(masks, target_size, cv2.INTER_NEAREST)
return masks
def _to_binary_masks(self, image, tm):
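# Resample the predicted mask stack from the model's GSD back to the source raster grid; returns a (channels, H, W) array and its (w, h) size.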
gsd_x, gsd_y = self._get_gsd()
target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
# define width and heights of our masks for our model gsd
w = int(image.shape[1] * gsd_x / target_gsd_x)
h = int(image.shape[0] * gsd_y / target_gsd_y)
image = cv2.resize(image, (w, h), cv2.INTER_LINEAR)
if image.ndim == 2:
image = np.expand_dims(image, axis=-1)
return np.rollaxis(image, 2, 0), (w, h)
@built
def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None,
crs=None):
image, shape = self._to_binary_masks(image, transform_matrix)
path = os.path.normpath(path) # delete '\' or '//' in the end of filepath
if not os.path.exists(path):
os.makedirs(path)
w, h = shape
image_basename = os.path.basename(path)
saved_images_names = []
for i, cls in | path = os.path.join(*args)
if not os.path.exists(path):
os.makedirs(path)
return path | identifier_body |
|
model.py | _path, custom_objects=custom_objects, compile=True)
if mode == 'inference':
try:
model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False)
except:
model = keras.models.model_from_json(json.dumps(graph))
model.load_weights(weights_path)
segmentation_model = SegmentationModel(model_dir)
segmentation_model.build(model, config)
return segmentation_model
class SegmentationModel(Model):
"""
"""
def __init__(self, model_dir):
self.config = None
self.model = None
self._built = False
self.model_dir = _create_dir(model_dir)
self.log_dir = _create_dir(model_dir, 'log')
self.weights_dir = _create_dir(model_dir, 'weights')
self.models_dir = _create_dir(model_dir, 'models') # be careful with model and models! dirs
# input standardization pipeline function
self._input_standart = None
def __getattr__(self, attr):
return getattr(self.model, attr)
def build(self, model, config):
self.model = model
self.config = config
# save configurations of model
config_path = os.path.join(self.model_dir, 'config.json')
if not os.path.exists(config_path):
self.config.save(config_path, indent=2)
# save graph of model
graph_path = os.path.join(self.model_dir, 'graph.json')
model_graph = json.loads(model.to_json())
with open(graph_path, 'w') as f:
json.dump(model_graph, f, indent=2)
st = Standardizer(**self.config.STANDARDISING_PARAMS)
self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS)
self._built = True
def built(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._built:
return func(self, *args, **kwargs)
else:
raise RuntimeError('Your model is not built! Please provide keras model and config.')
return wrapped
@built
def _get_gsd(self):
gsd = self.config.GSD
if np.isscalar(gsd):
gsd = (gsd, gsd)
gsd_x = gsd[0]
gsd_y = gsd[1]
return gsd_x, gsd_y
@built
def _load_image(self, path, target_size=None, return_transform=False, return_crs=True):
dataset_element_name = os.path.basename(path)
path = os.path.normpath(path)
channels = self.config.CHANNELS
target_gsd_x, target_gsd_y = self._get_gsd()
# local variables for tracking the finest-resolution channel (smallest GSD) and its size across iterations
transform = None
crs = None
min_gsd_x = 10e5
min_gsd_y = 10e5
gsd_x = min_gsd_x
gsd_y = min_gsd_y
max_h = 0
max_w = 0
image_ids = ['20170304', '20170404']
channels_list = []
for image_id in image_ids:
channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in
channels]
for channel_name in channels_:
try:
# open image(channel) file
# use 'r+' mode to support on windows >__<
# (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow)
with rasterio.open(channel_name, 'r+') as img_obj:
# read metadata from image(channel) file
tm = list(img_obj.transform)
gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
crs = img_obj.crs
# remember best gsd and h and w for future resizing
if gsd_x * gsd_y < min_gsd_x * min_gsd_y:
transform = tm
min_gsd_x = gsd_x
min_gsd_y = gsd_y
max_h = img_obj.height
max_w = img_obj.width
# read channels
img = img_obj.read()
img = np.squeeze(img)
channels_list.append(img)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(channel_name)))
raise Exception('No channels!')
# define width and heights of our images for our model gsd
w = int(max_w * gsd_x / target_gsd_x)
h = int(max_h * gsd_y / target_gsd_y)
if target_size:
w = target_size[1]
h = target_size[0]
channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list]
image = np.array(channels_list)
image = np.rollaxis(image, 0, 3)
if return_transform:
if return_crs:
return image, transform, crs
else:
return image, transform
return image
@built
def _load_masks(self, path):
path = os.path.normpath(path)
classes = self.config.CLASSES
mask_id = os.path.basename(path)
masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes]
masks_list = []
for m, cls in zip(masks, classes):
try:
with rasterio.open(m, 'r') as mask_obj:
mask = mask_obj.read()
mask = np.squeeze(mask)
masks_list.append(mask)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(m)))
raise Exception('No mask for class {}!'.format(cls))
masks = np.array(masks_list)
masks = np.rollaxis(masks, 0, 3)
# if target_size:
# cv2.resize(masks, target_size, cv2.INTER_NEAREST)
return masks
def _to_binary_masks(self, image, tm):
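# Resample the predicted mask stack from the model's GSD back to the source raster grid; returns a (channels, H, W) array and its (w, h) size.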
gsd_x, gsd_y = self._get_gsd()
target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
# define width and heights of our masks for our model gsd
w = int(image.shape[1] * gsd_x / target_gsd_x)
h = int(image.shape[0] * gsd_y / target_gsd_y)
image = cv2.resize(image, (w, h), cv2.INTER_LINEAR)
if image.ndim == 2:
image = np.expand_dims(image, axis=-1)
return np.rollaxis(image, 2, 0), (w, h)
@built
def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None,
crs=None):
image, shape = self._to_binary_masks(image, transform_matrix)
path = os.path.normpath(path) # delete '\' or '//' in the end of filepath
if not os.path.exists(path):
os.makedirs(path)
w, h = shape
image_basename = os.path.basename(path)
saved_images_names = []
for i, cls in enumerate(self.config.CLASSES):
# save each mask to separate file
image_name = image_basename + '_class_{}_{}.tif'.format(cls, save_postfix)
saved_images_names.append(image_name)
image_path = os.path.join(path, image_name)
with rasterio.open(image_path, 'w', width=w, height=h, driver='GTiff', count=1,
dtype='uint8', NBITS=1, transform=transform_matrix[:6], crs=crs) as dst:
dst.write(image[i].astype(rasterio.uint8), 1)
return saved_images_names
def get_vector_markup(self, mask, geotransform, trg_crs='epsg:3857'):
"""
Converts a raster mask from raw model output into GeoJSON vector markup.
:param mask: raster mask array (binary)
:param geotransform: geotransform of the initial dataset
:param trg_crs: target coordinate reference system
:return: GeoJSON string of the vectorized mask
"""
# plt.imsave(os.path.join(time_series_path, time_frame, '_'.join([dataset_element_name, mask_name, time_frame, self.get_timestamp()])+'.png'), raw)
shapes = rasterio.features.shapes(mask, transform=geotransform)
# the last shape contains all geometry
shapes = list(shapes)[:-1]
polygons = [geojson.Feature(geometry=geojson.Polygon(shape[0]['coordinates'])) for shape in shapes]
crs = {
"type": "name",
"properties": {
"name": trg_crs}}
gs = geojson.FeatureCollection(polygons, crs=crs)
return geojson.dumps(gs)
@built
def | _save_vector_masks | identifier_name |
|
model.py | from .standardizer import Standardizer
def _create_dir(*args):
path = os.path.join(*args)
if not os.path.exists(path):
os.makedirs(path)
return path
def _find_weights(weights_dir, mode='last'):
"""Find weights path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(weights_dir))[-1]
weights_path = os.path.join(weights_dir, file_name)
elif mode == 'best':
raise NotImplementedError
else:
raise NotImplementedError
return weights_path
def _find_model(model_chkp_dir, mode='last'):
"""Find weights path if not provided manually during model initialization"""
if mode == 'last':
file_name = sorted(os.listdir(model_chkp_dir))[-1]
model_path = os.path.join(model_chkp_dir, file_name)
elif mode == 'best':
raise NotImplementedError
return model_path
def load_model(model_dir, mode='inference', config_path='auto', graph_path='auto',
weights_path='auto', model_path='auto', custom_objects=None):
if config_path == 'auto':
config_path = os.path.join(model_dir, 'config.json')
if graph_path == 'auto':
graph_path = os.path.join(model_dir, 'graph.json')
if weights_path == 'auto':
weights_dir = os.path.join(model_dir, 'weights')
weights_path = _find_weights(weights_dir)
if model_path == 'auto':
model_chkp_dir = os.path.join(model_dir, 'models')
model_path = _find_model(model_chkp_dir)
# load configuration file
config = OrthoSegmModelConfig.load_config(config_path)
# load model graph file
with open(graph_path, 'r') as f:
graph = json.load(f)
if mode == 'train':
model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=True)
if mode == 'inference':
try:
model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False)
except:
model = keras.models.model_from_json(json.dumps(graph))
model.load_weights(weights_path)
segmentation_model = SegmentationModel(model_dir)
segmentation_model.build(model, config)
return segmentation_model
class SegmentationModel(Model):
"""
"""
def __init__(self, model_dir):
self.config = None
self.model = None
self._built = False
self.model_dir = _create_dir(model_dir)
self.log_dir = _create_dir(model_dir, 'log')
self.weights_dir = _create_dir(model_dir, 'weights')
self.models_dir = _create_dir(model_dir, 'models') # be careful with model and models! dirs
# input standardization pipeline function
self._input_standart = None
def __getattr__(self, attr):
return getattr(self.model, attr)
def build(self, model, config):
self.model = model
self.config = config
# save configurations of model
config_path = os.path.join(self.model_dir, 'config.json')
if not os.path.exists(config_path):
self.config.save(config_path, indent=2)
# save graph of model
graph_path = os.path.join(self.model_dir, 'graph.json')
model_graph = json.loads(model.to_json())
with open(graph_path, 'w') as f:
json.dump(model_graph, f, indent=2)
st = Standardizer(**self.config.STANDARDISING_PARAMS)
self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS)
self._built = True
def built(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._built:
return func(self, *args, **kwargs)
else:
raise RuntimeError('Your model is not built! Please provide keras model and config.')
return wrapped
@built
def _get_gsd(self):
gsd = self.config.GSD
if np.isscalar(gsd):
gsd = (gsd, gsd)
gsd_x = gsd[0]
gsd_y = gsd[1]
return gsd_x, gsd_y
@built
def _load_image(self, path, target_size=None, return_transform=False, return_crs=True):
dataset_element_name = os.path.basename(path)
path = os.path.normpath(path)
channels = self.config.CHANNELS
target_gsd_x, target_gsd_y = self._get_gsd()
# local variables for tracking the finest-resolution channel (smallest GSD) and its size across iterations
transform = None
crs = None
min_gsd_x = 10e5
min_gsd_y = 10e5
gsd_x = min_gsd_x
gsd_y = min_gsd_y
max_h = 0
max_w = 0
image_ids = ['20170304', '20170404']
channels_list = []
for image_id in image_ids:
channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in
channels]
for channel_name in channels_:
try:
# open image(channel) file
# use 'r+' mode to support on windows >__<
# (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow)
with rasterio.open(channel_name, 'r+') as img_obj:
# read metadata from image(channel) file
tm = list(img_obj.transform)
gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
crs = img_obj.crs
# remember best gsd and h and w for future resizing
if gsd_x * gsd_y < min_gsd_x * min_gsd_y:
transform = tm
min_gsd_x = gsd_x
min_gsd_y = gsd_y
max_h = img_obj.height
max_w = img_obj.width
# read channels
img = img_obj.read()
img = np.squeeze(img)
channels_list.append(img)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(channel_name)))
raise Exception('No channels!')
# define width and heights of our images for our model gsd
w = int(max_w * gsd_x / target_gsd_x)
h = int(max_h * gsd_y / target_gsd_y)
if target_size:
w = target_size[1]
h = target_size[0]
channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list]
image = np.array(channels_list)
image = np.rollaxis(image, 0, 3)
if return_transform:
if return_crs:
return image, transform, crs
else:
return image, transform
return image
@built
def _load_masks(self, path):
path = os.path.normpath(path)
classes = self.config.CLASSES
mask_id = os.path.basename(path)
masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes]
masks_list = []
for m, cls in zip(masks, classes):
try:
with rasterio.open(m, 'r') as mask_obj:
mask = mask_obj.read()
mask = np.squeeze(mask)
masks_list.append(mask)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(m)))
raise Exception('No mask for class {}!'.format(cls))
masks = np.array(masks_list)
masks = np.rollaxis(masks, 0, 3)
# if target_size:
# cv2.resize(masks, target_size, cv2.INTER_NEAREST)
return masks
def _to_binary_masks(self, image, tm):
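# Resample the predicted mask stack from the model's GSD back to the source raster grid; returns a (channels, H, W) array and its (w, h) size.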
gsd_x, gsd_y = self._get_gsd()
target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
# define width and heights of our masks for our model gsd
w = int(image.shape[1] * gsd_x / target_gsd_x)
h = int(image.shape[0] * gsd_y / target_gsd_y)
image = cv2.resize(image, (w, h), cv2.INTER_LINEAR)
if image.ndim == 2:
image = np.expand_dims(image, axis=-1)
return np.rollaxis(image, 2, 0), (w, h)
@built
def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None,
crs=None):
image, shape = self._to_binary_masks(image, transform_matrix)
path = os.path.normpath(path) # delete '\' or '//' in the end of filepath
if not os.path.exists(path):
os.makedirs(path)
w | from .config import OrthoSegmModelConfig | random_line_split |
|
model.py | auto', graph_path='auto',
weights_path='auto', model_path='auto', custom_objects=None):
if config_path == 'auto':
config_path = os.path.join(model_dir, 'config.json')
if graph_path == 'auto':
graph_path = os.path.join(model_dir, 'graph.json')
if weights_path == 'auto':
weights_dir = os.path.join(model_dir, 'weights')
weights_path = _find_weights(weights_dir)
if model_path == 'auto':
model_chkp_dir = os.path.join(model_dir, 'models')
model_path = _find_model(model_chkp_dir)
# load configuration file
config = OrthoSegmModelConfig.load_config(config_path)
# load model graph file
with open(graph_path, 'r') as f:
graph = json.load(f)
if mode == 'train':
|
if mode == 'inference':
try:
model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False)
except:
model = keras.models.model_from_json(json.dumps(graph))
model.load_weights(weights_path)
segmentation_model = SegmentationModel(model_dir)
segmentation_model.build(model, config)
return segmentation_model
class SegmentationModel(Model):
"""
"""
def __init__(self, model_dir):
self.config = None
self.model = None
self._built = False
self.model_dir = _create_dir(model_dir)
self.log_dir = _create_dir(model_dir, 'log')
self.weights_dir = _create_dir(model_dir, 'weights')
self.models_dir = _create_dir(model_dir, 'models') # be careful with model and models! dirs
# input standardization pipeline function
self._input_standart = None
def __getattr__(self, attr):
return getattr(self.model, attr)
def build(self, model, config):
self.model = model
self.config = config
# save configurations of model
config_path = os.path.join(self.model_dir, 'config.json')
if not os.path.exists(config_path):
self.config.save(config_path, indent=2)
# save graph of model
graph_path = os.path.join(self.model_dir, 'graph.json')
model_graph = json.loads(model.to_json())
with open(graph_path, 'w') as f:
json.dump(model_graph, f, indent=2)
st = Standardizer(**self.config.STANDARDISING_PARAMS)
self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS)
self._built = True
def built(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if self._built:
return func(self, *args, **kwargs)
else:
raise RuntimeError('Your model is not built! Please provide keras model and config.')
return wrapped
@built
def _get_gsd(self):
gsd = self.config.GSD
if np.isscalar(gsd):
gsd = (gsd, gsd)
gsd_x = gsd[0]
gsd_y = gsd[1]
return gsd_x, gsd_y
@built
def _load_image(self, path, target_size=None, return_transform=False, return_crs=True):
dataset_element_name = os.path.basename(path)
path = os.path.normpath(path)
channels = self.config.CHANNELS
target_gsd_x, target_gsd_y = self._get_gsd()
# local variables for tracking the finest-resolution channel (smallest GSD) and its size across iterations
transform = None
crs = None
min_gsd_x = 10e5
min_gsd_y = 10e5
gsd_x = min_gsd_x
gsd_y = min_gsd_y
max_h = 0
max_w = 0
image_ids = ['20170304', '20170404']
channels_list = []
for image_id in image_ids:
channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in
channels]
for channel_name in channels_:
try:
# open image(channel) file
# use 'r+' mode to support on windows >__<
# (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow)
with rasterio.open(channel_name, 'r+') as img_obj:
# read metadata from image(channel) file
tm = list(img_obj.transform)
gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
crs = img_obj.crs
# remember best gsd and h and w for future resizing
if gsd_x * gsd_y < min_gsd_x * min_gsd_y:
transform = tm
min_gsd_x = gsd_x
min_gsd_y = gsd_y
max_h = img_obj.height
max_w = img_obj.width
# read channels
img = img_obj.read()
img = np.squeeze(img)
channels_list.append(img)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(channel_name)))
raise Exception('No channels!')
# define width and heights of our images for our model gsd
w = int(max_w * gsd_x / target_gsd_x)
h = int(max_h * gsd_y / target_gsd_y)
if target_size:
w = target_size[1]
h = target_size[0]
channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list]
image = np.array(channels_list)
image = np.rollaxis(image, 0, 3)
if return_transform:
if return_crs:
return image, transform, crs
else:
return image, transform
return image
@built
def _load_masks(self, path):
path = os.path.normpath(path)
classes = self.config.CLASSES
mask_id = os.path.basename(path)
masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes]
masks_list = []
for m, cls in zip(masks, classes):
try:
with rasterio.open(m, 'r') as mask_obj:
mask = mask_obj.read()
mask = np.squeeze(mask)
masks_list.append(mask)
except FileNotFoundError:
print('No such image {}'.format(os.path.basename(m)))
raise Exception('No mask for class {}!'.format(cls))
masks = np.array(masks_list)
masks = np.rollaxis(masks, 0, 3)
# if target_size:
# cv2.resize(masks, target_size, cv2.INTER_NEAREST)
return masks
def _to_binary_masks(self, image, tm):
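# Resample the predicted mask stack from the model's GSD back to the source raster grid; returns a (channels, H, W) array and its (w, h) size.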
gsd_x, gsd_y = self._get_gsd()
target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2)
target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2)
# define width and heights of our masks for our model gsd
w = int(image.shape[1] * gsd_x / target_gsd_x)
h = int(image.shape[0] * gsd_y / target_gsd_y)
image = cv2.resize(image, (w, h), cv2.INTER_LINEAR)
if image.ndim == 2:
image = np.expand_dims(image, axis=-1)
return np.rollaxis(image, 2, 0), (w, h)
@built
def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None,
crs=None):
image, shape = self._to_binary_masks(image, transform_matrix)
path = os.path.normpath(path) # delete '\' or '//' in the end of filepath
if not os.path.exists(path):
os.makedirs(path)
w, h = shape
image_basename = os.path.basename(path)
saved_images_names = []
for i, cls in enumerate(self.config.CLASSES):
# save each mask to separate file
image_name = image_basename + '_class_{}_{}.tif'.format(cls, save_postfix)
saved_images_names.append(image_name)
image_path = os.path.join(path, image_name)
with rasterio.open(image_path, 'w', width=w, height=h, driver='GTiff', count=1,
dtype='uint8', NBITS=1, transform=transform_matrix[:6], crs=crs) as dst:
dst.write(image[i].astype(rasterio.uint8), 1)
return saved_images_names
def get_vector_markup(self, mask, geotransform, trg_crs='epsg:3857'):
"""
Saves vector mask from raw model output as .geojson
:param raw_mask_path:
:param transform: geotransform of initial dataset
:param filename: | model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=True) | conditional_block |
run_ERDCA.py | fam_ecc/'
data_path = '/home/eclay/Pfam-A.full'
preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/'
#pfam_id = 'PF00025'
pfam_id = sys.argv[1]
cpus_per_job = int(sys.argv[2])
job_id = sys.argv[3]
print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id))
# Read in Reference Protein Structure
pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id))
# convert bytes to str (python 2 to python 3)
pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1])
ipdb = 0
tpdb = int(pdb[ipdb,1])
print('Ref Sequence # should be : ',tpdb-1)
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
#--------------------------------------- Create PDB-PP Reference Sequence --------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
msa_file, ref_file = tools.write_FASTA(s[tpdb], s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='orig')
erdca_visualizer = contact_visualizer.DCAVisualizer('protein', pdb[ipdb,6], pdb[ipdb,5],refseq_file=ref_file)
biomol_info,er_pdb_seq = erdca_visualizer.pdb_content.pdb_chain_sequences[erdca_visualizer.pdb_chain_id]
print('\n\nERDCA-Visualizer pdb seq')
print(er_pdb_seq)
erdca_msa_file, erdca_ref_file = tools.write_FASTA(er_pdb_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
if 1: # DCA read in
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain]
ppb = PPBuilder().build_peptides(chain)
# print(pp.get_sequence())
print('peptide build of chain produced %d elements'%(len(ppb)))
matching_seq_dict = {}
poly_seq = list()
for i,pp in enumerate(ppb):
for char in str(pp.get_sequence()):
poly_seq.append(char)
print('PDB Polypeptide Sequence: \n',poly_seq)
#check that poly_seq matches up with given MSA
poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]]
print('PDB Polypeptide Sequence (In Proteins PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range)
if len(poly_seq_range) < 10:
print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE')
poly_seq_range = poly_seq
else:
pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range')
erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
preprocessed_data_outfile = preprocess_path+'MSA_%s_PreProcessed.fa'%pfam_id
print(preprocessed_data_outfile)
print('\n\nPre-Processing MSA with Range PP Seq\n\n')
trimmer = msa_trimmer.MSATrimmer(
erdca_msa_file, biomolecule='PROTEIN',
refseq_file = erdca_ref_file
)
pfam_dict['ref_file'] = erdca_ref_file
try:
preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
except(MSATrimmerException):
ERR = 'PPseq-MSA'
print('Error with MSA trimming\n%s\n'%ERR)
sys.exit()
print('\n\n\n',s[s_ipdb])
#write trimmed msa to file in FASTA format
with open(preprocessed_data_outfile, 'w') as fh:
for seqid, seq in preprocessed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#----------------------------------------- Run Simulation ERDCA ------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
try:
print('Initializing ER instance\n\n')
# Compute DI scores using Expectation Reflection algorithm
erdca_inst = erdca.ERDCA(
preprocessed_data_outfile,
'PROTEIN',
s_index = s_index,
pseudocount = 0.5,
num_threads = cpus_per_job-4,
seqid = 0.8)
except:
ref_seq = s[tpdb,:]
print('Using PDB defined reference sequence from MSA:\n',ref_seq)
msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path)
pfam_dict['ref_file'] = ref_file
print('Re-trimming MSA with pdb index defined ref_seq')
# create MSATrimmer instance
trimmer = msa_trimmer.MSATrimmer(
msa_file, biomolecule='protein',
refseq_file=ref_file
)
preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
#write trimmed msa to file in FASTA format
with open(preprocessed_data_outfile, 'w') as fh:
for seqid, seq in preprocessed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
erdca_inst = erdca.ERDCA(
preprocessed_data_outfile,
'PROTEIN',
s_index = s_index,
pseudocount = 0.5,
num_threads = cpus_per_job-4,
seqid = 0.8)
# Save processed data dictionary and FASTA file
pfam_dict['processed_msa'] = preprocessed_data
pfam_dict['msa'] = s
pfam_dict['s_index'] = s_index
pfam_dict['s_ipdb'] = s_ipdb
pfam_dict['cols_removed'] = cols_removed
input_data_file = preprocess_path+"%s_DP_ER.pickle"%(pfam_id)
with open(input_data_file,"wb") as f:
pickle.dump(pfam_dict, f)
f.close()
print('Running ER simulation\n\n')
# Compute average product corrected Frobenius norm of the couplings
start_time = timeit.default_timer()
erdca_DI = erdca_inst.compute_sorted_DI()
run_time = timeit.default_timer() - start_time
print('ER run time:',run_time)
for site_pair, score in erdca_DI[:5]:
| print(site_pair, score) | conditional_block |
|
run_ERDCA.py | _backmapper import sequence_backmapper
from pydca.msa_trimmer import msa_trimmer
from pydca.contact_visualizer import contact_visualizer
from pydca.dca_utilities import dca_utilities
warnings.filterwarnings("error")
warnings.simplefilter('ignore', BiopythonWarning)
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', ResourceWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#========================================================================================
data_path = '/data/cresswellclayec/hoangd2_data/Pfam-A.full'
preprocess_path = '/data/cresswellclayec/DCA_ER/biowulf/pfam_ecc/'
data_path = '/home/eclay/Pfam-A.full'
preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/'
#pfam_id = 'PF00025'
pfam_id = sys.argv[1]
cpus_per_job = int(sys.argv[2])
job_id = sys.argv[3]
print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id))
# Read in Reference Protein Structure
pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id))
# convert bytes to str (python 2 to python 3)
pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1])
ipdb = 0
tpdb = int(pdb[ipdb,1])
print('Ref Sequence # should be : ',tpdb-1)
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
#--------------------------------------- Create PDB-PP Reference Sequence --------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
msa_file, ref_file = tools.write_FASTA(s[tpdb], s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='orig')
erdca_visualizer = contact_visualizer.DCAVisualizer('protein', pdb[ipdb,6], pdb[ipdb,5],refseq_file=ref_file)
biomol_info,er_pdb_seq = erdca_visualizer.pdb_content.pdb_chain_sequences[erdca_visualizer.pdb_chain_id]
print('\n\nERDCA-Visualizer pdb seq')
print(er_pdb_seq)
erdca_msa_file, erdca_ref_file = tools.write_FASTA(er_pdb_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
if 1: # DCA read in
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain]
ppb = PPBuilder().build_peptides(chain)
# print(pp.get_sequence())
print('peptide build of chain produced %d elements'%(len(ppb)))
matching_seq_dict = {}
poly_seq = list()
for i,pp in enumerate(ppb):
for char in str(pp.get_sequence()):
poly_seq.append(char)
print('PDB Polypeptide Sequence: \n',poly_seq)
#check that poly_seq matches up with given MSA
poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]]
print('PDB Polypeptide Sequence (In Proteins PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range)
if len(poly_seq_range) < 10:
print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE')
poly_seq_range = poly_seq
else:
pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range')
erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------# | print(preprocessed_data_outfile)
print('\n\nPre-Processing MSA with Range PP Seq\n\n')
trimmer = msa_trimmer.MSATrimmer(
erdca_msa_file, biomolecule='PROTEIN',
refseq_file = erdca_ref_file
)
pfam_dict['ref_file'] = erdca_ref_file
try:
preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
except(MSATrimmerException):
ERR = 'PPseq-MSA'
print('Error with MSA trimming\n%s\n'%ERR)
sys.exit()
print('\n\n\n',s[s_ipdb])
#write trimmed msa to file in FASTA format
with open(preprocessed_data_outfile, 'w') as fh:
for seqid, seq in preprocessed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#----------------------------------------- Run Simulation ERDCA ------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
try:
print('Initializing ER instance\n\n')
# Compute DI scores using Expectation Reflection algorithm
erdca_inst = erdca.ERDCA(
preprocessed_data_outfile,
'PROTEIN',
s_index = s_index,
pseudocount = 0.5,
num_threads = cpus_per_job-4,
seqid = 0.8)
except:
ref_seq = s[tpdb,:]
print('Using PDB defined reference sequence from MSA:\n',ref_seq)
msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path)
pfam_dict['ref_file'] = ref_file
print('Re-trimming MSA with pdb index defined ref_seq')
# create MSATrimmer instance
trimmer = msa_trimmer.MSATrimmer(
msa_file, biomolecule='protein',
refseq_file=ref_file
)
preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False)
#write trimmed msa to file in FASTA format
with open(preprocessed_data_outfile, 'w') as fh:
for seqid, seq in preprocessed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
erdca_inst = erdca.ERDCA(
preprocessed_data_outfile,
'PROTEIN',
s_index = s_index,
pseudocount = 0.5,
num_threads = cpus_per_job-4,
seqid = 0.8)
# Save processed data dictionary and FASTA file
pfam_dict['processed_msa'] = preprocessed_data
pfam_dict['ms | #---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
preprocessed_data_outfile = preprocess_path+'MSA_%s_PreProcessed.fa'%pfam_id | random_line_split |
buffer.rs | pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
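/// Return the unread bytes of this buffer, from the current read position to the end.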
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
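/// Round a requested capacity up to an allocation bucket: 128 B, 4 KiB, 64 KiB, the next power of two up to 2 MiB, then whole-MiB steps.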
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) |
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
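// Returns the uninitialized spare capacity of the backing Vec; callers must write to it and then call advance_mut() with the number of bytes written.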
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[ | {
self.extend_from_slice(s.as_bytes());
} | identifier_body |
buffer.rs | pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
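/// Return the unread bytes of this buffer, from the current read position to the end.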
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
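/// Round a requested capacity up to an allocation bucket: 128 B, 4 KiB, 64 KiB, the next power of two up to 2 MiB, then whole-MiB steps.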
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
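// Returns the uninitialized spare capacity of the backing Vec; callers must write to it and then call advance_mut() with the number of bytes written.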
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
|
#[ | #[cfg(test)]
mod tests {
use super::*; | random_line_split |
buffer.rs | pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn | (&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
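/// Return the unread bytes of this buffer, from the current read position to the end.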
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
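/// Round a requested capacity up to an allocation bucket: 128 B, 4 KiB, 64 KiB, the next power of two up to 2 MiB, then whole-MiB steps.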
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
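// Returns the uninitialized spare capacity of the backing Vec; callers must write to it and then call advance_mut() with the number of bytes written.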
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[ | truncate | identifier_name |
buffer.rs | pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
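// Round a requested capacity up to a coarse bucket (128 B, 4 KiB, 64 KiB, next power of
// two below 2 MiB, then whole-MiB steps) so the backing Vec grows in large increments.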
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// Total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as a UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() |
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[ | {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
} | conditional_block |
udp.rs | 6,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001 != 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
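// Packet layout: [0x03 = Fastboot id][flags (0x01 = continuation)][2-byte big-endian sequence] + payload chunk.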
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() | format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0 | {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other, | conditional_block |
udp.rs | 16,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001 != 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
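// Packet layout: [0x03 = Fastboot id][flags (0x01 = continuation)][2-byte big-endian sequence] + payload chunk.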
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() { | format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0 | // TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other, | random_line_split |
udp.rs | <B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001 != 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
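// Packet layout: [0x03 = Fastboot id][flags (0x01 = continuation)][2-byte big-endian sequence] + payload chunk.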
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x01;
packet
}
fn | make_init_packet | identifier_name |
|
udp.rs | 6,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001 != 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
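// Packet layout: [0x03 = Fastboot id][flags (0x01 = continuation)][2-byte big-endian sequence] + payload chunk.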
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> |
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[ | {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
} | identifier_body |
plate.ts | ertia: IMatrix3Array;
invMomentOfInertia: IMatrix3Array;
center: null | IVec3Array;
hotSpot: {
position: IVec3Array;
force: IVec3Array;
};
fields: ISerializedField[];
adjacentFields: ISerializedField[];
subplate: ISerializedSubplate;
plateGroup: ISerializedPlateGroup | null;
}
export default class Plate extends PlateBase<Field> {
id: number;
density: number;
hue: number;
adjacentFields: Map<number, Field>;
center: null | THREE.Vector3;
invMomentOfInertia: THREE.Matrix3;
momentOfInertia: THREE.Matrix3;
mass: number;
subplate: Subplate;
quaternion: THREE.Quaternion;
angularVelocity: THREE.Vector3;
fields: Map<number, Field>;
isSubplate = false;
plateGroup: PlateGroup | null;
hotSpot: {
position: THREE.Vector3;
force: THREE.Vector3;
};
constructor({ id, density, hue }: IOptions) {
super();
this.id = id;
// Decides whether plate goes under or above another plate while subducting (ocean-ocean).
this.density = density || 0;
// Base color / hue of the plate used to visually identify it.
this.hue = hue || 0;
this.quaternion = new THREE.Quaternion();
this.angularVelocity = new THREE.Vector3();
this.fields = new Map();
this.adjacentFields = new Map();
// Physics properties:
this.mass = 0;
this.invMomentOfInertia = new THREE.Matrix3();
this.momentOfInertia = new THREE.Matrix3();
this.center = null;
// Torque / force that is pushing the plate. It might be constant or decrease with time (see updateHotSpot).
this.hotSpot = { position: new THREE.Vector3(0, 0, 0), force: new THREE.Vector3(0, 0, 0) };
// Subplate is a container for some additional fields attached to this plate.
// At this point mostly fields that were subducting under and were detached from the original plate.
this.subplate = new Subplate(this);
this.plateGroup = null;
}
serialize(): ISerializedPlate {
return {
id: this.id,
quaternion: this.quaternion.toArray(),
angularVelocity: this.angularVelocity.toArray(),
hue: this.hue,
density: this.density,
mass: this.mass,
momentOfInertia: this.momentOfInertia.toArray(),
invMomentOfInertia: this.invMomentOfInertia.toArray(),
center: this.center?.toArray() || null,
hotSpot: {
force: this.hotSpot.force.toArray(),
position: this.hotSpot.position.toArray()
},
fields: Array.from(this.fields.values()).map(field => field.serialize()),
adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()),
subplate: this.subplate.serialize(),
plateGroup: this.plateGroup?.serialize() || null
};
}
static deserialize(props: ISerializedPlate) {
const plate = new Plate({ id: props.id });
plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion);
plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity);
plate.hue = props.hue;
plate.density = props.density;
plate.mass = props.mass;
plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia);
plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia);
plate.center = props.center && (new THREE.Vector3()).fromArray(props.center);
plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force);
plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position);
props.fields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.fields.set(field.id, field);
});
props.adjacentFields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.adjacentFields.set(field.id, field);
});
plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate);
plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates
return plate;
}
// It depends on current angular velocity and velocities of other, colliding plates.
// Note that this is pretty expensive to calculate, so if used much, the current value should be cached.
get totalTorque() {
const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force);
this.fields.forEach((field: Field) => {
totalTorque.add(field.torque);
});
return totalTorque;
}
get angularAcceleration() {
if (this.plateGroup) {
return this.plateGroup.angularAcceleration;
}
return this.totalTorque.applyMatrix3(this.invMomentOfInertia);
}
mergedWith(anotherPlate: Plate) {
if (!this.plateGroup || !anotherPlate.plateGroup) {
return false;
}
return this.plateGroup === anotherPlate.plateGroup;
}
updateCenter() {
const safeFields: Record<string, Field> = {};
const safeSum = new THREE.Vector3();
let safeArea = 0;
this.fields.forEach((field: Field) => {
if (!field.subduction) {
let safe = true;
// Some subducting fields do not get marked because they move so slowly
// Ignore fields adjacent to subducting fields just to be safe
field.forEachNeighbor((neighbor: Field) => {
if (neighbor.subduction) {
safe = false;
}
});
if (safe) {
safeFields[field.id] = field;
safeSum.add(field.absolutePos);
safeArea += field.area;
}
}
});
if (safeArea < MIN_PLATE_SIZE) {
// If the visible area of a plate is too small, don't bother labelling
this.center = new THREE.Vector3();
} else {
// Otherwise, use the field nearest the center
const geographicCenter = safeSum.normalize();
let closestPoint = new THREE.Vector3(0, 0, 0);
let minDist = Number.MAX_VALUE;
for (const id in safeFields) {
const field = safeFields[id];
const dist = field.absolutePos.distanceTo(geographicCenter);
if (dist < minDist) {
closestPoint = field.absolutePos;
minDist = dist;
}
}
this.center = closestPoint;
}
}
updateInertiaTensor() {
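// Treat each field as a point mass at its absolute position and accumulate the standard inertia tensor components.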
this.mass = 0;
let ixx = 0;
let iyy = 0;
let izz = 0;
let ixy = 0;
let ixz = 0;
let iyz = 0;
this.fields.forEach((field: Field) => {
const mass = field.mass;
const p = field.absolutePos;
ixx += mass * (p.y * p.y + p.z * p.z);
iyy += mass * (p.x * p.x + p.z * p.z);
izz += mass * (p.x * p.x + p.y * p.y);
ixy -= mass * p.x * p.y;
ixz -= mass * p.x * p.z;
iyz -= mass * p.y * p.z;
this.mass += mass;
});
this.momentOfInertia = new THREE.Matrix3();
this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz);
this.invMomentOfInertia = new THREE.Matrix3();
this.invMomentOfInertia.copy(this.momentOfInertia).invert();
}
updateHotSpot(timestep: number) {
const len = this.hotSpot.force.length();
if (len > 0) {
this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE));
}
}
setHotSpot(position: THREE.Vector3, force: THREE.Vector3) {
this.hotSpot = { position, force };
}
setDensity(density: number) {
this.density = density;
}
removeUnnecessaryFields() {
this.fields.forEach((f: Field) => {
if (!f.alive) {
this.deleteField(f.id);
}
});
}
addField(props: Omit<IFieldOptions, "plate">) |
addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) {
const localPos = this.localPosition(absolutePos);
const id = getGrid().nearestFieldId(localPos);
if (!this.fields.has(id)) {
return this.addField({ ...props, id });
}
}
addExistingField(field: Field) {
const id = field.id;
field.plate = this;
| {
const field = new Field({ ...props, plate: this });
this.addExistingField(field);
return field;
} | identifier_body |
plate.ts | ertia: IMatrix3Array;
invMomentOfInertia: IMatrix3Array;
center: null | IVec3Array;
hotSpot: {
position: IVec3Array;
force: IVec3Array;
};
fields: ISerializedField[];
adjacentFields: ISerializedField[];
subplate: ISerializedSubplate;
plateGroup: ISerializedPlateGroup | null;
}
export default class Plate extends PlateBase<Field> {
id: number;
density: number;
hue: number;
adjacentFields: Map<number, Field>;
center: null | THREE.Vector3;
invMomentOfInertia: THREE.Matrix3;
momentOfInertia: THREE.Matrix3;
mass: number;
subplate: Subplate;
quaternion: THREE.Quaternion;
angularVelocity: THREE.Vector3;
fields: Map<number, Field>;
isSubplate = false;
plateGroup: PlateGroup | null;
hotSpot: {
position: THREE.Vector3;
force: THREE.Vector3;
};
constructor({ id, density, hue }: IOptions) {
super();
this.id = id;
// Decides whether plate goes under or above another plate while subducting (ocean-ocean).
this.density = density || 0;
// Base color / hue of the plate used to visually identify it.
this.hue = hue || 0;
this.quaternion = new THREE.Quaternion();
this.angularVelocity = new THREE.Vector3();
this.fields = new Map();
this.adjacentFields = new Map();
// Physics properties:
this.mass = 0;
this.invMomentOfInertia = new THREE.Matrix3();
this.momentOfInertia = new THREE.Matrix3();
this.center = null;
// Torque / force that is pushing the plate. It might be constant or decrease with time (see updateHotSpot).
this.hotSpot = { position: new THREE.Vector3(0, 0, 0), force: new THREE.Vector3(0, 0, 0) };
// Subplate is a container for some additional fields attached to this plate.
// At this point mostly fields that were subducting under and were detached from the original plate.
this.subplate = new Subplate(this);
this.plateGroup = null;
}
serialize(): ISerializedPlate {
return {
id: this.id,
quaternion: this.quaternion.toArray(),
angularVelocity: this.angularVelocity.toArray(),
hue: this.hue,
density: this.density,
mass: this.mass,
momentOfInertia: this.momentOfInertia.toArray(),
invMomentOfInertia: this.invMomentOfInertia.toArray(),
center: this.center?.toArray() || null,
hotSpot: {
force: this.hotSpot.force.toArray(),
position: this.hotSpot.position.toArray()
},
fields: Array.from(this.fields.values()).map(field => field.serialize()),
adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()),
subplate: this.subplate.serialize(),
plateGroup: this.plateGroup?.serialize() || null
};
}
static deserialize(props: ISerializedPlate) {
const plate = new Plate({ id: props.id });
plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion);
plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity);
plate.hue = props.hue;
plate.density = props.density;
plate.mass = props.mass;
plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia);
plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia);
plate.center = props.center && (new THREE.Vector3()).fromArray(props.center);
plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force);
plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position);
props.fields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.fields.set(field.id, field);
});
props.adjacentFields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.adjacentFields.set(field.id, field);
});
plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate);
plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates
return plate;
}
// It depends on current angular velocity and velocities of other, colliding plates.
// Note that this is pretty expensive to calculate, so if used much, the current value should be cached.
get totalTorque() {
const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force);
this.fields.forEach((field: Field) => {
totalTorque.add(field.torque);
});
return totalTorque;
}
get angularAcceleration() {
if (this.plateGroup) {
return this.plateGroup.angularAcceleration;
}
return this.totalTorque.applyMatrix3(this.invMomentOfInertia);
}
mergedWith(anotherPlate: Plate) {
if (!this.plateGroup || !anotherPlate.plateGroup) {
return false;
}
return this.plateGroup === anotherPlate.plateGroup;
}
updateCenter() {
const safeFields: Record<string, Field> = {};
const safeSum = new THREE.Vector3();
let safeArea = 0;
this.fields.forEach((field: Field) => {
if (!field.subduction) {
let safe = true;
// Some subducting fields do not get marked because they move so slowly
// Ignore fields adjacent to subducting fields just to be safe
field.forEachNeighbor((neighbor: Field) => {
if (neighbor.subduction) {
safe = false;
}
});
if (safe) {
safeFields[field.id] = field;
safeSum.add(field.absolutePos);
safeArea += field.area;
}
}
});
if (safeArea < MIN_PLATE_SIZE) {
// If the visible area of a plate is too small, don't bother labelling
this.center = new THREE.Vector3();
} else {
// Otherwise, use the field nearest the center
const geographicCenter = safeSum.normalize();
let closestPoint = new THREE.Vector3(0, 0, 0);
let minDist = Number.MAX_VALUE;
for (const id in safeFields) {
const field = safeFields[id];
const dist = field.absolutePos.distanceTo(geographicCenter);
if (dist < minDist) {
closestPoint = field.absolutePos;
minDist = dist;
}
}
this.center = closestPoint;
}
}
updateInertiaTensor() {
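// Treat each field as a point mass at its absolute position and accumulate the standard inertia tensor components.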
this.mass = 0;
let ixx = 0;
let iyy = 0;
let izz = 0;
let ixy = 0;
let ixz = 0;
let iyz = 0;
this.fields.forEach((field: Field) => {
const mass = field.mass;
const p = field.absolutePos;
ixx += mass * (p.y * p.y + p.z * p.z);
iyy += mass * (p.x * p.x + p.z * p.z);
izz += mass * (p.x * p.x + p.y * p.y);
ixy -= mass * p.x * p.y;
ixz -= mass * p.x * p.z;
iyz -= mass * p.y * p.z;
this.mass += mass;
});
this.momentOfInertia = new THREE.Matrix3();
this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz);
this.invMomentOfInertia = new THREE.Matrix3();
this.invMomentOfInertia.copy(this.momentOfInertia).invert();
}
updateHotSpot(timestep: number) { | }
setHotSpot(position: THREE.Vector3, force: THREE.Vector3) {
this.hotSpot = { position, force };
}
setDensity(density: number) {
this.density = density;
}
removeUnnecessaryFields() {
this.fields.forEach((f: Field) => {
if (!f.alive) {
this.deleteField(f.id);
}
});
}
addField(props: Omit<IFieldOptions, "plate">) {
const field = new Field({ ...props, plate: this });
this.addExistingField(field);
return field;
}
addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) {
const localPos = this.localPosition(absolutePos);
const id = getGrid().nearestFieldId(localPos);
if (!this.fields.has(id)) {
return this.addField({ ...props, id });
}
}
addExistingField(field: Field) {
const id = field.id;
field.plate = this;
this | const len = this.hotSpot.force.length();
if (len > 0) {
this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE));
} | random_line_split |
plate.ts | Field>;
center: null | THREE.Vector3;
invMomentOfInertia: THREE.Matrix3;
momentOfInertia: THREE.Matrix3;
mass: number;
subplate: Subplate;
quaternion: THREE.Quaternion;
angularVelocity: THREE.Vector3;
fields: Map<number, Field>;
isSubplate = false;
plateGroup: PlateGroup | null;
hotSpot: {
position: THREE.Vector3;
force: THREE.Vector3;
};
constructor({ id, density, hue }: IOptions) {
super();
this.id = id;
// Decides whether plate goes under or above another plate while subducting (ocean-ocean).
this.density = density || 0;
// Base color / hue of the plate used to visually identify it.
this.hue = hue || 0;
this.quaternion = new THREE.Quaternion();
this.angularVelocity = new THREE.Vector3();
this.fields = new Map();
this.adjacentFields = new Map();
// Physics properties:
this.mass = 0;
this.invMomentOfInertia = new THREE.Matrix3();
this.momentOfInertia = new THREE.Matrix3();
this.center = null;
// Torque / force that is pushing the plate. It might be constant or decrease with time (see updateHotSpot).
this.hotSpot = { position: new THREE.Vector3(0, 0, 0), force: new THREE.Vector3(0, 0, 0) };
// Subplate is a container for some additional fields attached to this plate.
// At this point mostly fields that were subducting under and were detached from the original plate.
this.subplate = new Subplate(this);
this.plateGroup = null;
}
serialize(): ISerializedPlate {
return {
id: this.id,
quaternion: this.quaternion.toArray(),
angularVelocity: this.angularVelocity.toArray(),
hue: this.hue,
density: this.density,
mass: this.mass,
momentOfInertia: this.momentOfInertia.toArray(),
invMomentOfInertia: this.invMomentOfInertia.toArray(),
center: this.center?.toArray() || null,
hotSpot: {
force: this.hotSpot.force.toArray(),
position: this.hotSpot.position.toArray()
},
fields: Array.from(this.fields.values()).map(field => field.serialize()),
adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()),
subplate: this.subplate.serialize(),
plateGroup: this.plateGroup?.serialize() || null
};
}
static deserialize(props: ISerializedPlate) {
const plate = new Plate({ id: props.id });
plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion);
plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity);
plate.hue = props.hue;
plate.density = props.density;
plate.mass = props.mass;
plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia);
plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia);
plate.center = props.center && (new THREE.Vector3()).fromArray(props.center);
plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force);
plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position);
props.fields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.fields.set(field.id, field);
});
props.adjacentFields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.adjacentFields.set(field.id, field);
});
plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate);
plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates
return plate;
}
// It depends on current angular velocity and velocities of other, colliding plates.
// Note that this is pretty expensive to calculate, so if used much, the current value should be cached.
get totalTorque() {
const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force);
this.fields.forEach((field: Field) => {
totalTorque.add(field.torque);
});
return totalTorque;
}
get angularAcceleration() {
if (this.plateGroup) {
return this.plateGroup.angularAcceleration;
}
return this.totalTorque.applyMatrix3(this.invMomentOfInertia);
}
mergedWith(anotherPlate: Plate) {
if (!this.plateGroup || !anotherPlate.plateGroup) {
return false;
}
return this.plateGroup === anotherPlate.plateGroup;
}
updateCenter() {
const safeFields: Record<string, Field> = {};
const safeSum = new THREE.Vector3();
let safeArea = 0;
this.fields.forEach((field: Field) => {
if (!field.subduction) {
let safe = true;
// Some subducting fields do not get marked because they move so slowly
// Ignore fields adjacent to subducting fields just to be safe
field.forEachNeighbor((neighbor: Field) => {
if (neighbor.subduction) {
safe = false;
}
});
if (safe) {
safeFields[field.id] = field;
safeSum.add(field.absolutePos);
safeArea += field.area;
}
}
});
if (safeArea < MIN_PLATE_SIZE) {
// If the visible area of a plate is too small, don't bother labelling
this.center = new THREE.Vector3();
} else {
// Otherwise, use the field nearest the center
const geographicCenter = safeSum.normalize();
let closestPoint = new THREE.Vector3(0, 0, 0);
let minDist = Number.MAX_VALUE;
for (const id in safeFields) {
const field = safeFields[id];
const dist = field.absolutePos.distanceTo(geographicCenter);
if (dist < minDist) {
closestPoint = field.absolutePos;
minDist = dist;
}
}
this.center = closestPoint;
}
}
updateInertiaTensor() {
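// Treat each field as a point mass at its absolute position and accumulate the standard inertia tensor components.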
this.mass = 0;
let ixx = 0;
let iyy = 0;
let izz = 0;
let ixy = 0;
let ixz = 0;
let iyz = 0;
this.fields.forEach((field: Field) => {
const mass = field.mass;
const p = field.absolutePos;
ixx += mass * (p.y * p.y + p.z * p.z);
iyy += mass * (p.x * p.x + p.z * p.z);
izz += mass * (p.x * p.x + p.y * p.y);
ixy -= mass * p.x * p.y;
ixz -= mass * p.x * p.z;
iyz -= mass * p.y * p.z;
this.mass += mass;
});
this.momentOfInertia = new THREE.Matrix3();
this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz);
this.invMomentOfInertia = new THREE.Matrix3();
this.invMomentOfInertia.copy(this.momentOfInertia).invert();
}
updateHotSpot(timestep: number) {
const len = this.hotSpot.force.length();
if (len > 0) {
this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE));
}
}
setHotSpot(position: THREE.Vector3, force: THREE.Vector3) {
this.hotSpot = { position, force };
}
setDensity(density: number) {
this.density = density;
}
removeUnnecessaryFields() {
this.fields.forEach((f: Field) => {
if (!f.alive) {
this.deleteField(f.id);
}
});
}
addField(props: Omit<IFieldOptions, "plate">) {
const field = new Field({ ...props, plate: this });
this.addExistingField(field);
return field;
}
addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) {
const localPos = this.localPosition(absolutePos);
const id = getGrid().nearestFieldId(localPos);
if (!this.fields.has(id)) {
return this.addField({ ...props, id });
}
}
addExistingField(field: Field) {
const id = field.id;
field.plate = this;
this.fields.set(id, field);
if (this.adjacentFields.has(id)) {
this.adjacentFields.delete(id);
}
field.adjacentFields.forEach((adjFieldId: number) => {
if (!this.fields.has(adjFieldId)) {
this.addAdjacentField(adjFieldId);
} else {
const adjField = this.fields.get(adjFieldId);
if (adjField) {
adjField.boundary = adjField.isBoundary();
}
}
});
field.boundary = field.isBoundary();
return field;
}
| deleteField | identifier_name |
|
plate.ts | OfInertia.toArray(),
center: this.center?.toArray() || null,
hotSpot: {
force: this.hotSpot.force.toArray(),
position: this.hotSpot.position.toArray()
},
fields: Array.from(this.fields.values()).map(field => field.serialize()),
adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()),
subplate: this.subplate.serialize(),
plateGroup: this.plateGroup?.serialize() || null
};
}
static deserialize(props: ISerializedPlate) {
const plate = new Plate({ id: props.id });
plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion);
plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity);
plate.hue = props.hue;
plate.density = props.density;
plate.mass = props.mass;
plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia);
plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia);
plate.center = props.center && (new THREE.Vector3()).fromArray(props.center);
plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force);
plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position);
props.fields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.fields.set(field.id, field);
});
props.adjacentFields.forEach((serializedField: ISerializedField) => {
const field = Field.deserialize(serializedField, plate);
plate.adjacentFields.set(field.id, field);
});
plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate);
plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates
return plate;
}
// It depends on current angular velocity and velocities of other, colliding plates.
// Note that this is pretty expensive to calculate, so if used much, the current value should be cached.
get totalTorque() {
const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force);
this.fields.forEach((field: Field) => {
totalTorque.add(field.torque);
});
return totalTorque;
}
get angularAcceleration() {
if (this.plateGroup) {
return this.plateGroup.angularAcceleration;
}
return this.totalTorque.applyMatrix3(this.invMomentOfInertia);
}
mergedWith(anotherPlate: Plate) {
if (!this.plateGroup || !anotherPlate.plateGroup) {
return false;
}
return this.plateGroup === anotherPlate.plateGroup;
}
updateCenter() {
const safeFields: Record<string, Field> = {};
const safeSum = new THREE.Vector3();
let safeArea = 0;
this.fields.forEach((field: Field) => {
if (!field.subduction) {
let safe = true;
// Some subducting fields do not get marked because they move so slowly
// Ignore fields adjacent to subducting fields just to be safe
field.forEachNeighbor((neighbor: Field) => {
if (neighbor.subduction) {
safe = false;
}
});
if (safe) {
safeFields[field.id] = field;
safeSum.add(field.absolutePos);
safeArea += field.area;
}
}
});
if (safeArea < MIN_PLATE_SIZE) {
// If the visible area of a plate is too small, don't bother labelling
this.center = new THREE.Vector3();
} else {
// Otherwise, use the field nearest the center
const geographicCenter = safeSum.normalize();
let closestPoint = new THREE.Vector3(0, 0, 0);
let minDist = Number.MAX_VALUE;
for (const id in safeFields) {
const field = safeFields[id];
const dist = field.absolutePos.distanceTo(geographicCenter);
if (dist < minDist) {
closestPoint = field.absolutePos;
minDist = dist;
}
}
this.center = closestPoint;
}
}
updateInertiaTensor() {
this.mass = 0;
let ixx = 0;
let iyy = 0;
let izz = 0;
let ixy = 0;
let ixz = 0;
let iyz = 0;
this.fields.forEach((field: Field) => {
const mass = field.mass;
const p = field.absolutePos;
ixx += mass * (p.y * p.y + p.z * p.z);
iyy += mass * (p.x * p.x + p.z * p.z);
izz += mass * (p.x * p.x + p.y * p.y);
ixy -= mass * p.x * p.y;
ixz -= mass * p.x * p.z;
iyz -= mass * p.y * p.z;
this.mass += mass;
});
this.momentOfInertia = new THREE.Matrix3();
this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz);
this.invMomentOfInertia = new THREE.Matrix3();
this.invMomentOfInertia.copy(this.momentOfInertia).invert();
}
updateHotSpot(timestep: number) {
const len = this.hotSpot.force.length();
if (len > 0) {
this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE));
}
}
setHotSpot(position: THREE.Vector3, force: THREE.Vector3) {
this.hotSpot = { position, force };
}
setDensity(density: number) {
this.density = density;
}
removeUnnecessaryFields() {
this.fields.forEach((f: Field) => {
if (!f.alive) {
this.deleteField(f.id);
}
});
}
addField(props: Omit<IFieldOptions, "plate">) {
const field = new Field({ ...props, plate: this });
this.addExistingField(field);
return field;
}
addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) {
const localPos = this.localPosition(absolutePos);
const id = getGrid().nearestFieldId(localPos);
if (!this.fields.has(id)) {
return this.addField({ ...props, id });
}
}
addExistingField(field: Field) {
const id = field.id;
field.plate = this;
this.fields.set(id, field);
if (this.adjacentFields.has(id)) {
this.adjacentFields.delete(id);
}
field.adjacentFields.forEach((adjFieldId: number) => {
if (!this.fields.has(adjFieldId)) {
this.addAdjacentField(adjFieldId);
} else {
const adjField = this.fields.get(adjFieldId);
if (adjField) {
adjField.boundary = adjField.isBoundary();
}
}
});
field.boundary = field.isBoundary();
return field;
}
deleteField(id: number) {
const field = this.fields.get(id);
if (!field) {
return;
}
this.fields.delete(id);
this.subplate.deleteField(id);
this.addAdjacentField(id);
field.adjacentFields.forEach((adjFieldId: number) => {
let adjField = this.adjacentFields.get(adjFieldId);
if (adjField && !adjField.isAdjacentField()) {
this.adjacentFields.delete(adjFieldId);
}
adjField = this.fields.get(adjFieldId);
if (adjField) {
adjField.boundary = true;
}
});
}
addAdjacentField(id: number) {
if (!this.adjacentFields.has(id)) {
const newField = new Field({ id, plate: this, adjacent: true });
if (newField.isAdjacentField()) {
this.adjacentFields.set(id, newField);
}
}
}
neighborsCount(absolutePos: THREE.Vector3) {
const localPos = this.localPosition(absolutePos);
const id = getGrid().nearestFieldId(localPos);
let count = 0;
getGrid().fields[id].adjacentFields.forEach((adjId: number) => {
if (this.fields.has(adjId)) {
count += 1;
}
});
return count;
}
calculateContinentBuffers() {
const grid = getGrid();
const queue: Field[] = [];
const dist: Record<string, number> = {};
const getDist = (field: Field) => {
const id = field.id;
if (dist[id] !== undefined) {
return dist[id];
}
return Infinity;
};
this.forEachField((field: Field) => {
field.isContinentBuffer = false;
if (field.continentalCrust) {
field.forEachNeighbor((adjField: Field) => {
if (adjField.oceanicCrust && getDist(adjField) > grid.fieldDiameterInKm) | {
dist[adjField.id] = grid.fieldDiameterInKm;
queue.push(adjField);
} | conditional_block |
|
utils.py | threading
import shutil
import os
import ftplib
import ftputil
import requests
import logging
import re
import bs4
import string
try:
from cryptography import fernet
except ImportError:
fernet = None
from buchschloss import core, config
class FormattedDate(date):
"""print a datetime.date as specified in config.core.date_format"""
def __str__(self):
return self.strftime(config.core.date_format)
@classmethod
def fromdate(cls, date_: date):
"""Create a FormattedDate from a datetime.date"""
if date_ is None:
return None
else:
return cls(date_.year, date_.month, date_.day)
def todate(self):
"""transform self to a datetime.date"""
return date(self.year, self.month, self.day)
def run_checks():
"""Run stuff to do as specified by times set in config"""
while True:
if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):
for stuff in stuff_to_do:
threading.Thread(target=stuff).start()
core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every
time.sleep(5*60*60)
def late_books():
"""Check for late and nearly late books.
Call the functions in late_handlers with arguments (late, warn).
late and warn are sequences of core.Borrow instances.
"""
late = []
warn = []
today = date.today()
for b in core.Borrow.search((
('is_back', 'eq', False),
'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))):
if b.return_date < today:
late.append(b)
else:
warn.append(b)
for h in late_handlers:
h(late, warn)
def backup():
"""Local backups.
Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config
"""
backup_shift(os, config.utils.tasks.backup_depth)
if config.utils.tasks.secret_key is None:
shutil.copyfile(config.core.database_name, config.core.database_name+'.1')
else:
data = get_encrypted_database()
with open(config.core.database_name+'.1', 'wb') as f:
f.write(data)
def get_encrypted_database():
"""get the encrypted contents of the database file"""
if fernet is None:
raise RuntimeError('encryption requested, but no cryptography available')
with open(config.core.database_name, 'rb') as f:
plain = f.read()
key = base64.urlsafe_b64encode(config.utils.tasks.secret_key)
cipher = fernet.Fernet(key).encrypt(plain)
return base64.urlsafe_b64decode(cipher)
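# Illustrative note (not part of the original module): the bytes returned above are
# the urlsafe-base64-decoded Fernet token, so a hypothetical restore would have to
# re-encode them before decrypting, roughly:
# token = base64.urlsafe_b64encode(encrypted_bytes)
# plain = fernet.Fernet(base64.urlsafe_b64encode(config.utils.tasks.secret_key)).decrypt(token)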
def web_backup():
"""Remote backups.
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config
"""
conf = config.utils
if conf.tasks.secret_key is None:
upload_path = config.core.database_name
file = None
else:
file = tempfile.NamedTemporaryFile(delete=False)
file.write(get_encrypted_database())
file.close()
upload_path = file.name
factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP
# noinspection PyDeprecation
with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,
session_factory=factory, use_list_a_option=False) as host:
backup_shift(host, conf.tasks.web_backup_depth)
host.upload(upload_path, config.core.database_name+'.1')
if file is not None:
os.unlink(file.name)
def backup_shift(fs, depth):
"""shift all name.number up one number to the given depth
in the given filesystem (os or remote FTP host)"""
number_name = lambda n: '.'.join((config.core.database_name, str(n)))
try:
fs.remove(number_name(depth))
except FileNotFoundError:
pass
for f in range(depth, 1, -1):
try:
fs.rename(number_name(f-1), number_name(f))
except FileNotFoundError:
pass
def send_email(subject, text):
"""Send an email to the recipient specified in config"""
cfg = config.utils.email
msg = email.message.Message()
msg['From'] = cfg['from']
msg['To'] = cfg.recipient
msg['Subject'] = subject
msg.set_payload(text)
try:
with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn:
if cfg.smtp.tls:
conn.starttls(context=ssl.create_default_context())
if cfg.smtp.username is not None:
conn.login(cfg.smtp.username, cfg.smtp.password)
conn.send_message(msg)
except smtplib.SMTPException as e:
logging.error('error while sending email: {}: {}'.format(type(e).__name__, e))
def get_name(internal: str):
"""Get an end-user suitable name.
Try lookup in config.utils.names.
"__" is replaced by ": " with components looked up individually
If a name isn't found, a warning is logged and the internal name is returned, potentially modified
"<namespace>::<name>" may specify a namespace in which lookups are performed first,
falling back to the global names if nothing is found
"__" takes precedence over "::"
"""
if '__' in internal:
return ': '.join(get_name(s) for s in internal.split('__'))
*path, name = internal.split('::')
current = config.utils.names
look_in = [current]
try:
for k in path:
current = current[k]
look_in.append(current)
except KeyError:
# noinspection PyUnboundLocalVariable
logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))
look_in.reverse()
for ns in look_in:
try:
val = ns[name]
if isinstance(val, str):
return val
elif isinstance(val, dict):
return val['*this*']
else:
raise TypeError('{!r} is neither dict nor str'.format(val))
except KeyError:
pass
logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name])))
return '::'.join(path+[name])
def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace):
"""Insert newlines every `size` characters.
The text is broken at the last character from `break_char` found within the
first `size` characters; if none is found, it is broken at `size` exactly.
If the break character is in `cut_char`, it is replaced by the newline.
"""
# TODO: move to misc
break_char += cut_char
r = []
while len(text) > size:
i = size
cut = False
while i:
if text[i] in break_char:
cut = text[i] in cut_char
break
i -= 1
else:
i = size-1
i += 1
r.append(text[:i-cut])
text = text[i:]
r.append(text)
return '\n'.join(r) |
def get_book_data(isbn: int):
"""Attempt to get book data via the ISBN from the DB, if that fails,
try the DNB (https://portal.dnb.de)"""
try:
book = next(iter(core.Book.search(('isbn', 'eq', isbn))))
except StopIteration:
pass # actually, I could put the whole rest of the function here
else:
data = core.Book.view_str(book.id)
del data['id'], data['status'], data['return_date'], data['borrowed_by']
del data['borrowed_by_id'], data['__str__']
return data
try:
r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'
+ str(isbn) + '&method=simpleSearch&cqlMode=true')
r.raise_for_status()
except requests.exceptions.RequestException:
raise core.BuchSchlossError('no_connection', 'no_connection')
person_re = re.compile(r'(\w*, \w*) \((\w*)\)')
results = {'concerned_people': []}
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
if table is None:
# see if we got multiple results
link_to_first = page.select_one('#recordLink_0')
if link_to_first is None:
raise core.BuchSchlossError(
'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)
r = requests.get('https://portal.dnb.de'+link_to_first['href'])
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
for tr in table.select('tr'):
td = [x.get_text('\n').strip() for x in tr.select('td')]
if len(td) == 2:
if td[0] == 'Titel':
results['title'] = td[1].split('/')[0].strip()
elif td[0] == 'Person(en)':
for p in td[1].split('\n'):
g = person_re.search(p)
if g is None:
continue
g = g.groups()
if g[1 | random_line_split |
|
utils.py | threading
import shutil
import os
import ftplib
import ftputil
import requests
import logging
import re
import bs4
import string
try:
from cryptography import fernet
except ImportError:
fernet = None
from buchschloss import core, config
class FormattedDate(date):
"""print a datetime.date as specified in config.core.date_format"""
def __str__(self):
return self.strftime(config.core.date_format)
@classmethod
def fromdate(cls, date_: date):
"""Create a FormattedDate from a datetime.date"""
if date_ is None:
return None
else:
return cls(date_.year, date_.month, date_.day)
def todate(self):
"""transform self to a datetime.date"""
return date(self.year, self.month, self.day)
def run_checks():
"""Run stuff to do as specified by times set in config"""
while True:
if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):
for stuff in stuff_to_do:
threading.Thread(target=stuff).start()
core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every
time.sleep(5*60*60)
def late_books():
"""Check for late and nearly late books.
Call the functions in late_handlers with arguments (late, warn).
late and warn are sequences of core.Borrow instances.
"""
late = []
warn = []
today = date.today()
for b in core.Borrow.search((
('is_back', 'eq', False),
'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))):
if b.return_date < today:
late.append(b)
else:
warn.append(b)
for h in late_handlers:
h(late, warn)
def backup():
"""Local backups.
Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config
"""
backup_shift(os, config.utils.tasks.backup_depth)
if config.utils.tasks.secret_key is None:
shutil.copyfile(config.core.database_name, config.core.database_name+'.1')
else:
data = get_encrypted_database()
with open(config.core.database_name+'.1', 'wb') as f:
f.write(data)
def | ():
"""get the encrypted contents of the database file"""
if fernet is None:
raise RuntimeError('encryption requested, but no cryptography available')
with open(config.core.database_name, 'rb') as f:
plain = f.read()
key = base64.urlsafe_b64encode(config.utils.tasks.secret_key)
cipher = fernet.Fernet(key).encrypt(plain)
return base64.urlsafe_b64decode(cipher)
def web_backup():
"""Remote backups.
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config
"""
conf = config.utils
if conf.tasks.secret_key is None:
upload_path = config.core.database_name
file = None
else:
file = tempfile.NamedTemporaryFile(delete=False)
file.write(get_encrypted_database())
file.close()
upload_path = file.name
factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP
# noinspection PyDeprecation
with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,
session_factory=factory, use_list_a_option=False) as host:
backup_shift(host, conf.tasks.web_backup_depth)
host.upload(upload_path, config.core.database_name+'.1')
if file is not None:
os.unlink(file.name)
def backup_shift(fs, depth):
"""shift all name.number up one number to the given depth
in the given filesystem (os or remote FTP host)"""
number_name = lambda n: '.'.join((config.core.database_name, str(n)))
try:
fs.remove(number_name(depth))
except FileNotFoundError:
pass
for f in range(depth, 1, -1):
try:
fs.rename(number_name(f-1), number_name(f))
except FileNotFoundError:
pass
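# Illustrative sketch (not part of the original module): with a hypothetical
# database_name of 'books.db' and depth 3, backup_shift drops 'books.db.3' if it
# exists, then renames 'books.db.2' -> 'books.db.3' and 'books.db.1' -> 'books.db.2',
# leaving 'books.db.1' free for the fresh backup written by the caller.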
def send_email(subject, text):
"""Send an email to the recipient specified in config"""
cfg = config.utils.email
msg = email.message.Message()
msg['From'] = cfg['from']
msg['To'] = cfg.recipient
msg['Subject'] = subject
msg.set_payload(text)
try:
with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn:
if cfg.smtp.tls:
conn.starttls(context=ssl.create_default_context())
if cfg.smtp.username is not None:
conn.login(cfg.smtp.username, cfg.smtp.password)
conn.send_message(msg)
except smtplib.SMTPException as e:
logging.error('error while sending email: {}: {}'.format(type(e).__name__, e))
def get_name(internal: str):
"""Get an end-user suitable name.
Try lookup in config.utils.names.
"__" is replaced by ": " with components looked up individually
If a name isn't found, a warning is logged and the internal name is returned, potentially modified
"<namespace>::<name>" may specify a namespace in which lookups are performed first,
falling back to the global names if nothing is found
"__" takes precedence over "::"
"""
if '__' in internal:
return ': '.join(get_name(s) for s in internal.split('__'))
*path, name = internal.split('::')
current = config.utils.names
look_in = [current]
try:
for k in path:
current = current[k]
look_in.append(current)
except KeyError:
# noinspection PyUnboundLocalVariable
logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))
look_in.reverse()
for ns in look_in:
try:
val = ns[name]
if isinstance(val, str):
return val
elif isinstance(val, dict):
return val['*this*']
else:
raise TypeError('{!r} is neither dict nor str'.format(val))
except KeyError:
pass
logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name])))
return '::'.join(path+[name])
def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace):
"""Insert newlines every `size` characters.
The text is broken at the last character from `break_char` found within the
first `size` characters; if none is found, it is broken at `size` exactly.
If the break character is in `cut_char`, it is replaced by the newline.
"""
# TODO: move to misc
break_char += cut_char
r = []
while len(text) > size:
i = size
cut = False
while i:
if text[i] in break_char:
cut = text[i] in cut_char
break
i -= 1
else:
i = size-1
i += 1
r.append(text[:i-cut])
text = text[i:]
r.append(text)
return '\n'.join(r)
def get_book_data(isbn: int):
"""Attempt to get book data via the ISBN from the DB, if that fails,
try the DNB (https://portal.dnb.de)"""
try:
book = next(iter(core.Book.search(('isbn', 'eq', isbn))))
except StopIteration:
pass # actually, I could put the whole rest of the function here
else:
data = core.Book.view_str(book.id)
del data['id'], data['status'], data['return_date'], data['borrowed_by']
del data['borrowed_by_id'], data['__str__']
return data
try:
r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'
+ str(isbn) + '&method=simpleSearch&cqlMode=true')
r.raise_for_status()
except requests.exceptions.RequestException:
raise core.BuchSchlossError('no_connection', 'no_connection')
person_re = re.compile(r'(\w*, \w*) \((\w*)\)')
results = {'concerned_people': []}
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
if table is None:
# see if we got multiple results
link_to_first = page.select_one('#recordLink_0')
if link_to_first is None:
raise core.BuchSchlossError(
'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)
r = requests.get('https://portal.dnb.de'+link_to_first['href'])
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
for tr in table.select('tr'):
td = [x.get_text('\n').strip() for x in tr.select('td')]
if len(td) == 2:
if td[0] == 'Titel':
results['title'] = td[1].split('/')[0].strip()
elif td[0] == 'Person(en)':
for p in td[1].split('\n'):
g = person_re.search(p)
if g is None:
continue
g = g.groups()
if g[1 | get_encrypted_database | identifier_name |
utils.py | threading
import shutil
import os
import ftplib
import ftputil
import requests
import logging
import re
import bs4
import string
try:
from cryptography import fernet
except ImportError:
fernet = None
from buchschloss import core, config
class FormattedDate(date):
"""print a datetime.date as specified in config.core.date_format"""
def __str__(self):
return self.strftime(config.core.date_format)
@classmethod
def fromdate(cls, date_: date):
"""Create a FormattedDate from a datetime.date"""
if date_ is None:
return None
else:
return cls(date_.year, date_.month, date_.day)
def todate(self):
"""transform self to a datetime.date"""
return date(self.year, self.month, self.day)
def run_checks():
|
def late_books():
"""Check for late and nearly late books.
Call the functions in late_handlers with arguments (late, warn).
late and warn are sequences of core.Borrow instances.
"""
late = []
warn = []
today = date.today()
for b in core.Borrow.search((
('is_back', 'eq', False),
'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))):
if b.return_date < today:
late.append(b)
else:
warn.append(b)
for h in late_handlers:
h(late, warn)
def backup():
"""Local backups.
Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config
"""
backup_shift(os, config.utils.tasks.backup_depth)
if config.utils.tasks.secret_key is None:
shutil.copyfile(config.core.database_name, config.core.database_name+'.1')
else:
data = get_encrypted_database()
with open(config.core.database_name+'.1', 'wb') as f:
f.write(data)
def get_encrypted_database():
"""get the encrypted contents of the database file"""
if fernet is None:
raise RuntimeError('encryption requested, but no cryptography available')
with open(config.core.database_name, 'rb') as f:
plain = f.read()
key = base64.urlsafe_b64encode(config.utils.tasks.secret_key)
cipher = fernet.Fernet(key).encrypt(plain)
return base64.urlsafe_b64decode(cipher)
def web_backup():
"""Remote backups.
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config
"""
conf = config.utils
if conf.tasks.secret_key is None:
upload_path = config.core.database_name
file = None
else:
file = tempfile.NamedTemporaryFile(delete=False)
file.write(get_encrypted_database())
file.close()
upload_path = file.name
factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP
# noinspection PyDeprecation
with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,
session_factory=factory, use_list_a_option=False) as host:
backup_shift(host, conf.tasks.web_backup_depth)
host.upload(upload_path, config.core.database_name+'.1')
if file is not None:
os.unlink(file.name)
def backup_shift(fs, depth):
"""shift all name.number up one number to the given depth
in the given filesystem (os or remote FTP host)"""
number_name = lambda n: '.'.join((config.core.database_name, str(n)))
try:
fs.remove(number_name(depth))
except FileNotFoundError:
pass
for f in range(depth, 1, -1):
try:
fs.rename(number_name(f-1), number_name(f))
except FileNotFoundError:
pass
def send_email(subject, text):
"""Send an email to the recipient specified in config"""
cfg = config.utils.email
msg = email.message.Message()
msg['From'] = cfg['from']
msg['To'] = cfg.recipient
msg['Subject'] = subject
msg.set_payload(text)
try:
with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn:
if cfg.smtp.tls:
conn.starttls(context=ssl.create_default_context())
if cfg.smtp.username is not None:
conn.login(cfg.smtp.username, cfg.smtp.password)
conn.send_message(msg)
except smtplib.SMTPException as e:
logging.error('error while sending email: {}: {}'.format(type(e).__name__, e))
def get_name(internal: str):
"""Get an end-user suitable name.
Try lookup in config.utils.names.
"__" is replaced by ": " with components looked up individually
If a name isn't found, a warning is logged and the internal name is returned, potentially modified
"<namespace>::<name>" may specify a namespace in which lookups are performed first,
falling back to the global names if nothing is found
"__" takes precedence over "::"
"""
if '__' in internal:
return ': '.join(get_name(s) for s in internal.split('__'))
*path, name = internal.split('::')
current = config.utils.names
look_in = [current]
try:
for k in path:
current = current[k]
look_in.append(current)
except KeyError:
# noinspection PyUnboundLocalVariable
logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))
look_in.reverse()
for ns in look_in:
try:
val = ns[name]
if isinstance(val, str):
return val
elif isinstance(val, dict):
return val['*this*']
else:
raise TypeError('{!r} is neither dict nor str'.format(val))
except KeyError:
pass
logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name])))
return '::'.join(path+[name])
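# Illustrative sketch (not part of the original module), assuming a hypothetical
# name file {'Book': 'Book', 'gui': {'Book': 'Book form'}}:
# get_name('gui::Book') -> 'Book form' (hit in the 'gui' namespace)
# get_name('cli::Book') -> 'Book' (unknown namespace logs a warning, the global entry is used)
# get_name('Book__Book') -> 'Book: Book' ("__" components are looked up separately)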
def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace):
"""Insert newlines every `size` characters.
The text is broken at the last character from `break_char` found within the
first `size` characters; if none is found, it is broken at `size` exactly.
If the break character is in `cut_char`, it is replaced by the newline.
"""
# TODO: move to misc
break_char += cut_char
r = []
while len(text) > size:
i = size
cut = False
while i:
if text[i] in break_char:
cut = text[i] in cut_char
break
i -= 1
else:
i = size-1
i += 1
r.append(text[:i-cut])
text = text[i:]
r.append(text)
return '\n'.join(r)
def get_book_data(isbn: int):
"""Attempt to get book data via the ISBN from the DB, if that fails,
try the DNB (https://portal.dnb.de)"""
try:
book = next(iter(core.Book.search(('isbn', 'eq', isbn))))
except StopIteration:
pass # actually, I could put the whole rest of the function here
else:
data = core.Book.view_str(book.id)
del data['id'], data['status'], data['return_date'], data['borrowed_by']
del data['borrowed_by_id'], data['__str__']
return data
try:
r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'
+ str(isbn) + '&method=simpleSearch&cqlMode=true')
r.raise_for_status()
except requests.exceptions.RequestException:
raise core.BuchSchlossError('no_connection', 'no_connection')
person_re = re.compile(r'(\w*, \w*) \((\w*)\)')
results = {'concerned_people': []}
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
if table is None:
# see if we got multiple results
link_to_first = page.select_one('#recordLink_0')
if link_to_first is None:
raise core.BuchSchlossError(
'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)
r = requests.get('https://portal.dnb.de'+link_to_first['href'])
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
for tr in table.select('tr'):
td = [x.get_text('\n').strip() for x in tr.select('td')]
if len(td) == 2:
if td[0] == 'Titel':
results['title'] = td[1].split('/')[0].strip()
elif td[0] == 'Person(en)':
for p in td[1].split('\n'):
g = person_re.search(p)
if g is None:
continue
g = g.groups()
if g[1 | """Run stuff to do as specified by times set in config"""
while True:
if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):
for stuff in stuff_to_do:
threading.Thread(target=stuff).start()
core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every
time.sleep(5*60*60) | identifier_body |
utils.py | import fernet
except ImportError:
fernet = None
from buchschloss import core, config
class FormattedDate(date):
"""print a datetime.date as specified in config.core.date_format"""
def __str__(self):
return self.strftime(config.core.date_format)
@classmethod
def fromdate(cls, date_: date):
"""Create a FormattedDate from a datetime.date"""
if date_ is None:
return None
else:
return cls(date_.year, date_.month, date_.day)
def todate(self):
"""transform self to a datetime.date"""
return date(self.year, self.month, self.day)
def run_checks():
"""Run stuff to do as specified by times set in config"""
while True:
if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):
for stuff in stuff_to_do:
threading.Thread(target=stuff).start()
core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every
time.sleep(5*60*60)
def late_books():
"""Check for late and nearly late books.
Call the functions in late_handlers with arguments (late, warn).
late and warn are sequences of core.Borrow instances.
"""
late = []
warn = []
today = date.today()
for b in core.Borrow.search((
('is_back', 'eq', False),
'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))):
if b.return_date < today:
late.append(b)
else:
warn.append(b)
for h in late_handlers:
h(late, warn)
def backup():
"""Local backups.
Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config
"""
backup_shift(os, config.utils.tasks.backup_depth)
if config.utils.tasks.secret_key is None:
shutil.copyfile(config.core.database_name, config.core.database_name+'.1')
else:
data = get_encrypted_database()
with open(config.core.database_name+'.1', 'wb') as f:
f.write(data)
def get_encrypted_database():
"""get the encrypted contents of the database file"""
if fernet is None:
raise RuntimeError('encryption requested, but no cryptography available')
with open(config.core.database_name, 'rb') as f:
plain = f.read()
key = base64.urlsafe_b64encode(config.utils.tasks.secret_key)
cipher = fernet.Fernet(key).encrypt(plain)
return base64.urlsafe_b64decode(cipher)
def web_backup():
"""Remote backups.
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config
"""
conf = config.utils
if conf.tasks.secret_key is None:
upload_path = config.core.database_name
file = None
else:
file = tempfile.NamedTemporaryFile(delete=False)
file.write(get_encrypted_database())
file.close()
upload_path = file.name
factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP
# noinspection PyDeprecation
with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,
session_factory=factory, use_list_a_option=False) as host:
backup_shift(host, conf.tasks.web_backup_depth)
host.upload(upload_path, config.core.database_name+'.1')
if file is not None:
os.unlink(file.name)
def backup_shift(fs, depth):
"""shift all name.number up one number to the given depth
in the given filesystem (os or remote FTP host)"""
number_name = lambda n: '.'.join((config.core.database_name, str(n)))
try:
fs.remove(number_name(depth))
except FileNotFoundError:
pass
for f in range(depth, 1, -1):
try:
fs.rename(number_name(f-1), number_name(f))
except FileNotFoundError:
pass
def send_email(subject, text):
"""Send an email to the recipient specified in config"""
cfg = config.utils.email
msg = email.message.Message()
msg['From'] = cfg['from']
msg['To'] = cfg.recipient
msg['Subject'] = subject
msg.set_payload(text)
try:
with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn:
if cfg.smtp.tls:
conn.starttls(context=ssl.create_default_context())
if cfg.smtp.username is not None:
conn.login(cfg.smtp.username, cfg.smtp.password)
conn.send_message(msg)
except smtplib.SMTPException as e:
logging.error('error while sending email: {}: {}'.format(type(e).__name__, e))
def get_name(internal: str):
"""Get an end-user suitable name.
Try lookup in config.utils.names.
"__" is replaced by ": " with components looked up individually
If a name isn't found, a warning is logged and the internal name is returned, potentially modified
"<namespace>::<name>" may specify a namespace in which lookups are performed first,
falling back to the global names if nothing is found
"__" takes precedence over "::"
"""
if '__' in internal:
return ': '.join(get_name(s) for s in internal.split('__'))
*path, name = internal.split('::')
current = config.utils.names
look_in = [current]
try:
for k in path:
current = current[k]
look_in.append(current)
except KeyError:
# noinspection PyUnboundLocalVariable
logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))
look_in.reverse()
for ns in look_in:
try:
val = ns[name]
if isinstance(val, str):
return val
elif isinstance(val, dict):
return val['*this*']
else:
raise TypeError('{!r} is neither dict nor str'.format(val))
except KeyError:
pass
logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name])))
return '::'.join(path+[name])
def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace):
"""Insert newlines every `size` characters.
The text is broken at the last character from `break_char` found within the
first `size` characters; if none is found, it is broken at `size` exactly.
If the break character is in `cut_char`, it is replaced by the newline.
"""
# TODO: move to misc
break_char += cut_char
r = []
while len(text) > size:
i = size
cut = False
while i:
if text[i] in break_char:
cut = text[i] in cut_char
break
i -= 1
else:
i = size-1
i += 1
r.append(text[:i-cut])
text = text[i:]
r.append(text)
return '\n'.join(r)
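# Illustrative usage sketch (not part of the original module): with the default
# break/cut characters the text is broken at the last punctuation or whitespace
# character in range, and a whitespace break is swallowed by the newline:
_BREAK_STRING_DEMO = break_string('hello world, again', 8) # == 'hello\nworld,\nagain'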
def get_book_data(isbn: int):
"""Attempt to get book data via the ISBN from the DB, if that fails,
try the DNB (https://portal.dnb.de)"""
try:
book = next(iter(core.Book.search(('isbn', 'eq', isbn))))
except StopIteration:
pass # actually, I could put the whole rest of the function here
else:
data = core.Book.view_str(book.id)
del data['id'], data['status'], data['return_date'], data['borrowed_by']
del data['borrowed_by_id'], data['__str__']
return data
try:
r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'
+ str(isbn) + '&method=simpleSearch&cqlMode=true')
r.raise_for_status()
except requests.exceptions.RequestException:
raise core.BuchSchlossError('no_connection', 'no_connection')
person_re = re.compile(r'(\w*, \w*) \((\w*)\)')
results = {'concerned_people': []}
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
if table is None:
# see if we got multiple results
link_to_first = page.select_one('#recordLink_0')
if link_to_first is None:
raise core.BuchSchlossError(
'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)
r = requests.get('https://portal.dnb.de'+link_to_first['href'])
page = bs4.BeautifulSoup(r.text)
table = page.select_one('#fullRecordTable')
for tr in table.select('tr'):
td = [x.get_text('\n').strip() for x in tr.select('td')]
if len(td) == 2:
if td[0] == 'Titel':
results['title'] = td[1].split('/')[0].strip()
elif td[0] == 'Person(en)':
| for p in td[1].split('\n'):
g = person_re.search(p)
if g is None:
continue
g = g.groups()
if g[1] == 'Verfasser':
results['author'] = g[0]
else:
results['concerned_people'].append(g[1]+': '+g[0]) | conditional_block |
|
peer_connection.rs | Claim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId,
peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>,
address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns a owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> |
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the client session that is least used.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec< | {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
} | identifier_body |
peer_connection.rs | : Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns a owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the client session that is least used.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
}
impl PeerConnectionActor {
fn new(
id: ConnectionId,
peer_node_id: NodeId,
direction: ConnectionDirection,
connection: Yamux,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
) -> Self {
Self {
id,
peer_node_id,
direction,
control: connection.get_yamux_control(),
incoming_substreams: connection.into_incoming(),
request_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
}
}
pub async fn run(mut self) {
loop {
tokio::select! {
maybe_request = self.request_rx.recv() => {
match maybe_request {
Some(request) => self.handle_request(request).await,
None => {
debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self);
break;
}
}
},
maybe_substream = self.incoming_substreams.next() => {
match maybe_substream {
Some(substream) => {
if let Err(err) = self.handle_incoming_substream(substream).await {
error!(
target: LOG_TARGET,
"[{}] Incoming substream for peer '{}' failed to open because '{error}'",
self,
self.peer_node_id.short_str(),
error = err
)
}
},
None => {
debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str());
break;
},
}
}
}
}
if let Err(err) = self.disconnect(false).await {
warn!(
target: LOG_TARGET,
"[{}] Failed to politely close connection to peer '{}' because '{}'",
self,
self.peer_node_id.short_str(),
err
);
}
}
async fn handle_request(&mut self, request: PeerConnectionRequest) {
use PeerConnectionRequest::{Disconnect, OpenSubstream};
match request {
OpenSubstream { protocol_id, reply_tx } => | {
let tracing_id = tracing::Span::current().id();
let span = span!(Level::TRACE, "handle_request");
span.follows_from(tracing_id);
let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await;
log_if_error_fmt!(
target: LOG_TARGET,
reply_tx.send(result),
"Reply oneshot closed when sending reply",
);
} | conditional_block |
|
peer_connection.rs | Claim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId,
peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>,
address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns a owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the client session that is least used.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn | (&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<Protocol | fmt | identifier_name |
peer_connection.rs | Claim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId, | address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns an owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
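// Minimal usage sketch (not from the original source): because `on_disconnect()` returns an
// owned `'static` future, it can be moved into a spawned task to run cleanup once the
// connection closes. `conn` is assumed to be a `PeerConnection` handle obtained elsewhere.
//
//     let disconnected = conn.on_disconnect();
//     tokio::spawn(async move {
//         disconnected.await;
//         // e.g. remove the peer from a local registry here
//     });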
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and hands out the least-used client session.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
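// Hedged usage sketch (not part of the original file): `SomeRpcClient` stands in for any
// client type satisfying the trait bounds above; the pool lazily opens up to `max_sessions`
// RPC sessions over this connection and serves the least-used one.
//
//     let pool = conn.create_rpc_client_pool::<SomeRpcClient>(3, Default::default());
//     let mut client = pool.get().await?; // `get()` is an assumed pool accessor; see RpcClientPool
//     // use `client` like a normal RPC client here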
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId | peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>, | random_line_split |
web.js | success:function(data){
if(data.error){
return alert(data.error);
}
if(data.html){
if(display){
$(display).html(data.html);
}else{
$("body").append(data.html);
}
}
if(template && display && data && data.length){
var html = _.map(data, function(media){
return jade.render(template,{item:media});
});
html = html.join('');
$(display).html(html);
}
},
error:function(err){
vod.handleError(err);
}
}
if(method != 'get'){
q.data = payload;
}
$.ajax(q)
},
handleCommandEvent : function handleCommandEvent(event){
var data = {};
var el = $(this);
data.route = el.attr('data-cmd-route');
data.id = el.attr('data-id');
data.method = el.attr('data-cmd-method');
data.template = el.attr('data-cmd-template');
data.display = el.attr('data-cmd-display');
data.route = el.attr('data-cmd-route');
data.confirm = el.attr('data-cmd-confirm');
data.payload = new FormData();
$.each(this.attributes, function(){
if(this.name.indexOf('data-payload') != -1){
var attr = this.name.replace('data-payload-', '');
data.payload.append(attr,this.value);
}
});
vod.handleCommand(data);
},
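// Illustrative markup only (assumed, not taken from the original templates): handleCommandEvent
// reads the data-cmd-* attributes for routing and copies every data-payload-* attribute into a
// FormData entry, so a trigger element could look like:
//
//   <a class="command"
//      data-cmd-route="/media/publish" data-cmd-method="post"
//      data-cmd-template="item" data-cmd-display="#search-content"
//      data-id="123" data-payload-id="123" data-payload-published="true">Publish</a>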
refreshCart: function(checkout){
$.getJSON('/cart', function(res){
if(res.error){
return alert(res.error);
}
if(res.medias && res.medias.length){
var html = jade.render('cart-items',{items:res.medias, total_bytes:res.total_bytes, total_price:res.total_price, checkout:checkout||false});
$("#cart-items").html(html);
}
});
},
getRequests: function(){
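// Polls /cart/pending-payment roughly every 500ms: removes rendered items whose transaction
// no longer appears in the response, then appends any new ones. Note the async.forever task
// never calls next(); the loop is kept alive by the recursive setTimeout(vod.getRequests, 500)
// calls in both the success and fail branches.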
async.forever(function(next){
$.getJSON('/cart/pending-payment', function(res){
// remove transactions which no longer exist
console.log(res);
$('.cart-authorize-item').each(function(){
var i = $(this);
var id = i.attr('id');
var c = _.find(res,function(c){
console.log(c._id, id);
return c._id == id;
});
if(!c){
i.remove();
}
});
res.forEach(function(c){
var exist = $("#" + c._id);
if(exist.length){
return;
}
var html = jade.render('cart-items-admin',{item:c});
$("#requests").append(html);
});
setTimeout(function(){
vod.getRequests();
},500)
}).fail(function(){
setTimeout(function(){
vod.getRequests();
},500)
});
}, function(err){
})
}
};
var files_container = [];
$(function(){
$('body').on('click', '.approve-item', function(){
var id = $(this).attr('data-id');
$.post('/cart/'+id+'/approve', function(res){
});
});
$('body').on('click', '.remove-cart-item', function(){
var id = $(this).attr('data-id');
$.post('/cart/remove', {id:id}, function(res){
if(res.error){
return alert(res.error);
}
vod.refreshCart();
});
});
var ads = $('.advertisement').map(function(){
var id = $(this).attr('data-id');
return id
});
setTimeout(function(){
var arr = ads.toArray();
if(!arr.length) return;
$.post('/ad/tick',{ids:arr});
},1000);
$(window).scroll(function(){
if ($(window).scrollTop() + $(window).height() >= $(document).height() - 50){
$("#search-more").trigger('click');
}
});
$('body').on('click', '.chart', function(){
var self = $(this);
});
$('body').on('click', '.command', vod.handleCommandEvent);
$('body').on('click', '#send-reset-password', function(){
var email = $("#reset-password-email").val();
if(email != ''){
$.post('/user/reset',{email:email}, function(res){
$("#error-message").text(res.error || res.message);
$("#modal-error").modal('show');
});
}
})
$('body').on('click', '#confirm-reset-password', function(){
var email = $("#reset-password-email").val();
var code = $("#reset-password-code").val();
var password = $("#reset-password-password").val();
if(email != ''){
$.post('/user/reset-password',{email:email, code:code, password:password}, function(res){
$("#error-message").text(res.error || res.message);
$("#modal-error").modal('show');
if(res.message){
$('#modal-reset-account').modal('hide');
}
});
}
})
$('body').on('click', '#signin-help', function(){
$('#modal-reset-account').modal('show')
})
$("body").on('click', '#send-request', function(){
var title = $('#request-title').val();
var details = $('#request-details').val();
if(title == ''){
return;
}
$.post('/media/request',{title:title, details:details}, function(res){
if(res.error){
$("#error-message").text(res.error);
$("#modal-error").modal('show');
}else{
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$("#modal-request").modal('hide');
$('#request-title').val('');
$('#request-details').val('');
}
});
});
$("body").on('click', '.show-request-dialog', function(){
var val = $("#finder").val();
$("#request-title").val(val);
$('#modal-request').modal('show')
});
$("body").on('click', '#confirm-verification', function(){
var code = $('#verification-code').val();
if(!code || code == ""){
return;
}
$.post('/user/verify',{code:code}, function(res){
if(res.error){
return vod.handleError(res.error);
}
$('#modal-verify').modal('hide');
});
});
$("body").on('click', '#send-verification-code', function(){
$.post('/user/send-verification-sms')
});
$('body').on('click', '.flag-file a', function(){
var id = $(this).parent().parent().attr('data-id');
var reason = $(this).text();
$.post('/media/flag', {id:id, reason:reason}, function(res){
});
})
$("body").on('click', '.subscription-toggle', function(){
var self = $(this);
var id = self.attr('data-id');
$.post('/user/subscription-toggle',{id:id}, function(res){
if(res.error){
if(!res.error.verified){
$('#modal-verify').modal('show')
}
return;
}
if(res.watchlisted){
self.addClass('active');
self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok');
self.find('.watchlist-label').html(' watchlisted');
}else{
self.removeClass('active');
self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus');
self.find('.watchlist-label').html(' add to watchlist');
}
})
});
$("body").on('click', '#signup', function(){
var fields = "username password".split(" ");
var vals = {};
_.each(fields, function(f){vals[f] = $("#signup-" +f).val()});
var isEmpty = _.find(fields, function(f){return $("#signup-" +f).val() == "";}) ? true : false;
if(isEmpty){
//return alert('Please fill all fields');
}
//enforce a strong pass
if(vals.password.length <=5 || "123456 asdfg".indexOf(vals.password) != -1){
$("#error-message").text('Please use a stronger password');
$("#modal-error").modal('show');
return;
}
if(vals.username.length != 7){
$("#error-message").text('Incorrect mobile number');
$("#modal-error").modal('show');
return;
}
$.post('/user/register',vals, function(res){
if(res.error){
$("#error-message").text(res.error);
$("#modal-error").modal('show');
}else if(res.message){
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$('#signup-form').slideUp('fast');
}
});
})
$('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){
setTimeout(window.vod.filterSearch,0);
});
$('body').on('click', '.vod-list-inline li', function(){
var self = $(this);
self.siblings().removeClass('active'); | self.addClass("active"); | random_line_split |
|
web.js | ('data-cmd-confirm');
data.payload = new FormData();
$.each(this.attributes, function(){
if(this.name.indexOf('data-payload') != -1){
var attr = this.name.replace('data-payload-', '');
data.payload.append(attr,this.value);
}
});
vod.handleCommand(data);
},
refreshCart: function(checkout){
$.getJSON('/cart', function(res){
if(res.error){
return alert(res.error);
}
if(res.medias && res.medias.length){
var html = jade.render('cart-items',{items:res.medias, total_bytes:res.total_bytes, total_price:res.total_price, checkout:checkout||false});
$("#cart-items").html(html);
}
});
},
getRequests: function(){
async.forever(function(next){
$.getJSON('/cart/pending-payment', function(res){
// remove transactions which no longer exist
console.log(res);
$('.cart-authorize-item').each(function(){
var i = $(this);
var id = i.attr('id');
var c = _.find(res,function(c){
console.log(c._id, id);
return c._id == id;
});
if(!c){
i.remove();
}
});
res.forEach(function(c){
var exist = $("#" + c._id);
if(exist.length){
return;
}
var html = jade.render('cart-items-admin',{item:c});
$("#requests").append(html);
});
setTimeout(function(){
vod.getRequests();
},500)
}).fail(function(){
setTimeout(function(){
vod.getRequests();
},500)
});
}, function(err){
})
}
};
var files_container = [];
$(function(){
$('body').on('click', '.approve-item', function(){
var id = $(this).attr('data-id');
$.post('/cart/'+id+'/approve', function(res){
});
});
$('body').on('click', '.remove-cart-item', function(){
var id = $(this).attr('data-id');
$.post('/cart/remove', {id:id}, function(res){
if(res.error){
return alert(res.error);
}
vod.refreshCart();
});
});
var ads = $('.advertisement').map(function(){
var id = $(this).attr('data-id');
return id
});
setTimeout(function(){
var arr = ads.toArray();
if(!arr.length) return;
$.post('/ad/tick',{ids:arr});
},1000);
$(window).scroll(function(){
if ($(window).scrollTop() + $(window).height() >= $(document).height() - 50){
$("#search-more").trigger('click');
}
});
$('body').on('click', '.chart', function(){
var self = $(this);
});
$('body').on('click', '.command', vod.handleCommandEvent);
$('body').on('click', '#send-reset-password', function(){
var email = $("#reset-password-email").val();
if(email != ''){
$.post('/user/reset',{email:email}, function(res){
$("#error-message").text(res.error || res.message);
$("#modal-error").modal('show');
});
}
})
$('body').on('click', '#confirm-reset-password', function(){
var email = $("#reset-password-email").val();
var code = $("#reset-password-code").val();
var password = $("#reset-password-password").val();
if(email != ''){
$.post('/user/reset-password',{email:email, code:code, password:password}, function(res){
$("#error-message").text(res.error || res.message);
$("#modal-error").modal('show');
if(res.message){
$('#modal-reset-account').modal('hide');
}
});
}
})
$('body').on('click', '#signin-help', function(){
$('#modal-reset-account').modal('show')
})
$("body").on('click', '#send-request', function(){
var title = $('#request-title').val();
var details = $('#request-details').val();
if(title == ''){
return;
}
$.post('/media/request',{title:title, details:details}, function(res){
if(res.error){
$("#error-message").text(res.error);
$("#modal-error").modal('show');
}else{
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$("#modal-request").modal('hide');
$('#request-title').val('');
$('#request-details').val('');
}
});
});
$("body").on('click', '.show-request-dialog', function(){
var val = $("#finder").val();
$("#request-title").val(val);
$('#modal-request').modal('show')
});
$("body").on('click', '#confirm-verification', function(){
var code = $('#verification-code').val();
if(!code || code == ""){
return;
}
$.post('/user/verify',{code:code}, function(res){
if(res.error){
return vod.handleError(res.error);
}
$('#modal-verify').modal('hide');
});
});
$("body").on('click', '#send-verification-code', function(){
$.post('/user/send-verification-sms')
});
$('body').on('click', '.flag-file a', function(){
var id = $(this).parent().parent().attr('data-id');
var reason = $(this).text();
$.post('/media/flag', {id:id, reason:reason}, function(res){
});
})
$("body").on('click', '.subscription-toggle', function(){
var self = $(this);
var id = self.attr('data-id');
$.post('/user/subscription-toggle',{id:id}, function(res){
if(res.error){
if(!res.error.verified){
$('#modal-verify').modal('show')
}
return;
}
if(res.watchlisted){
self.addClass('active');
self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok');
self.find('.watchlist-label').html(' watchlisted');
}else{
self.removeClass('active');
self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus');
self.find('.watchlist-label').html(' add to watchlist');
}
})
});
$("body").on('click', '#signup', function(){
var fields = "username password".split(" ");
var vals = {};
_.each(fields, function(f){vals[f] = $("#signup-" +f).val()});
var isEmpty = _.find(fields, function(f){return $("#signup-" +f).val() == "";}) ? true : false;
if(isEmpty){
//return alert('Please fill all fields');
}
//enforce a strong pass
if(vals.password.length <=5 || "123456 asdfg".indexOf(vals.password) != -1){
$("#error-message").text('Please use a stronger password');
$("#modal-error").modal('show');
return;
}
if(vals.username.length != 7){
$("#error-message").text('Incorrect mobile number');
$("#modal-error").modal('show');
return;
}
$.post('/user/register',vals, function(res){
if(res.error){
$("#error-message").text(res.error);
$("#modal-error").modal('show');
}else if(res.message){
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$('#signup-form').slideUp('fast');
}
});
})
$('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){
setTimeout(window.vod.filterSearch,0);
});
$('body').on('click', '.vod-list-inline li', function(){
var self = $(this);
self.siblings().removeClass('active');
self.addClass("active");
});
$('body').on('click', '#search-more', function(){
var last = $("#search-content .media:last");
var id = last.attr('media-id');
vod.searchSince = id;
vod.filterSearch();
})
$('body').on('click', '#filter-container-elements li', function(){
var self = $(this);
var props = self.attr('filter-subprops');
if(props) | {
props = JSON.parse(props);
var html = jade.render("listings-subpropfilter",{props:props});
var subprop = $("#listings-subpropfilter");
if(subprop.length){
subprop.html(html);
}else{
$("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>");
}
$("#listings-subpropfilter .btn-group-vertical").each(function(){
var self = $(this);
var prop = self.attr('filter-prop');
self.find('label').each(function(){
var label = $(this);
var text = label.text();
label.html('<input type="radio" name="'+prop+'">' + text);
})
});
} | conditional_block |
|
web.js | if(vals.username.length != 7){
$("#error-message").text('Incorrect mobile number');
$("#modal-error").modal('show');
return;
}
$.post('/user/register',vals, function(res){
if(res.error){
$("#error-message").text(res.error);
$("#modal-error").modal('show');
}else if(res.message){
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$('#signup-form').slideUp('fast');
}
});
})
$('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){
setTimeout(window.vod.filterSearch,0);
});
$('body').on('click', '.vod-list-inline li', function(){
var self = $(this);
self.siblings().removeClass('active');
self.addClass("active");
});
$('body').on('click', '#search-more', function(){
var last = $("#search-content .media:last");
var id = last.attr('media-id');
vod.searchSince = id;
vod.filterSearch();
})
$('body').on('click', '#filter-container-elements li', function(){
var self = $(this);
var props = self.attr('filter-subprops');
if(props){
props = JSON.parse(props);
var html = jade.render("listings-subpropfilter",{props:props});
var subprop = $("#listings-subpropfilter");
if(subprop.length){
subprop.html(html);
}else{
$("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>");
}
$("#listings-subpropfilter .btn-group-vertical").each(function(){
var self = $(this);
var prop = self.attr('filter-prop');
self.find('label').each(function(){
var label = $(this);
var text = label.text();
label.html('<input type="radio" name="'+prop+'">' + text);
})
});
}else{
$("#listings-subpropfilter").html('');
}
setTimeout(window.vod.filterSearch,0);
})
$("body").on('keyup', '#finder', function(){
var t = window.vod.searchTime;
if(t){
window.clearTimeout(window.vod.searchTime);
}
window.vod.searchTime = setTimeout(function(){
var val = $("#finder").val();
if(val == '' || val.length < 2){
$("#main-content").show();
$("#search-content").hide();
return;
}
if(window.vod.searchXHR){
window.vod.searchXHR.abort();
}
var search_params = {
query:val
}
$("#main-content").hide();
$("#search-content").show();
if(vod.searchSince){
search_params.since = vod.searchSince;
}
if(vod.searchType){
search_params.type = vod.searchType;
}
window.vod.searchXHR = $.getJSON('/media/search',search_params,function(res){
vod.searchSince = null;
if(!res.length && !search_params.since){
var html = jade.render('not-found');
$("#search-content").html(html);
return;
}
var items = _.map(res, function(m){
m.files = _.sortBy(m.files, function(a){
var score = parseFloat(a.season + '.' + a.episode);
return score;
});
var lastfile = _.last(m.files);
m.lastfile = lastfile;
return jade.render('item',{item:m, lastfile:lastfile});
});
if(search_params.since){
$("#search-content .listing").append(items.join(''));
if(res.length < 4){
$("#search-more").remove();
}
}else{
var more = res.length < 4 ?'':jade.render('search-more');
var html = '<ul class="dl-horizontal listing list-inline">'+items.join('')+'</ul>' + more;
$("#search-content").html(html);
}
});
},300);
})
$("body").on('click', '.media a', function(e){
// e.preventDefault();
// e.stopPropagation();
// var media = $(this).parent();
//vod.displayMedia(media, "html");
});
$("body").on("change", "#media-retrieve-data", function(){
var val = $(this).val();
$("#loader").show();
$.post('/media/retrieve-data',{url:val}, function(res){
$("#loader").hide();
for(prop in res){
$("#new-media-info-container input[media-prop='"+prop+"']").val(res[prop]);
}
$("#new-media-info-container textarea[media-prop='description']").html(res.description);
$("#media-image").attr('src',"/media/pipe?url=" + res.poster);
});
});
$("body").on("dragenter","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid #0B85A1');
});
$("body").on("dragend","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid red');
});
$("body").on("dragover","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
});
$("body").on("drop","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid transparent');
var files = e.originalEvent.dataTransfer.files;
for(var i=0; i<files.length; i++){
files_container.push(files[i]);
}
renderFiles();
});
$("body").on('click', '#media-type label', function(){
$("#media-file-type").hide();
$("#media-image").attr("src","");
files_container.length = 0;
window.vod.selectedMedia = {};
var self = $(this);
var type = self.text();
var template = self.attr('media-template');
var html = jade.render(template);
$("#file-container").html('');
files_container.length = 0;
$("#new-media-info-container").html(html);
if(type == "App"){
type = "application";
}
var engine = new Bloodhound({
datumTokenizer: function(d){
return Bloodhound.tokenizers.whitespace(d.title)
},
queryTokenizer: Bloodhound.tokenizers.whitespace,
prefetch:{
url:'/media/search?type='+type.toLowerCase()+'&fields=title&include_unpublished=1&limit=1000000',
ttl:1000,
filter:function(media){
return $.map(media, function (data) {
data.value = data.title;
return data;
});
}
}
});
engine.clearPrefetchCache();
engine.initialize();
$(".search").typeahead(
{
hint: true,
highlight: true,
minLength: 1
},
{
displayKey: 'value',
source: engine.ttAdapter()
}
)
.on("typeahead:selected", function(datum, obj){
$("#loader").show();
$.getJSON('/media/' + obj._id, function(obj){
$("#loader").hide();
$("#media-file-type label:first").trigger('click');
window.vod.selectedMedia = obj;
$("[media-prop-type='text']").each(function(){
var self = $(this);
var prop = self.attr("media-prop");
self.val(obj[prop]);
});
$("[media-prop-type='group']").each(function(){
var self = $(this);
var prop = self.attr("media-prop");
self.find('label').each(function(){
if($(this).text().trim() == obj[prop]){
$(this).addClass('active');
}else{
$(this).removeClass('active');
}
});
});
$("#new-media-form-content").show();
$("#media-file-type").show();
var html = jade.render('file-details',{files:obj.files});
$("#display-uploaded-media").html(html);
var options = jade.render('newmedia-media-options',{id:obj._id});
$("#save-info").parent().append(options);
});
})
})
$("body").on('click', '#media-file-type label', function(){
var self = $(this);
var template = self.attr('media-template');
var html = jade.render(template);
$("#media-file-type-container").html(html);
});
$('body').on('click', function (e) {
if (!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0) {
$(".media").popover('destroy').removeClass('selected');
}
});
$("body").on('click', '.remove-upload-file', function(){
var index = $(this).attr('data-index');
files_container.splice(index,1);
renderFiles();
});
});
function | renderFiles | identifier_name |
|
web.js | text(res.error);
$("#modal-error").modal('show');
}else if(res.message){
$("#error-message").text(res.message);
$("#modal-error").modal('show');
$('#signup-form').slideUp('fast');
}
});
})
$('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){
setTimeout(window.vod.filterSearch,0);
});
$('body').on('click', '.vod-list-inline li', function(){
var self = $(this);
self.siblings().removeClass('active');
self.addClass("active");
});
$('body').on('click', '#search-more', function(){
var last = $("#search-content .media:last");
var id = last.attr('media-id');
vod.searchSince = id;
vod.filterSearch();
})
$('body').on('click', '#filter-container-elements li', function(){
var self = $(this);
var props = self.attr('filter-subprops');
if(props){
props = JSON.parse(props);
var html = jade.render("listings-subpropfilter",{props:props});
var subprop = $("#listings-subpropfilter");
if(subprop.length){
subprop.html(html);
}else{
$("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>");
}
$("#listings-subpropfilter .btn-group-vertical").each(function(){
var self = $(this);
var prop = self.attr('filter-prop');
self.find('label').each(function(){
var label = $(this);
var text = label.text();
label.html('<input type="radio" name="'+prop+'">' + text);
})
});
}else{
$("#listings-subpropfilter").html('');
}
setTimeout(window.vod.filterSearch,0);
})
$("body").on('keyup', '#finder', function(){
var t = window.vod.searchTime;
if(t){
window.clearTimeout(window.vod.searchTime);
}
window.vod.searchTime = setTimeout(function(){
var val = $("#finder").val();
if(val == '' || val.length < 2){
$("#main-content").show();
$("#search-content").hide();
return;
}
if(window.vod.searchXHR){
window.vod.searchXHR.abort();
}
var search_params = {
query:val
}
$("#main-content").hide();
$("#search-content").show();
if(vod.searchSince){
search_params.since = vod.searchSince;
}
if(vod.searchType){
search_params.type = vod.searchType;
}
window.vod.searchXHR = $.getJSON('/media/search',search_params,function(res){
vod.searchSince = null;
if(!res.length && !search_params.since){
var html = jade.render('not-found');
$("#search-content").html(html);
return;
}
var items = _.map(res, function(m){
m.files = _.sortBy(m.files, function(a){
var score = parseFloat(a.season + '.' + a.episode);
return score;
});
var lastfile = _.last(m.files);
m.lastfile = lastfile;
return jade.render('item',{item:m, lastfile:lastfile});
});
if(search_params.since){
$("#search-content .listing").append(items.join(''));
if(res.length < 4){
$("#search-more").remove();
}
}else{
var more = res.length < 4 ?'':jade.render('search-more');
var html = '<ul class="dl-horizontal listing list-inline">'+items.join('')+'</ul>' + more;
$("#search-content").html(html);
}
});
},300);
})
$("body").on('click', '.media a', function(e){
// e.preventDefault();
// e.stopPropagation();
// var media = $(this).parent();
//vod.displayMedia(media, "html");
});
$("body").on("change", "#media-retrieve-data", function(){
var val = $(this).val();
$("#loader").show();
$.post('/media/retrieve-data',{url:val}, function(res){
$("#loader").hide();
for(prop in res){
$("#new-media-info-container input[media-prop='"+prop+"']").val(res[prop]);
}
$("#new-media-info-container textarea[media-prop='description']").html(res.description);
$("#media-image").attr('src',"/media/pipe?url=" + res.poster);
});
});
$("body").on("dragenter","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid #0B85A1');
});
$("body").on("dragend","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid red');
});
$("body").on("dragover","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
});
$("body").on("drop","#file-dropper", function(e){
e.stopPropagation();
e.preventDefault();
var self = $(this);
self.css('border', '2px solid transparent');
var files = e.originalEvent.dataTransfer.files;
for(var i=0; i<files.length; i++){
files_container.push(files[i]);
}
renderFiles();
});
$("body").on('click', '#media-type label', function(){
$("#media-file-type").hide();
$("#media-image").attr("src","");
files_container.length = 0;
window.vod.selectedMedia = {};
var self = $(this);
var type = self.text();
var template = self.attr('media-template');
var html = jade.render(template);
$("#file-container").html('');
files_container.length = 0;
$("#new-media-info-container").html(html);
if(type == "App"){
type = "application";
}
var engine = new Bloodhound({
datumTokenizer: function(d){
return Bloodhound.tokenizers.whitespace(d.title)
},
queryTokenizer: Bloodhound.tokenizers.whitespace,
prefetch:{
url:'/media/search?type='+type.toLowerCase()+'&fields=title&include_unpublished=1&limit=1000000',
ttl:1000,
filter:function(media){
return $.map(media, function (data) {
data.value = data.title;
return data;
});
}
}
});
engine.clearPrefetchCache();
engine.initialize();
$(".search").typeahead(
{
hint: true,
highlight: true,
minLength: 1
},
{
displayKey: 'value',
source: engine.ttAdapter()
}
)
.on("typeahead:selected", function(datum, obj){
$("#loader").show();
$.getJSON('/media/' + obj._id, function(obj){
$("#loader").hide();
$("#media-file-type label:first").trigger('click');
window.vod.selectedMedia = obj;
$("[media-prop-type='text']").each(function(){
var self = $(this);
var prop = self.attr("media-prop");
self.val(obj[prop]);
});
$("[media-prop-type='group']").each(function(){
var self = $(this);
var prop = self.attr("media-prop");
self.find('label').each(function(){
if($(this).text().trim() == obj[prop]){
$(this).addClass('active');
}else{
$(this).removeClass('active');
}
});
});
$("#new-media-form-content").show();
$("#media-file-type").show();
var html = jade.render('file-details',{files:obj.files});
$("#display-uploaded-media").html(html);
var options = jade.render('newmedia-media-options',{id:obj._id});
$("#save-info").parent().append(options);
});
})
})
$("body").on('click', '#media-file-type label', function(){
var self = $(this);
var template = self.attr('media-template');
var html = jade.render(template);
$("#media-file-type-container").html(html);
});
$('body').on('click', function (e) {
if (!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0) {
$(".media").popover('destroy').removeClass('selected');
}
});
$("body").on('click', '.remove-upload-file', function(){
var index = $(this).attr('data-index');
files_container.splice(index,1);
renderFiles();
});
});
function renderFiles() | {
$("#file-container").html('');
files_container.forEach(function(file, index){
var html = jade.render('file-container',{index:index, file:file.name||file.fileName, size:prettyBytes(file.size||file.fileSize)});
$("#file-container").append(html);
})
} | identifier_body |
|
db_explorer.go | int, int) {
q := u.Query()
limit := DEFAULT_LIMIT
offset := DEFAULT_OFFSET
limitParam, _ := q["limit"]
if len(limitParam) > 0 {
limitInt, err := strconv.Atoi(limitParam[0])
if err == nil {
limit = limitInt
}
}
offsetParam, _ := q["offset"]
if len(offsetParam) > 0 {
offsetInt, err := strconv.Atoi(offsetParam[0])
if err == nil {
offset = offsetInt
}
}
return limit, offset
}
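// Behaviour sketch (illustrative values): for a request URL like /users?limit=5&offset=10
// this returns (5, 10); missing or non-numeric parameters silently fall back to
// DEFAULT_LIMIT / DEFAULT_OFFSET.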
func (h *Handler) List(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusNotFound, err.Error(), w)
return
}
limit, offset := getLimitOffset(r.URL)
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"records": result}, w)
}
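// Hedged usage example: assuming a table named "items" is registered in h.tables,
// GET /items?limit=3&offset=0 runs
//   SELECT * FROM items LIMIT ? OFFSET ?
// with (3, 0) and responds with {"records": [...]} (the exact envelope depends on
// successResponse, which is defined elsewhere in this file).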
func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) {
columns, err := rows.Columns()
if err != nil {
return nil, err
}
count := len(columns)
result := make([]map[string]interface{}, 0)
for rows.Next() {
values := make([]interface{}, count)
valuePtrs := make([]interface{}, count)
for i := range columns {
valuePtrs[i] = &values[i]
}
err = rows.Scan(valuePtrs...)
if err != nil {
return nil, err
}
result = append(result, parseRow(table, columns, values))
}
return result, nil
}
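// parseResponse returns one map per row, keyed by column name; string columns are wrapped
// in NullString (marshalled as a JSON string or null) and int columns are passed through
// as-is, so a row might marshal to {"id": 1, "title": "abc", "note": null}. Column names
// here are illustrative.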
func (h *Handler) Show(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? LIMIT 1", key)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
if len(result) == 0 {
errorResponse(http.StatusNotFound, "record not found", w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w)
}
func (h *Handler) Create(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
for _,field := range table.Fields {
if field.AutoIncrement {
continue
}
fields = append(fields, field.Name)
value, _ := params[field.Name]
if value == nil {
values = append(values, field.Default)
continue
}
values = append(values, value)
}
insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)"
insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1)
insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1)
insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1)
fmt.Println(insertSQL)
_, err = h.DB.Exec(insertSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
rows, err := h.DB.Query("SELECT LAST_INSERT_ID()")
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
var key int
for rows.Next() {
err = rows.Scan(&key)
if err != nil {
InternalServerError(err, w)
return
}
}
successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w)
}
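// A possible simplification (sketch, not the original author's code): sql.Result already
// exposes the generated key, so the extra SELECT LAST_INSERT_ID() round trip could be
// replaced with:
//
//   res, err := h.DB.Exec(insertSQL, values...)
//   if err != nil { /* handle error */ }
//   id, err := res.LastInsertId() // int64
//   successResponse(http.StatusOK, map[string]int64{table.PrimaryKey: id}, w)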
func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
fmt.Printf("Params: %v",params)
for name, value := range params {
field, ok := table.Fields[name]
if !ok | // skip unknown fields
fields = append(fields, field.Name)
invalidTypeMessage := "field " + field.Name + " have invalid type"
// updating an auto-increment field is not allowed
if field.AutoIncrement {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
switch {
case value == nil && field.Nullable:
values = append(values, nil)
case field.Type == "string":
val, ok := value.(string)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, val)
case field.Type == "int":
// encoding/json decodes JSON numbers as float64, so assert float64 and convert
val, ok := value.(float64)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, int(val))
}
}
values = append(values, key)
fmt.Printf("\nFields: %v",fields)
fmt.Printf("\nValues: %v",values)
updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?"
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1)
updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1)
updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1)
fmt.Printf("\n%s %v\n", updateSQL, values)
res, err := h.DB.Exec(updateSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
affected, err := res.RowsAffected()
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]int64{"updated": affected}, w)
}
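// Worked example (illustrative names): for a request to /items/7 with body {"title": "new"},
// the statement built above is
//   UPDATE items SET title = ? WHERE id = ?
// executed with values ["new", 7], and the response carries {"updated": <rows affected>}.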
func getKey(r *http.Request) (int, error) {
params := strings.Split(r.URL.Path, "/")
key, err := strconv.Atoi(params[2])
if err != nil {
return 0, fmt.Errorf("Primary key must be int")
}
return key, nil
}
func getTable(r *http.Request, h *Handler) (*Table, error) {
params := strings.Split(r.URL.Path, "/")
tableName := params[1]
table, ok := h.tables[tableName]
if !ok {
return nil, fmt.Errorf("unknown table")
}
return &table, nil
}
func strToBool(in string) bool {
if in == "YES" {
return true
}
return false
}
type NullString struct {
Valid bool
String string
}
func (s NullString) MarshalJSON() ([]byte, error) {
if s.Valid {
return json.Marshal(s.String)
}
return []byte("null"), nil
}
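// Example of the custom marshalling above: NullString{Valid: true, String: "abc"} encodes
// to "abc", while NullString{Valid: false} encodes to null, which keeps nullable text
// columns distinguishable from empty strings in the JSON output.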
func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} {
result := map[string]interface{}{}
for i, val := range vals {
fieldName := columns[i]
field, ok := table.Fields[fieldName]
if !ok {
panic("Can't find field description")
}
switch {
case field.Type == "string":
b, ok := val.([]byte)
if ok {
result[fieldName] = NullString{true, string(b)}
} else {
result[fieldName] = NullString{false, ""}
}
case field.Type == "int":
result[fieldName] = val
}
}
return result
}
func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if | { continue } | conditional_block |
db_explorer.go | int, int) {
q := u.Query()
limit := DEFAULT_LIMIT
offset := DEFAULT_OFFSET
limitParam, _ := q["limit"]
if len(limitParam) > 0 {
limitInt, err := strconv.Atoi(limitParam[0])
if err == nil {
limit = limitInt
}
}
offsetParam, _ := q["offset"]
if len(offsetParam) > 0 {
offsetInt, err := strconv.Atoi(offsetParam[0])
if err == nil {
offset = offsetInt
}
}
return limit, offset
}
func (h *Handler) List(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusNotFound, err.Error(), w)
return
}
limit, offset := getLimitOffset(r.URL)
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"records": result}, w)
}
func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) {
columns, err := rows.Columns()
if err != nil {
return nil, err
}
count := len(columns)
result := make([]map[string]interface{}, 0)
for rows.Next() {
values := make([]interface{}, count)
valuePtrs := make([]interface{}, count)
for i := range columns {
valuePtrs[i] = &values[i]
}
err = rows.Scan(valuePtrs...)
if err != nil {
return nil, err
}
result = append(result, parseRow(table, columns, values))
}
return result, nil
}
func (h *Handler) Show(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? LIMIT 1", key)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
if len(result) == 0 {
errorResponse(http.StatusNotFound, "record not found", w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w)
}
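// Hedged usage example: GET /items/42 (table and id are illustrative) selects the row whose
// primary-key column equals 42; a missing row yields a 404 with "record not found",
// otherwise the response carries {"record": {...}}.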
func (h *Handler) Create(w http.ResponseWriter, r *http.Request) |
var fields []string
var values []interface{}
for _,field := range table.Fields {
if field.AutoIncrement {
continue
}
fields = append(fields, field.Name)
value, _ := params[field.Name]
if value == nil {
values = append(values, field.Default)
continue
}
values = append(values, value)
}
insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)"
insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1)
insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1)
insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1)
fmt.Println(insertSQL)
_, err = h.DB.Exec(insertSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
rows, err := h.DB.Query("SELECT LAST_INSERT_ID()")
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
var key int
for rows.Next() {
err = rows.Scan(&key)
if err != nil {
InternalServerError(err, w)
return
}
}
successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w)
}
func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
fmt.Printf("Params: %v",params)
for name, value := range params {
field, ok := table.Fields[name]
if !ok { continue } // skip unknown fields
fields = append(fields, field.Name)
invalidTypeMessage := "field " + field.Name + " have invalid type"
// updating an auto-increment field is not allowed
if field.AutoIncrement {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
switch {
case value == nil && field.Nullable:
values = append(values, nil)
case field.Type == "string":
val, ok := value.(string)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, val)
case field.Type == "int":
// encoding/json decodes JSON numbers as float64, so assert float64 and convert
val, ok := value.(float64)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, int(val))
}
}
values = append(values, key)
fmt.Printf("\nFields: %v",fields)
fmt.Printf("\nValues: %v",values)
updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?"
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1)
updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1)
updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1)
fmt.Printf("\n%s %v\n", updateSQL, values)
res, err := h.DB.Exec(updateSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
affected, err := res.RowsAffected()
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]int64{"updated": affected}, w)
}
func getKey(r *http.Request) (int, error) {
params := strings.Split(r.URL.Path, "/")
key, err := strconv.Atoi(params[2])
if err != nil {
return 0, fmt.Errorf("Primary key must be int")
}
return key, nil
}
func getTable(r *http.Request, h *Handler) (*Table, error) {
params := strings.Split(r.URL.Path, "/")
tableName := params[1]
table, ok := h.tables[tableName]
if !ok {
return nil, fmt.Errorf("unknown table")
}
return &table, nil
}
func strToBool(in string) bool {
if in == "YES" {
return true
}
return false
}
type NullString struct {
Valid bool
String string
}
func (s NullString) MarshalJSON() ([]byte, error) {
if s.Valid {
return json.Marshal(s.String)
}
return []byte("null"), nil
}
func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} {
result := map[string]interface{}{}
for i, val := range vals {
fieldName := columns[i]
field, ok := table.Fields[fieldName]
if !ok {
panic("Can't find field description")
}
switch {
case field.Type == "string":
b, ok := val.([]byte)
if ok {
result[fieldName] = NullString{true, string(b)}
} else {
result[fieldName] = NullString{false, ""}
}
case field.Type == "int":
result[fieldName] = val
}
}
return result
}
func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
| {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
} | identifier_body |
db_explorer.go | int, int) {
q := u.Query()
limit := DEFAULT_LIMIT
offset := DEFAULT_OFFSET
limitParam, _ := q["limit"]
if len(limitParam) > 0 {
limitInt, err := strconv.Atoi(limitParam[0])
if err == nil {
limit = limitInt
}
}
offsetParam, _ := q["offset"]
if len(offsetParam) > 0 {
offsetInt, err := strconv.Atoi(offsetParam[0])
if err == nil {
offset = offsetInt
}
}
return limit, offset
}
func (h *Handler) List(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusNotFound, err.Error(), w)
return
}
limit, offset := getLimitOffset(r.URL)
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"records": result}, w)
}
func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) {
columns, err := rows.Columns()
if err != nil {
return nil, err
}
count := len(columns)
result := make([]map[string]interface{}, 0)
for rows.Next() {
values := make([]interface{}, count)
valuePtrs := make([]interface{}, count)
for i := range columns {
valuePtrs[i] = &values[i]
}
err = rows.Scan(valuePtrs...)
if err != nil {
return nil, err
}
result = append(result, parseRow(table, columns, values))
}
return result, nil
}
func (h *Handler) Show(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? LIMIT 1", key)
if err != nil {
InternalServerError(err, w) | return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
if len(result) == 0 {
errorResponse(http.StatusNotFound, "record not found", w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w)
}
func (h *Handler) Create(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
for _,field := range table.Fields {
if field.AutoIncrement {
continue
}
fields = append(fields, field.Name)
value, _ := params[field.Name]
if value == nil {
values = append(values, field.Default)
continue
}
values = append(values, value)
}
insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)"
insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1)
insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1)
insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1)
fmt.Println(insertSQL)
_, err = h.DB.Exec(insertSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
rows, err := h.DB.Query("SELECT LAST_INSERT_ID()")
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
var key int
for rows.Next() {
err = rows.Scan(&key)
if err != nil {
InternalServerError(err, w)
return
}
}
successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w)
}
func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
fmt.Printf("Params: %v",params)
for name, value := range params {
field, ok := table.Fields[name]
if !ok { continue } // skip unknown fields
fields = append(fields, field.Name)
invalidTypeMessage := "field " + field.Name + " have invalid type"
// updating an auto-increment field is not allowed
if field.AutoIncrement {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
switch {
case value == nil && field.Nullable:
values = append(values, nil)
case field.Type == "string":
val, ok := value.(string)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, val)
case field.Type == "int":
// encoding/json decodes JSON numbers as float64, so assert float64 and convert
val, ok := value.(float64)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, int(val))
}
}
values = append(values, key)
fmt.Printf("\nFields: %v",fields)
fmt.Printf("\nValues: %v",values)
updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?"
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1)
updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1)
updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1)
fmt.Printf("\n%s %v\n", updateSQL, values)
res, err := h.DB.Exec(updateSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
affected, err := res.RowsAffected()
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]int64{"updated": affected}, w)
}
func getKey(r *http.Request) (int, error) {
params := strings.Split(r.URL.Path, "/")
key, err := strconv.Atoi(params[2])
if err != nil {
return 0, fmt.Errorf("Primary key must be int")
}
return key, nil
}
func getTable(r *http.Request, h *Handler) (*Table, error) {
params := strings.Split(r.URL.Path, "/")
tableName := params[1]
table, ok := h.tables[tableName]
if !ok {
return nil, fmt.Errorf("unknown table")
}
return &table, nil
}
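// Routing sketch: both helpers above split r.URL.Path on "/", so a request to /items/15
// (illustrative) resolves to the "items" entry of h.tables and the integer key 15; unknown
// table names or non-numeric ids surface as the errors returned here.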
func strToBool(in string) bool {
if in == "YES" {
return true
}
return false
}
type NullString struct {
Valid bool
String string
}
func (s NullString) MarshalJSON() ([]byte, error) {
if s.Valid {
return json.Marshal(s.String)
}
return []byte("null"), nil
}
func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} {
result := map[string]interface{}{}
for i, val := range vals {
fieldName := columns[i]
field, ok := table.Fields[fieldName]
if !ok {
panic("Can't find field description")
}
switch {
case field.Type == "string":
b, ok := val.([]byte)
if ok {
result[fieldName] = NullString{true, string(b)}
} else {
result[fieldName] = NullString{false, ""}
}
case field.Type == "int":
result[fieldName] = val
}
}
return result
}
func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err | random_line_split |
|
db_explorer.go | int, int) {
q := u.Query()
limit := DEFAULT_LIMIT
offset := DEFAULT_OFFSET
limitParam, _ := q["limit"]
if len(limitParam) > 0 {
limitInt, err := strconv.Atoi(limitParam[0])
if err == nil {
limit = limitInt
}
}
offsetParam, _ := q["offset"]
if len(offsetParam) > 0 {
offsetInt, err := strconv.Atoi(offsetParam[0])
if err == nil {
offset = offsetInt
}
}
return limit, offset
}
func (h *Handler) List(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusNotFound, err.Error(), w)
return
}
limit, offset := getLimitOffset(r.URL)
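// table.Name is interpolated directly into the SQL string; this is only reasonable because
// getTable restricts it to keys already present in h.tables, while limit and offset go
// through placeholders.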
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"records": result}, w)
}
func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) {
columns, err := rows.Columns()
if err != nil {
return nil, err
}
count := len(columns)
result := make([]map[string]interface{}, 0)
for rows.Next() {
values := make([]interface{}, count)
valuePtrs := make([]interface{}, count)
for i := range columns {
valuePtrs[i] = &values[i]
}
err = rows.Scan(valuePtrs...)
if err != nil {
return nil, err
}
result = append(result, parseRow(table, columns, values))
}
return result, nil
}
func (h *Handler) Show(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? LIMIT 1", key)
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
result, err := parseResponse(rows, table)
if err != nil {
InternalServerError(err, w)
return
}
if len(result) == 0 {
errorResponse(http.StatusNotFound, "record not found", w)
return
}
successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w)
}
func (h *Handler) Create(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
for _,field := range table.Fields {
if field.AutoIncrement {
continue
}
fields = append(fields, field.Name)
value, _ := params[field.Name]
if value == nil {
values = append(values, field.Default)
continue
}
values = append(values, value)
}
insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)"
insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1)
insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1)
insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1)
fmt.Println(insertSQL)
_, err = h.DB.Exec(insertSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
rows, err := h.DB.Query("SELECT LAST_INSERT_ID()")
if err != nil {
InternalServerError(err, w)
return
}
defer rows.Close()
var key int
for rows.Next() {
err = rows.Scan(&key)
if err != nil {
InternalServerError(err, w)
return
}
}
successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w)
}
func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
key, err := getKey(r)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
body, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var params map[string]interface{}
err = json.Unmarshal(body, ¶ms)
if err != nil {
errorResponse(http.StatusBadRequest, err.Error(), w)
return
}
var fields []string
var values []interface{}
fmt.Printf("Params: %v",params)
for key, value := range params {
field, ok := table.Fields[key]
if !ok { continue } // skip unknown fields
fields = append(fields, field.Name)
invalidTypeMessage := "field " + field.Name + " have invalid type"
// update auto increment field does not allowed
if field.AutoIncrement {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
switch {
case value == nil && field.Nullable:
values = append(values, nil)
case field.Type == "string":
val, ok := value.(string)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, val)
case field.Type == "int":
val, ok := value.(int)
if !ok {
errorResponse(http.StatusBadRequest, invalidTypeMessage, w)
return
}
values = append(values, val)
}
}
values = append(values, key)
fmt.Printf("\nFields: %v",fields)
fmt.Printf("\nValues: %v",values)
updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?"
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1)
updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1)
updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1)
fmt.Println()
fmt.Printf(updateSQL, values...)
res, err := h.DB.Exec(updateSQL, values...)
if err != nil {
fmt.Println(err)
InternalServerError(err, w)
return
}
affected, err := res.RowsAffected()
if err != nil {
InternalServerError(err, w)
return
}
successResponse(http.StatusOK, map[string]int64{"updated": affected}, w)
}
func getKey(r *http.Request) (int, error) {
params := strings.Split(r.URL.Path, "/")
key, err := strconv.Atoi(params[2])
if err != nil {
return 0, fmt.Errorf("Primary key must be int")
}
return key, nil
}
func getTable(r *http.Request, h *Handler) (*Table, error) {
params := strings.Split(r.URL.Path, "/")
tableName := params[1]
table, ok := h.tables[tableName]
if !ok {
return nil, fmt.Errorf("unknown table")
}
return &table, nil
}
func strToBool(in string) bool {
if in == "YES" {
return true
}
return false
}
type NullString struct {
Valid bool
String string
}
func (s NullString) MarshalJSON() ([]byte, error) {
if s.Valid {
return json.Marshal(s.String)
}
return []byte("null"), nil
}
func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} {
result := map[string]interface{}{}
for i, val := range vals {
fieldName := columns[i]
field, ok := table.Fields[fieldName]
if !ok {
panic("Can't find field description")
}
switch {
case field.Type == "string":
b, ok := val.([]byte)
if ok {
result[fieldName] = NullString{true, string(b)}
} else {
result[fieldName] = NullString{false, ""}
}
case field.Type == "int":
result[fieldName] = val
}
}
return result
}
func (h *Handler) | (w http.ResponseWriter, r *http.Request) {
table, err := getTable(r, h)
if | Delete | identifier_name |
DQN_1.py | .create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (
MAZE_H * UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT)
return s_, reward, done
def render(self):
# | leep(0.01)
self.update()
np.random.seed(1)
tf.set_random_seed(1)
class DeepQNetwork:
# 建立神经网络
def _build_net(self):
# -------------- 创建 eval 神经网络, 及时提升参数 --------------
tf.compat.v1.disable_eager_execution()
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到
with tf.variable_scope('eval_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
# eval_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# eval_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'): # 求误差
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'): # 梯度下降
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ---------------- 创建 target 神经网络, 提供 target Q ---------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation
with tf.variable_scope('target_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# target_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# target_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy # epsilon 的最大值
self.replace_target_iter = replace_target_iter # 更换 target_net 的步数
self.memory_size = memory_size # 记忆上限
self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来
self.epsilon_increment = e_greedy_increment # epsilon 的增量
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数
# 记录学习次数 (用于判断是否更换 target_net 参数)
self.learn_step_counter = 0
# 初始化全 0 记忆 [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy
# 创建 [target_net, evaluate_net]
self._build_net()
# 替换 target net 的参数
t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数
e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数
self.sess = tf.Session()
# 输出 tensorboard 文件
if output_graph:
# $ tensorboard --logdir=logs
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
# 记录一条 [s, a, r, s_] 记录
transition = np.hstack((s, [a, r], s_))
# 总 memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition # 替换过程
self.memory_counter += 1
def choose_action(self, observation):
# 统一 observation 的 shape (1, size_of_observation)
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions) # � | time.s | identifier_name |
DQN_1.py | .create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (
MAZE_H * UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
| ve(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT)
return s_, reward, done
def render(self):
# time.sleep(0.01)
self.update()
np.random.seed(1)
tf.set_random_seed(1)
class DeepQNetwork:
# 建立神经网络
def _build_net(self):
# -------------- 创建 eval 神经网络, 及时提升参数 --------------
tf.compat.v1.disable_eager_execution()
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到
with tf.variable_scope('eval_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
# eval_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# eval_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'): # 求误差
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'): # 梯度下降
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ---------------- 创建 target 神经网络, 提供 target Q ---------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation
with tf.variable_scope('target_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# target_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# target_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy # epsilon 的最大值
self.replace_target_iter = replace_target_iter # 更换 target_net 的步数
self.memory_size = memory_size # 记忆上限
self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来
self.epsilon_increment = e_greedy_increment # epsilon 的增量
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数
# 记录学习次数 (用于判断是否更换 target_net 参数)
self.learn_step_counter = 0
# 初始化全 0 记忆 [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy
# 创建 [target_net, evaluate_net]
self._build_net()
# 替换 target net 的参数
t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数
e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数
self.sess = tf.Session()
# 输出 tensorboard 文件
if output_graph:
# $ tensorboard --logdir=logs
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
# 记录一条 [s, a, r, s_] 记录
transition = np.hstack((s, [a, r], s_))
# 总 memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition # 替换过程
self.memory_counter += 1
def choose_action(self, observation):
# 统一 observation 的 shape (1, size_of_observation)
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions) # 随 | self.canvas.mo | conditional_block |
DQN_1.py | lf):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
# hell2_center = origin + np.array([UNIT, UNIT * 2])
# self.hell2 = self.canvas.create_rectangle(
# hell2_center[0] - 15, hell2_center[1] - 15,
# hell2_center[0] + 15, hell2_center[1] + 15,
# fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (
MAZE_H * UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT)
return s_, reward, done
def render(self):
# time.sleep(0.01)
self.update()
np.random.seed(1)
tf.set_random_seed(1)
class DeepQNetwork:
# 建立神经网络
def _build_net(self):
# -------------- 创建 eval 神经网络, 及时提升参数 --------------
tf.compat.v1.disable_eager_execution()
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到
with tf.variable_scope('eval_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
# eval_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# eval_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'): # 求误差
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'): # 梯度下降
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ---------------- 创建 target 神经网络, 提供 target Q ---------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation
with tf.variable_scope('target_net'):
# c_names(collections_names) 是在更新 target_net 参数时会用到
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# target_net 的第一层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# target_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy # epsilon 的最大值
self.replace_target_iter = replace_target_iter # 更换 target_net 的步数
self.memory_size = memory_size # 记忆上限
self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来
self.epsilon_increment = e_greedy_increment # epsilon 的增量
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数
| __()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.n_features = 2
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(se | identifier_body |
|
DQN_1.py | .variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# target_net 的第二层. collections 是在更新 target_net 参数时会用到
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy # epsilon 的最大值
self.replace_target_iter = replace_target_iter # 更换 target_net 的步数
self.memory_size = memory_size # 记忆上限
self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来
self.epsilon_increment = e_greedy_increment # epsilon 的增量
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数
# 记录学习次数 (用于判断是否更换 target_net 参数)
self.learn_step_counter = 0
# 初始化全 0 记忆 [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy
# 创建 [target_net, evaluate_net]
self._build_net()
# 替换 target net 的参数
t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数
e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数
self.sess = tf.Session()
# 输出 tensorboard 文件
if output_graph:
# $ tensorboard --logdir=logs
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
# 记录一条 [s, a, r, s_] 记录
transition = np.hstack((s, [a, r], s_))
# 总 memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition # 替换过程
self.memory_counter += 1
def choose_action(self, observation):
# 统一 observation 的 shape (1, size_of_observation)
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions) # 随机选择
return action
def learn(self):
# 检查是否替换 target_net 参数
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('\ntarget_params_replaced\n')
# 从 memory 中随机抽取 batch_size 这么多记忆
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
# 获取 q_next (target_net 产生了 q) 和 q_eval(eval_net 产生的 q)
q_next, q_eval = self.sess.run(
[self.q_next, self.q_eval],
feed_dict={
self.s_: batch_memory[:, -self.n_features:],
self.s: batch_memory[:, :self.n_features]
})
# 下面这几步十分重要. q_next, q_eval 包含所有 action 的值,
# 而我们需要的只是已经选择好的 action 的值, 其他的并不需要.
# 所以我们将其他的 action 值全变成 0, 将用到的 action 误差值 反向传递回去, 作为更新凭据.
# 这是我们最终要达到的样子, 比如 q_target - q_eval = [1, 0, 0] - [-1, 0, 0] = [2, 0, 0]
# q_eval = [-1, 0, 0] 表示这一个记忆中有我选用过 action 0, 而 action 0 带来的 Q(s, a0) = -1, 所以其他的 Q(s, a1) = Q(s, a2) = 0.
# q_target = [1, 0, 0] 表示这个记忆中的 r+gamma*maxQ(s_) = 1, 而且不管在 s_ 上我们取了哪个 action,
# 我们都需要对应上 q_eval 中的 action 位置, 所以就将 1 放在了 action 0 的位置.
# 下面也是为了达到上面说的目的, 不过为了更方面让程序运算, 达到目的的过程有点不同.
# 是将 q_eval 全部赋值给 q_target, 这时 q_target-q_eval 全为 0,
# 不过 我们再根据 batch_memory 当中的 action 这个 column 来给 q_target 中的对应的 memory-action 位置来修改赋值.
# 使新的赋值为 reward + gamma * maxQ(s_), 这样 q_target-q_eval 就可以变成我们所需的样子.
# 具体在下面还有一个举例说明.
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.n_features].astype(int)
reward = batch_memory[:, self.n_features + 1]
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
"""
假如在这个 batch 中, 我们有2个提取的记忆, 根据每个记忆可以生产3个 action 的值:
q_eval =
[[1, 2, 3],
[4, 5, 6]]
q_target = q_eval =
[[1, 2, 3],
[4, 5, 6]]
然后根据 memory 当中的具体 action 位置来修改 q_target 对应 action 上的值:
比如在:
记忆 0 的 q_target 计算值是 -1, 而且我用了 action 0;
记忆 1 的 q_target 计算值是 -2, 而且我用了 action 2:
q_target =
[[-1, 2, 3],
[4, 5, -2]]
所以 (q_target - q_eval) 就变成了:
[[(-1)-(1), 0, 0],
[0, 0, (-2)-(6)]]
最后我们将这个 (q_target - q_eval) 当成误差, 反向传递会神经网络.
所有为 0 的 action 值是当时没有选择的 action, 之前有选择的 action 才有不为0的值.
我们只反向传递之前选择的 action 的值,
"""
# 训练 eval_net
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost) # 记录 cost 误差
# 逐渐增加 epsilon, 降低行为的随机性
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max | self.learn_step_counter += 1
def plot_cost(self):
import matplotlib.pyplot as plt | random_line_split |
|
utils.py | olvers import reverse
from django.test import RequestFactory
class UnAuthorized(Exception):
pass
class NotFound(Exception):
|
class NoBasesFound(Exception):
pass
logger = logging.getLogger(__name__)
class Connection(object):
def __init__(self, dropbox_access_token):
self.client = dropbox.client.DropboxClient(dropbox_access_token)
super(Connection, self).__init__()
def info(self):
account_info = self.client.account_info()
email = account_info['email']
name = account_info['display_name']
return email, name
def listing(self):
bases = []
for base in self._call('metadata', '/')['contents']:
bases.append(base['path'].lstrip('/'))
if len(bases) == 0:
raise NoBasesFound()
return bases
def get_file(self, path):
logger.debug("get file %s" % path)
return self._call('get_file', path)
def get_file_content_and_rev(self, path):
file, metadata = self._call('get_file_and_metadata', path)
content = file.read()
file.close()
rev = metadata['rev']
return content, rev
def get_file_content(self, path):
logger.debug("return content %s" % path)
return self.get_file(path).read()
def put_file(self, path, content):
f = StringIO.StringIO(content)
return self._call('put_file', path, f, True)
def delete_file(self, path):
return self._call('file_delete', path)
def create_folder(self, path):
return self._call('file_create_folder', path)
def delta(self, cursor):
return self._call('delta', cursor)
def _call(self, ms, *args):
try:
m = getattr(self.client, ms)
return m(*args)
except ErrorResponse, e:
if e.__dict__['status'] == 401:
raise UnAuthorized(e.__dict__['body']['error'])
if e.__dict__['status'] == 404:
raise NotFound(e.__dict__['body']['error'])
raise e
except Exception, e:
raise e
def metadata(self, path):
return self._call('metadata', path)
def directory_zip(self, path, zf):
logger.info("download "+path)
try:
f_metadata = self.metadata(path)
if f_metadata['is_dir']:
for content in f_metadata['contents']:
logger.info("download "+content['path'])
if content['is_dir']:
self.directory_zip(content['path'], zf)
else:
# get the file
filepath = content['path']
try:
file = self.get_file(filepath)
filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
file.close()
except ErrorResponse, e:
logger.error(e)
except ErrorResponse, e:
logger.error(e)
return zf
def message(request, level, message):
dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if level == logging.ERROR:
tag = "alert-danger"
elif level == logging.INFO:
tag = "alert-info"
elif level == logging.WARN:
tag = "alert-info"
messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)
def sign(data):
m = hashlib.md5()
m.update(data)
m.update(settings.SECRET_KEY)
return "%s-%s" % (data, m.hexdigest()[:10])
def check_code(code, name):
errors = []
class CustomMessage(object):
pass
reporter = modReporter._makeDefaultReporter()
try:
tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(name, 'problem decoding source')
else:
reporter.syntaxError(name, msg, lineno, offset, text)
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "SyntaxError"
errors.append(msg)
except Exception, e:
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "Problem decoding source"
errors.append(msg)
reporter.unexpectedError(name, 'problem decoding source')
logger.error("problem decoding source")
logger.exception()
r = []
try:
w = checker.Checker(tree, name)
r = w.messages
for message in w.messages:
logger.info(str(message))
except UnboundLocalError, e:
pass
return not (len(r) > 0 or len(errors) > 0), r, errors
def load_setting(name, fail=True):
v = None
default = getattr(defaults, name, None)
setting = getattr(settings, name, None)
if setting:
v = setting
logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
elif default:
v = default
logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
if not v and fail:
logger.error("Could not load setting %s" % name)
raise ImproperlyConfigured(name)
return v
def load_var_to_file(var):
path = "/tmp/"
fq_file = os.path.join(path, var)
content = os.environ[var]
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(fq_file):
f = open(fq_file, 'w')
f.write(content)
f.close()
if sys.platform == "darwin":
os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
else:
os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
return fq_file
def call_apy(base_name, apy_name):
logger.info("START call_apy")
try:
from core.models import Apy
apy = Apy.objects.get(name=apy_name, base__name=base_name)
logger.info("START call_apy %s" % apy.name)
url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
request_factory = RequestFactory()
request = request_factory.get(url, data={'json': "", 'base': apy.base.name,
'id': apy.id})
# TODO: fails if user admin is not created, and must have a authprofile, knockknock
request.user = get_user_model().objects.get(username='admin')
request.META['HTTP_ACCEPT'] = "text/html"
from core.views import ExecView
view = ExecView()
response = view.get(request, base=apy.base.name, id=apy.id)
logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
logger.info("END call_apy %s" % apy.name)
except Exception, e:
logger.error("ERROR call_apy")
logger.exception(e)
def profileit(func):
"""
Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
"""
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
# if not os.environ.has_key("PROFILE_DO_FUNC"):
# return func(*args, **kwargs)
retval = prof.runcall(func, *args, **kwargs)
# Note use of name from outer scope
# prof.dump_stats(name)
import pstats
s = pstats.Stats(prof).sort_stats('time')
s.print_stats(8)
return retval
return wrapper
def totimestamp(t):
logger.debug("totimestamp: %s" % t)
return (t-datetime.datetime(1970, 1, 1)).total_seconds()
def fromtimestamp(t):
logger.debug("fromtimestamp: %s" % t)
return datetime.datetime.fromtimestamp(t)
def create_jwt(user, secret):
"""The above token need to be saved in database, and a one-to-one relation should exist with the username/user_pk."""
logger.debug("Create JWT with secret %s" % secret)
# username = request.POST['username']
# password = request.POST['password'
expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
expiry_s = time.mktime(expiry.timetuple())
if user.is_authenticated():
| pass | identifier_body |
utils.py | olvers import reverse
from django.test import RequestFactory
class UnAuthorized(Exception):
pass
class NotFound(Exception):
pass
class NoBasesFound(Exception):
pass
logger = logging.getLogger(__name__)
class Connection(object):
def __init__(self, dropbox_access_token):
self.client = dropbox.client.DropboxClient(dropbox_access_token)
super(Connection, self).__init__()
def info(self):
account_info = self.client.account_info()
email = account_info['email']
name = account_info['display_name']
return email, name
def listing(self):
bases = []
for base in self._call('metadata', '/')['contents']:
bases.append(base['path'].lstrip('/'))
if len(bases) == 0:
raise NoBasesFound()
return bases
def get_file(self, path):
logger.debug("get file %s" % path)
return self._call('get_file', path)
def get_file_content_and_rev(self, path):
file, metadata = self._call('get_file_and_metadata', path)
content = file.read()
file.close()
rev = metadata['rev']
return content, rev
def get_file_content(self, path):
logger.debug("return content %s" % path)
return self.get_file(path).read()
def put_file(self, path, content):
f = StringIO.StringIO(content)
return self._call('put_file', path, f, True)
def delete_file(self, path):
return self._call('file_delete', path)
def create_folder(self, path):
return self._call('file_create_folder', path)
def delta(self, cursor):
return self._call('delta', cursor)
def _call(self, ms, *args):
try:
m = getattr(self.client, ms)
return m(*args)
except ErrorResponse, e:
if e.__dict__['status'] == 401:
raise UnAuthorized(e.__dict__['body']['error'])
if e.__dict__['status'] == 404:
raise NotFound(e.__dict__['body']['error'])
raise e
except Exception, e:
raise e
def metadata(self, path):
return self._call('metadata', path)
def directory_zip(self, path, zf):
logger.info("download "+path)
try:
f_metadata = self.metadata(path)
if f_metadata['is_dir']:
for content in f_metadata['contents']:
logger.info("download "+content['path'])
if content['is_dir']:
self.directory_zip(content['path'], zf)
else:
# get the file
filepath = content['path']
try:
file = self.get_file(filepath)
filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
file.close()
except ErrorResponse, e:
logger.error(e)
except ErrorResponse, e:
logger.error(e)
return zf
def message(request, level, message):
dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if level == logging.ERROR:
tag = "alert-danger"
elif level == logging.INFO:
tag = "alert-info"
elif level == logging.WARN:
tag = "alert-info"
messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)
def sign(data):
m = hashlib.md5()
m.update(data)
m.update(settings.SECRET_KEY)
return "%s-%s" % (data, m.hexdigest()[:10])
def | (code, name):
errors = []
class CustomMessage(object):
pass
reporter = modReporter._makeDefaultReporter()
try:
tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(name, 'problem decoding source')
else:
reporter.syntaxError(name, msg, lineno, offset, text)
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "SyntaxError"
errors.append(msg)
except Exception, e:
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "Problem decoding source"
errors.append(msg)
reporter.unexpectedError(name, 'problem decoding source')
logger.error("problem decoding source")
logger.exception()
r = []
try:
w = checker.Checker(tree, name)
r = w.messages
for message in w.messages:
logger.info(str(message))
except UnboundLocalError, e:
pass
return not (len(r) > 0 or len(errors) > 0), r, errors
def load_setting(name, fail=True):
v = None
default = getattr(defaults, name, None)
setting = getattr(settings, name, None)
if setting:
v = setting
logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
elif default:
v = default
logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
if not v and fail:
logger.error("Could not load setting %s" % name)
raise ImproperlyConfigured(name)
return v
def load_var_to_file(var):
path = "/tmp/"
fq_file = os.path.join(path, var)
content = os.environ[var]
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(fq_file):
f = open(fq_file, 'w')
f.write(content)
f.close()
if sys.platform == "darwin":
os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
else:
os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
return fq_file
def call_apy(base_name, apy_name):
logger.info("START call_apy")
try:
from core.models import Apy
apy = Apy.objects.get(name=apy_name, base__name=base_name)
logger.info("START call_apy %s" % apy.name)
url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
request_factory = RequestFactory()
request = request_factory.get(url, data={'json': "", 'base': apy.base.name,
'id': apy.id})
# TODO: fails if user admin is not created, and must have a authprofile, knockknock
request.user = get_user_model().objects.get(username='admin')
request.META['HTTP_ACCEPT'] = "text/html"
from core.views import ExecView
view = ExecView()
response = view.get(request, base=apy.base.name, id=apy.id)
logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
logger.info("END call_apy %s" % apy.name)
except Exception, e:
logger.error("ERROR call_apy")
logger.exception(e)
def profileit(func):
"""
Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
"""
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
# if not os.environ.has_key("PROFILE_DO_FUNC"):
# return func(*args, **kwargs)
retval = prof.runcall(func, *args, **kwargs)
# Note use of name from outer scope
# prof.dump_stats(name)
import pstats
s = pstats.Stats(prof).sort_stats('time')
s.print_stats(8)
return retval
return wrapper
def totimestamp(t):
logger.debug("totimestamp: %s" % t)
return (t-datetime.datetime(1970, 1, 1)).total_seconds()
def fromtimestamp(t):
logger.debug("fromtimestamp: %s" % t)
return datetime.datetime.fromtimestamp(t)
def create_jwt(user, secret):
"""The above token need to be saved in database, and a one-to-one relation should exist with the username/user_pk."""
logger.debug("Create JWT with secret %s" % secret)
# username = request.POST['username']
# password = request.POST['password'
expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
expiry_s = time.mktime(expiry.timetuple())
if user.is_authenticated():
| check_code | identifier_name |
utils.py | .append(base['path'].lstrip('/'))
if len(bases) == 0:
raise NoBasesFound()
return bases
def get_file(self, path):
logger.debug("get file %s" % path)
return self._call('get_file', path)
def get_file_content_and_rev(self, path):
file, metadata = self._call('get_file_and_metadata', path)
content = file.read()
file.close()
rev = metadata['rev']
return content, rev
def get_file_content(self, path):
logger.debug("return content %s" % path)
return self.get_file(path).read()
def put_file(self, path, content):
f = StringIO.StringIO(content)
return self._call('put_file', path, f, True)
def delete_file(self, path):
return self._call('file_delete', path)
def create_folder(self, path):
return self._call('file_create_folder', path)
def delta(self, cursor):
return self._call('delta', cursor)
def _call(self, ms, *args):
try:
m = getattr(self.client, ms)
return m(*args)
except ErrorResponse, e:
if e.__dict__['status'] == 401:
raise UnAuthorized(e.__dict__['body']['error'])
if e.__dict__['status'] == 404:
raise NotFound(e.__dict__['body']['error'])
raise e
except Exception, e:
raise e
def metadata(self, path):
return self._call('metadata', path)
def directory_zip(self, path, zf):
logger.info("download "+path)
try:
f_metadata = self.metadata(path)
if f_metadata['is_dir']:
for content in f_metadata['contents']:
logger.info("download "+content['path'])
if content['is_dir']:
self.directory_zip(content['path'], zf)
else:
# get the file
filepath = content['path']
try:
file = self.get_file(filepath)
filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
file.close()
except ErrorResponse, e:
logger.error(e)
except ErrorResponse, e:
logger.error(e)
return zf
def message(request, level, message):
dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if level == logging.ERROR:
tag = "alert-danger"
elif level == logging.INFO:
tag = "alert-info"
elif level == logging.WARN:
tag = "alert-info"
messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)
def sign(data):
m = hashlib.md5()
m.update(data)
m.update(settings.SECRET_KEY)
return "%s-%s" % (data, m.hexdigest()[:10])
def check_code(code, name):
errors = []
class CustomMessage(object):
pass
reporter = modReporter._makeDefaultReporter()
try:
tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(name, 'problem decoding source')
else:
reporter.syntaxError(name, msg, lineno, offset, text)
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "SyntaxError"
errors.append(msg)
except Exception, e:
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "Problem decoding source"
errors.append(msg)
reporter.unexpectedError(name, 'problem decoding source')
logger.error("problem decoding source")
logger.exception()
r = []
try:
w = checker.Checker(tree, name)
r = w.messages
for message in w.messages:
logger.info(str(message))
except UnboundLocalError, e:
pass
return not (len(r) > 0 or len(errors) > 0), r, errors
def load_setting(name, fail=True):
v = None
default = getattr(defaults, name, None)
setting = getattr(settings, name, None)
if setting:
v = setting
logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
elif default:
v = default
logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
if not v and fail:
logger.error("Could not load setting %s" % name)
raise ImproperlyConfigured(name)
return v
def load_var_to_file(var):
path = "/tmp/"
fq_file = os.path.join(path, var)
content = os.environ[var]
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(fq_file):
f = open(fq_file, 'w')
f.write(content)
f.close()
if sys.platform == "darwin":
os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
else:
os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
return fq_file
def call_apy(base_name, apy_name):
logger.info("START call_apy")
try:
from core.models import Apy
apy = Apy.objects.get(name=apy_name, base__name=base_name)
logger.info("START call_apy %s" % apy.name)
url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
request_factory = RequestFactory()
request = request_factory.get(url, data={'json': "", 'base': apy.base.name,
'id': apy.id})
# TODO: fails if user admin is not created, and must have a authprofile, knockknock
request.user = get_user_model().objects.get(username='admin')
request.META['HTTP_ACCEPT'] = "text/html"
from core.views import ExecView
view = ExecView()
response = view.get(request, base=apy.base.name, id=apy.id)
logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
logger.info("END call_apy %s" % apy.name)
except Exception, e:
logger.error("ERROR call_apy")
logger.exception(e)
def profileit(func):
"""
Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
"""
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
# if not os.environ.has_key("PROFILE_DO_FUNC"):
# return func(*args, **kwargs)
retval = prof.runcall(func, *args, **kwargs)
# Note use of name from outer scope
# prof.dump_stats(name)
import pstats
s = pstats.Stats(prof).sort_stats('time')
s.print_stats(8)
return retval
return wrapper
def totimestamp(t):
logger.debug("totimestamp: %s" % t)
return (t-datetime.datetime(1970, 1, 1)).total_seconds()
def fromtimestamp(t):
logger.debug("fromtimestamp: %s" % t)
return datetime.datetime.fromtimestamp(t)
def create_jwt(user, secret):
"""The above token need to be saved in database, and a one-to-one relation should exist with the username/user_pk."""
logger.debug("Create JWT with secret %s" % secret)
# username = request.POST['username']
# password = request.POST['password'
expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
expiry_s = time.mktime(expiry.timetuple())
if user.is_authenticated():
internalid = user.authprofile.internalid
payload = {'username': user.username, 'expiry': expiry_s, 'type': "AuthenticatedUser", 'internalid': internalid, 'email': user.email}
token = jws.sign(payload, secret, algorithm='HS256')
else:
payload = {'expiry':expiry_s, 'type': "AnonymousUser", 'internalid': None, 'email': None}
token = jws.sign(payload, secret, algorithm='HS256')
logger.debug("Payload: %s" % payload)
# logger.info("Token: %s" % token) | return token
def read_jwt(payload, secret): | random_line_split |
|
utils.py | olvers import reverse
from django.test import RequestFactory
class UnAuthorized(Exception):
pass
class NotFound(Exception):
pass
class NoBasesFound(Exception):
pass
logger = logging.getLogger(__name__)
class Connection(object):
def __init__(self, dropbox_access_token):
self.client = dropbox.client.DropboxClient(dropbox_access_token)
super(Connection, self).__init__()
def info(self):
account_info = self.client.account_info()
email = account_info['email']
name = account_info['display_name']
return email, name
def listing(self):
bases = []
for base in self._call('metadata', '/')['contents']:
bases.append(base['path'].lstrip('/'))
if len(bases) == 0:
raise NoBasesFound()
return bases
def get_file(self, path):
logger.debug("get file %s" % path)
return self._call('get_file', path)
def get_file_content_and_rev(self, path):
file, metadata = self._call('get_file_and_metadata', path)
content = file.read()
file.close()
rev = metadata['rev']
return content, rev
def get_file_content(self, path):
logger.debug("return content %s" % path)
return self.get_file(path).read()
def put_file(self, path, content):
f = StringIO.StringIO(content)
return self._call('put_file', path, f, True)
def delete_file(self, path):
return self._call('file_delete', path)
def create_folder(self, path):
return self._call('file_create_folder', path)
def delta(self, cursor):
return self._call('delta', cursor)
def _call(self, ms, *args):
try:
m = getattr(self.client, ms)
return m(*args)
except ErrorResponse, e:
if e.__dict__['status'] == 401:
raise UnAuthorized(e.__dict__['body']['error'])
if e.__dict__['status'] == 404:
raise NotFound(e.__dict__['body']['error'])
raise e
except Exception, e:
raise e
def metadata(self, path):
return self._call('metadata', path)
def directory_zip(self, path, zf):
logger.info("download "+path)
try:
f_metadata = self.metadata(path)
if f_metadata['is_dir']:
for content in f_metadata['contents']:
logger.info("download "+content['path'])
if content['is_dir']:
self.directory_zip(content['path'], zf)
else:
# get the file
filepath = content['path']
try:
file = self.get_file(filepath)
filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
file.close()
except ErrorResponse, e:
logger.error(e)
except ErrorResponse, e:
logger.error(e)
return zf
def message(request, level, message):
dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if level == logging.ERROR:
tag = "alert-danger"
elif level == logging.INFO:
tag = "alert-info"
elif level == logging.WARN:
tag = "alert-info"
messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)
def sign(data):
m = hashlib.md5()
m.update(data)
m.update(settings.SECRET_KEY)
return "%s-%s" % (data, m.hexdigest()[:10])
def check_code(code, name):
errors = []
class CustomMessage(object):
pass
reporter = modReporter._makeDefaultReporter()
try:
tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(name, 'problem decoding source')
else:
reporter.syntaxError(name, msg, lineno, offset, text)
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "SyntaxError"
errors.append(msg)
except Exception, e:
loc = CustomMessage()
loc.lineno = lineno
loc.offset = offset
msg = Message(name, loc)
msg.message = "Problem decoding source"
errors.append(msg)
reporter.unexpectedError(name, 'problem decoding source')
logger.error("problem decoding source")
logger.exception()
r = []
try:
w = checker.Checker(tree, name)
r = w.messages
for message in w.messages:
logger.info(str(message))
except UnboundLocalError, e:
pass
return not (len(r) > 0 or len(errors) > 0), r, errors
def load_setting(name, fail=True):
v = None
default = getattr(defaults, name, None)
setting = getattr(settings, name, None)
if setting:
v = setting
logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
elif default:
v = default
logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
if not v and fail:
logger.error("Could not load setting %s" % name)
raise ImproperlyConfigured(name)
return v
def load_var_to_file(var):
path = "/tmp/"
fq_file = os.path.join(path, var)
content = os.environ[var]
if not os.path.exists(path):
|
if not os.path.exists(fq_file):
f = open(fq_file, 'w')
f.write(content)
f.close()
if sys.platform == "darwin":
os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
else:
os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
return fq_file
def call_apy(base_name, apy_name):
logger.info("START call_apy")
try:
from core.models import Apy
apy = Apy.objects.get(name=apy_name, base__name=base_name)
logger.info("START call_apy %s" % apy.name)
url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
request_factory = RequestFactory()
request = request_factory.get(url, data={'json': "", 'base': apy.base.name,
'id': apy.id})
# TODO: fails if user admin is not created, and must have a authprofile, knockknock
request.user = get_user_model().objects.get(username='admin')
request.META['HTTP_ACCEPT'] = "text/html"
from core.views import ExecView
view = ExecView()
response = view.get(request, base=apy.base.name, id=apy.id)
logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
logger.info("END call_apy %s" % apy.name)
except Exception, e:
logger.error("ERROR call_apy")
logger.exception(e)
def profileit(func):
"""
Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
"""
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
# if not os.environ.has_key("PROFILE_DO_FUNC"):
# return func(*args, **kwargs)
retval = prof.runcall(func, *args, **kwargs)
# Note use of name from outer scope
# prof.dump_stats(name)
import pstats
s = pstats.Stats(prof).sort_stats('time')
s.print_stats(8)
return retval
return wrapper
def totimestamp(t):
logger.debug("totimestamp: %s" % t)
return (t-datetime.datetime(1970, 1, 1)).total_seconds()
def fromtimestamp(t):
logger.debug("fromtimestamp: %s" % t)
return datetime.datetime.fromtimestamp(t)
def create_jwt(user, secret):
"""The above token need to be saved in database, and a one-to-one relation should exist with the username/user_pk."""
logger.debug("Create JWT with secret %s" % secret)
# username = request.POST['username']
# password = request.POST['password'
expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
expiry_s = time.mktime(expiry.timetuple())
if user.is_authenticated():
| os.mkdir(path) | conditional_block |
courses.py | # Go back to course listing.
self.logger.debug('Returning to course list')
ic_action = {'ICAction': return_state}
self._request_page(ic_action)
self.logger.debug('Done department')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done letter %s', letter)
def _navigate_and_parse_course(self, soup):
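        # Parse the course detail page and save the course, then (when the
        # course has scheduled sections) step through every term and class
        # section, saving each one before returning to the course list.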
try:
# Course parse.
course_data = self._parse_course_data(soup)
save_course_data(course_data, self.scraper, self.location)
# Section(s) parse.
if not self._has_course_sections(soup):
self.logger.debug('No course sections. Skipping deep scrape')
else:
# Go to sections page.
ic_action = {'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO'}
soup = self._request_page(ic_action)
terms = soup.find(
'select', id='DERIVED_SAA_CRS_TERM_ALT').find_all('option')
self.logger.debug('%s terms available.', len(terms))
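                # Each option's value holds the numeric SOLUS term code that
                # is submitted with the payload below.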
for term in terms:
try:
term_number = int(term['value'])
self.logger.debug('Starting term: %s (%s)',
term.text.strip(), term_number)
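                        # Select this term and load its list of class sections.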
payload = {
'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$',
'DERIVED_SAA_CRS_TERM_ALT': term_number,
}
soup = self._request_page(payload)
                        # NOTE: PeopleSoft preserves the 'View All' expansion
                        # state for sections across every subsequent section
                        # you select, so it only needs to be expanded ONCE.
if self._is_view_sections_closed(soup):
self.logger.debug(
"'View All' tab is minimized. "
"Requesting 'View All' for current term...")
payload.update(
{'ICAction': 'CLASS_TBL_VW5$hviewall$0'})
soup = self._request_page(payload)
self.logger.debug("'View All' request complete.")
sections = self._get_sections(soup)
self.logger.debug('Total sections: %s', len(sections))
for section in sections:
try:
section_name = soup.find(
'a', id=section).text.strip().split(' ')[0]
self.logger.debug(
'Section name: %s', section_name)
# Go to sections page.
payload.update({'ICAction': section})
section_soup = self._request_page(payload)
section_base_data, section_data = (
self._parse_course_section_data(
section_soup,
course_data,
section_name,
)
)
save_section_data(
section_base_data,
section_data,
self.scraper,
self.location
)
except Exception:
self.scraper.handle_error()
# Go back to sections.
ic_action = {
'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE'
}
self._request_page(ic_action)
self.logger.debug('Done term')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done course')
except Exception:
self.scraper.handle_error()
ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'}
self._request_page(ic_action)
def _login(self):
# Emulate a SOLUS login via a Selenium webdriver. Mainly used for user
# authentication. Returns session cookies, which are retrieved and used
# for the remainder of this scraping session.
def | (func):
"""Execute Selenium task and retry upon failure."""
retries = 0
while retries < 3:
try:
return func()
except Exception as ex:
self.logger.error(
'Selenium error #%s: %s', retries + 1, ex,
exc_info=True)
retries += 1
continue
self.logger.info('Running webdriver for authentication...')
chrome_options = Options()
# Prevent images from loading.
prefs = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=chrome_options)
# Timeout for an element to be found.
driver.implicitly_wait(30)
driver.set_page_load_timeout(30)
driver.get('https://my.queensu.ca')
# Sometimes, Selenium errors out when searching for certain fields.
# Retry this routine until it succeeds.
run_selenium_routine(
lambda: driver.find_element_by_id('username').send_keys(
QUEENS_USERNAME
)
)
run_selenium_routine(
lambda: driver.find_element_by_id('password').send_keys(
QUEENS_PASSWORD
)
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('form-button').click()
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('solus-tab').click()
)
iframe = run_selenium_routine(
lambda: driver.find_element_by_id('ptifrmtgtframe')
)
driver.switch_to_frame(iframe)
run_selenium_routine(
lambda: driver.find_element_by_link_text('Search').click()
)
session_cookies = {}
for cookie in driver.get_cookies():
session_cookies[cookie['name']] = cookie['value']
driver.close()
self.logger.info('Webdriver authentication complete')
return session_cookies
def _request_page(self, params=None):
return self.scraper.http_request(
url=self.host,
params=params,
cookies=self.cookies
)
def _get_hidden_params(self, soup):
# Parses HTML for hidden values that represent SOLUS parameters. SOLUS
# uses dynamic parameters to represent user state given certain actions
# taken.
params = {}
hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
if not hidden:
hidden = soup.find(
'field', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
params.update({
x.get('name'): x.get('value') for x in hidden.find_all('input')
})
return params
def _get_departments(self, soup, letter):
# Click and expand a certain letter to see departments.
# E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc.
def update_params_and_make_request(soup, ic_action):
"""Update payload with hidden params and request page."""
payload = self._get_hidden_params(soup)
payload.update(ic_action)
soup = self._request_page(payload)
return soup
# Get all departments for a certain letter.
ic_action = {
'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter)
}
soup = update_params_and_make_request(soup, ic_action)
# Expand all department courses.
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'}
soup = update_params_and_make_request(soup, ic_action)
departments = soup.find_all(
'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1')
)
return departments
def _get_sections(self, soup):
return [sec['id'] for sec in soup.find_all(
'a', id=re.compile(r'CLASS_SECTION\$'))]
def _has_multiple_course_offerings(self, soup):
return soup.find('table', id='CRSE_OFFERINGS$scroll$0')
def _has_course_sections(self, soup):
return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO')
def _is_view_sections_closed(self, soup):
view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0')
return view_all_tab and 'View All' in view_all_tab
def _get_academic_levels(self, soup):
return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))]
def _parse_department_data(self, department):
regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$')
dept_str = department.find('span', id=regex_title).text.strip()
self.logger.debug('Department: %s', dept_str)
# Some departments have more than one hyphen, such as
# "MEI - Entrepreneur & Innov - Masters".
# Find first index of '-' to split code from name.
name_idx = dept_str.find('-')
code = dept_str[:name_idx].strip()
name = dept_str[name_idx + 2:].strip()
data = {
'id': code,
'code': code,
'name': name,
}
return data
def _parse_course_data(self, soup):
# All HTML IDs used via regular expressions.
regex_title = re.compile('DERIVED_CRSECAT_DESCR200')
regex_campus = re.compile('CAMPUS_TBL_DESCR')
regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG')
regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE')
regex_basis = re.compile(' | run_selenium_routine | identifier_name |
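run_selenium_routine in the row above retries a flaky element lookup a fixed number of times and logs each failure; a standalone sketch of that retry pattern with the Selenium call replaced by an arbitrary callable (the names and the re-raise at the end are choices made here, not taken from the scraper):

import logging
import time

logger = logging.getLogger(__name__)


def run_with_retries(func, retries=3, delay=1.0):
    """Call func(), retrying up to `retries` times before giving up."""
    last_exc = None
    for attempt in range(1, retries + 1):
        try:
            return func()
        except Exception as ex:  # broad catch, mirroring the scraper's behaviour
            last_exc = ex
            logger.error('attempt #%s failed: %s', attempt, ex, exc_info=True)
            time.sleep(delay)
    raise last_exc  # unlike the original, surface the failure instead of returning None


# usage sketch: run_with_retries(lambda: driver.find_element_by_id('username'))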
courses.py | )',
term.text.strip(), term_number)
payload = {
'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$',
'DERIVED_SAA_CRS_TERM_ALT': term_number,
}
soup = self._request_page(payload)
# NOTE: PeopleSoft maintains state of 'View All' for
# sections per every other new section you select.
# This means it only needs to be expanded ONCE.
if self._is_view_sections_closed(soup):
self.logger.debug(
"'View All' tab is minimized. "
"Requesting 'View All' for current term...")
payload.update(
{'ICAction': 'CLASS_TBL_VW5$hviewall$0'})
soup = self._request_page(payload)
self.logger.debug("'View All' request complete.")
sections = self._get_sections(soup)
self.logger.debug('Total sections: %s', len(sections))
for section in sections:
try:
section_name = soup.find(
'a', id=section).text.strip().split(' ')[0]
self.logger.debug(
'Section name: %s', section_name)
# Go to sections page.
payload.update({'ICAction': section})
section_soup = self._request_page(payload)
section_base_data, section_data = (
self._parse_course_section_data(
section_soup,
course_data,
section_name,
)
)
save_section_data(
section_base_data,
section_data,
self.scraper,
self.location
)
except Exception:
self.scraper.handle_error()
# Go back to sections.
ic_action = {
'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE'
}
self._request_page(ic_action)
self.logger.debug('Done term')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done course')
except Exception:
self.scraper.handle_error()
ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'}
self._request_page(ic_action)
def _login(self):
# Emulate a SOLUS login via a Selenium webdriver. Mainly used for user
# authentication. Returns session cookies, which are retrieved and used
# for the remainder of this scraping session.
def run_selenium_routine(func):
"""Execute Selenium task and retry upon failure."""
retries = 0
while retries < 3:
try:
return func()
except Exception as ex:
self.logger.error(
'Selenium error #%s: %s', retries + 1, ex,
exc_info=True)
retries += 1
continue
self.logger.info('Running webdriver for authentication...')
chrome_options = Options()
# Prevent images from loading.
prefs = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=chrome_options)
# Timeout for an element to be found.
driver.implicitly_wait(30)
driver.set_page_load_timeout(30)
driver.get('https://my.queensu.ca')
# Sometimes, Selenium errors out when searching for certain fields.
# Retry this routine until it succeeds.
run_selenium_routine(
lambda: driver.find_element_by_id('username').send_keys(
QUEENS_USERNAME
)
)
run_selenium_routine(
lambda: driver.find_element_by_id('password').send_keys(
QUEENS_PASSWORD
)
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('form-button').click()
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('solus-tab').click()
)
iframe = run_selenium_routine(
lambda: driver.find_element_by_id('ptifrmtgtframe')
)
driver.switch_to_frame(iframe)
run_selenium_routine(
lambda: driver.find_element_by_link_text('Search').click()
)
session_cookies = {}
for cookie in driver.get_cookies():
session_cookies[cookie['name']] = cookie['value']
driver.close()
self.logger.info('Webdriver authentication complete')
return session_cookies
def _request_page(self, params=None):
return self.scraper.http_request(
url=self.host,
params=params,
cookies=self.cookies
)
def _get_hidden_params(self, soup):
# Parses HTML for hidden values that represent SOLUS parameters. SOLUS
# uses dynamic parameters to represent user state given certain actions
# taken.
params = {}
hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
if not hidden:
hidden = soup.find(
'field', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
params.update({
x.get('name'): x.get('value') for x in hidden.find_all('input')
})
return params
def _get_departments(self, soup, letter):
# Click and expand a certain letter to see departments.
# E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc.
def update_params_and_make_request(soup, ic_action):
"""Update payload with hidden params and request page."""
payload = self._get_hidden_params(soup)
payload.update(ic_action)
soup = self._request_page(payload)
return soup
# Get all departments for a certain letter.
ic_action = {
'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter)
}
soup = update_params_and_make_request(soup, ic_action)
# Expand all department courses.
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'}
soup = update_params_and_make_request(soup, ic_action)
departments = soup.find_all(
'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1')
)
return departments
def _get_sections(self, soup):
return [sec['id'] for sec in soup.find_all(
'a', id=re.compile(r'CLASS_SECTION\$'))]
def _has_multiple_course_offerings(self, soup):
return soup.find('table', id='CRSE_OFFERINGS$scroll$0')
def _has_course_sections(self, soup):
return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO')
def _is_view_sections_closed(self, soup):
view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0')
return view_all_tab and 'View All' in view_all_tab
def _get_academic_levels(self, soup):
return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))]
def _parse_department_data(self, department):
regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$')
dept_str = department.find('span', id=regex_title).text.strip()
self.logger.debug('Department: %s', dept_str)
# Some departments have more than one hyphen, such as
# "MEI - Entrepreneur & Innov - Masters".
# Find first index of '-' to split code from name.
name_idx = dept_str.find('-')
code = dept_str[:name_idx].strip()
name = dept_str[name_idx + 2:].strip()
data = {
'id': code,
'code': code,
'name': name,
}
return data
def _parse_course_data(self, soup):
# All HTML IDs used via regular expressions.
regex_title = re.compile('DERIVED_CRSECAT_DESCR200')
regex_campus = re.compile('CAMPUS_TBL_DESCR')
regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG')
regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE')
regex_basis = re.compile('SSR_CRSE_OFF_VW_GRADING_BASIS')
regex_ac_lvl = re.compile('SSR_CRSE_OFF_VW_ACAD_CAREER')
regex_ac_grp = re.compile('ACAD_GROUP_TBL_DESCR')
regex_ac_org = re.compile('ACAD_ORG_TBL_DESCR')
regex_crse_cmps = re.compile('ACE_SSR_DUMMY_RECVW')
regex_enroll_tbl = re.compile('ACE_DERIVED_CRSECAT_SSR_GROUP2')
regex_enroll_div = re.compile('win0div')
regex_ceab = re.compile('ACE_DERIVED_CLSRCH')
def filter_course_name(soup):
| """Preprocess and reformat course name."""
course_title = soup.find('span', id=regex_title).text.strip()
name_idx = course_title.find('-')
dept_raw, course_code_raw = course_title[:name_idx - 1].split(' ')
course_name = course_title[name_idx + 1:].strip()
dept = dept_raw.encode('ascii', 'ignore').decode().strip()
course_code = course_code_raw.encode(
'ascii', 'ignore').decode().strip()
return dept, course_code, course_name | identifier_body |
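_parse_department_data and filter_course_name both split a display title on its first hyphen only, because names such as "MEI - Entrepreneur & Innov - Masters" contain several; a small worked example of that rule (sample strings invented):

def split_code_and_name(title):
    """Split 'CODE - Long Name' on the first hyphen only."""
    idx = title.find('-')
    code = title[:idx].strip()
    name = title[idx + 1:].strip()
    return code, name


print(split_code_and_name('MEI - Entrepreneur & Innov - Masters'))
# ('MEI', 'Entrepreneur & Innov - Masters')
print(split_code_and_name('ANAT - Anatomy and Cell Biology'))
# ('ANAT', 'Anatomy and Cell Biology')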
|
courses.py | is minimized. "
"Requesting 'View All' for current term...")
payload.update(
{'ICAction': 'CLASS_TBL_VW5$hviewall$0'})
soup = self._request_page(payload)
self.logger.debug("'View All' request complete.")
sections = self._get_sections(soup)
self.logger.debug('Total sections: %s', len(sections))
for section in sections:
try:
section_name = soup.find(
'a', id=section).text.strip().split(' ')[0]
self.logger.debug(
'Section name: %s', section_name)
# Go to sections page.
payload.update({'ICAction': section})
section_soup = self._request_page(payload)
section_base_data, section_data = (
self._parse_course_section_data(
section_soup,
course_data,
section_name,
)
)
save_section_data(
section_base_data,
section_data,
self.scraper,
self.location
)
except Exception:
self.scraper.handle_error()
# Go back to sections.
ic_action = {
'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE'
}
self._request_page(ic_action)
self.logger.debug('Done term')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done course')
except Exception:
self.scraper.handle_error()
ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'}
self._request_page(ic_action)
def _login(self):
# Emulate a SOLUS login via a Selenium webdriver. Mainly used for user
# authentication. Returns session cookies, which are retrieved and used
# for the remainder of this scraping session.
def run_selenium_routine(func):
"""Execute Selenium task and retry upon failure."""
retries = 0
while retries < 3:
try:
return func()
except Exception as ex:
self.logger.error(
'Selenium error #%s: %s', retries + 1, ex,
exc_info=True)
retries += 1
continue
self.logger.info('Running webdriver for authentication...')
chrome_options = Options()
# Prevent images from loading.
prefs = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=chrome_options)
# Timeout for an element to be found.
driver.implicitly_wait(30)
driver.set_page_load_timeout(30)
driver.get('https://my.queensu.ca')
# Sometimes, Selenium errors out when searching for certain fields.
# Retry this routine until it succeeds.
run_selenium_routine(
lambda: driver.find_element_by_id('username').send_keys(
QUEENS_USERNAME
)
)
run_selenium_routine(
lambda: driver.find_element_by_id('password').send_keys(
QUEENS_PASSWORD
)
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('form-button').click()
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('solus-tab').click()
)
iframe = run_selenium_routine(
lambda: driver.find_element_by_id('ptifrmtgtframe')
)
driver.switch_to_frame(iframe)
run_selenium_routine(
lambda: driver.find_element_by_link_text('Search').click()
)
session_cookies = {}
for cookie in driver.get_cookies():
session_cookies[cookie['name']] = cookie['value']
driver.close()
self.logger.info('Webdriver authentication complete')
return session_cookies
def _request_page(self, params=None):
return self.scraper.http_request(
url=self.host,
params=params,
cookies=self.cookies
)
def _get_hidden_params(self, soup):
# Parses HTML for hidden values that represent SOLUS parameters. SOLUS
# uses dynamic parameters to represent user state given certain actions
# taken.
params = {}
hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
if not hidden:
hidden = soup.find(
'field', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
params.update({
x.get('name'): x.get('value') for x in hidden.find_all('input')
})
return params
def _get_departments(self, soup, letter):
# Click and expand a certain letter to see departments.
# E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc.
def update_params_and_make_request(soup, ic_action):
"""Update payload with hidden params and request page."""
payload = self._get_hidden_params(soup)
payload.update(ic_action)
soup = self._request_page(payload)
return soup
# Get all departments for a certain letter.
ic_action = {
'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter)
}
soup = update_params_and_make_request(soup, ic_action)
# Expand all department courses.
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'}
soup = update_params_and_make_request(soup, ic_action)
departments = soup.find_all(
'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1')
)
return departments
def _get_sections(self, soup):
return [sec['id'] for sec in soup.find_all(
'a', id=re.compile(r'CLASS_SECTION\$'))]
def _has_multiple_course_offerings(self, soup):
return soup.find('table', id='CRSE_OFFERINGS$scroll$0')
def _has_course_sections(self, soup):
return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO')
def _is_view_sections_closed(self, soup):
view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0')
return view_all_tab and 'View All' in view_all_tab
def _get_academic_levels(self, soup):
return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))]
def _parse_department_data(self, department):
regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$')
dept_str = department.find('span', id=regex_title).text.strip()
self.logger.debug('Department: %s', dept_str)
# Some departments have more than one hyphen, such as
# "MEI - Entrepreneur & Innov - Masters".
# Find first index of '-' to split code from name.
name_idx = dept_str.find('-')
code = dept_str[:name_idx].strip()
name = dept_str[name_idx + 2:].strip()
data = {
'id': code,
'code': code,
'name': name,
}
return data
def _parse_course_data(self, soup):
# All HTML IDs used via regular expressions.
regex_title = re.compile('DERIVED_CRSECAT_DESCR200')
regex_campus = re.compile('CAMPUS_TBL_DESCR')
regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG')
regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE')
regex_basis = re.compile('SSR_CRSE_OFF_VW_GRADING_BASIS')
regex_ac_lvl = re.compile('SSR_CRSE_OFF_VW_ACAD_CAREER')
regex_ac_grp = re.compile('ACAD_GROUP_TBL_DESCR')
regex_ac_org = re.compile('ACAD_ORG_TBL_DESCR')
regex_crse_cmps = re.compile('ACE_SSR_DUMMY_RECVW')
regex_enroll_tbl = re.compile('ACE_DERIVED_CRSECAT_SSR_GROUP2')
regex_enroll_div = re.compile('win0div')
regex_ceab = re.compile('ACE_DERIVED_CLSRCH')
def filter_course_name(soup):
"""Preprocess and reformat course name."""
course_title = soup.find('span', id=regex_title).text.strip()
name_idx = course_title.find('-')
dept_raw, course_code_raw = course_title[:name_idx - 1].split(' ')
course_name = course_title[name_idx + 1:].strip()
dept = dept_raw.encode('ascii', 'ignore').decode().strip()
course_code = course_code_raw.encode(
'ascii', 'ignore').decode().strip()
return dept, course_code, course_name
def filter_description(soup):
"""Filter description for the course description text only."""
# TODO: Filter different text sections from description, such as
# 'NOTE', 'LEARNING HOURS', etc.
descr_raw = soup.find('span', id=regex_desc)
if not descr_raw:
return ''
# If <br/> tags exist, there will be additional information other
# than the description. Filter for description only. | if descr_raw.find_all('br'):
return descr_raw.find_all('br')[0].previous_sibling
| random_line_split |
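_get_hidden_params above harvests PeopleSoft's hidden state fields so the next request can replay them; a self-contained sketch of that extraction with BeautifulSoup (the HTML snippet is invented for illustration):

import re

from bs4 import BeautifulSoup

HTML = '''
<div id="win0divPSHIDDENFIELDS">
  <input name="ICSID" value="abc123"/>
  <input name="ICStateNum" value="7"/>
</div>
'''


def get_hidden_params(soup):
    hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
    return {x.get('name'): x.get('value') for x in hidden.find_all('input')}


soup = BeautifulSoup(HTML, 'html.parser')
print(get_hidden_params(soup))  # {'ICSID': 'abc123', 'ICStateNum': '7'}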
|
courses.py | # Go back to course listing.
self.logger.debug('Returning to course list')
ic_action = {'ICAction': return_state}
self._request_page(ic_action)
self.logger.debug('Done department')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done letter %s', letter)
def _navigate_and_parse_course(self, soup):
try:
# Course parse.
course_data = self._parse_course_data(soup)
save_course_data(course_data, self.scraper, self.location)
# Section(s) parse.
if not self._has_course_sections(soup):
self.logger.debug('No course sections. Skipping deep scrape')
else:
# Go to sections page.
ic_action = {'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO'}
soup = self._request_page(ic_action)
terms = soup.find(
'select', id='DERIVED_SAA_CRS_TERM_ALT').find_all('option')
self.logger.debug('%s terms available.', len(terms))
for term in terms:
| payload.update(
{'ICAction': 'CLASS_TBL_VW5$hviewall$0'})
soup = self._request_page(payload)
self.logger.debug("'View All' request complete.")
sections = self._get_sections(soup)
self.logger.debug('Total sections: %s', len(sections))
for section in sections:
try:
section_name = soup.find(
'a', id=section).text.strip().split(' ')[0]
self.logger.debug(
'Section name: %s', section_name)
# Go to sections page.
payload.update({'ICAction': section})
section_soup = self._request_page(payload)
section_base_data, section_data = (
self._parse_course_section_data(
section_soup,
course_data,
section_name,
)
)
save_section_data(
section_base_data,
section_data,
self.scraper,
self.location
)
except Exception:
self.scraper.handle_error()
# Go back to sections.
ic_action = {
'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE'
}
self._request_page(ic_action)
self.logger.debug('Done term')
except Exception:
self.scraper.handle_error()
self.logger.debug('Done course')
except Exception:
self.scraper.handle_error()
ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'}
self._request_page(ic_action)
def _login(self):
# Emulate a SOLUS login via a Selenium webdriver. Mainly used for user
# authentication. Returns session cookies, which are retrieved and used
# for the remainder of this scraping session.
def run_selenium_routine(func):
"""Execute Selenium task and retry upon failure."""
retries = 0
while retries < 3:
try:
return func()
except Exception as ex:
self.logger.error(
'Selenium error #%s: %s', retries + 1, ex,
exc_info=True)
retries += 1
continue
self.logger.info('Running webdriver for authentication...')
chrome_options = Options()
# Prevent images from loading.
prefs = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=chrome_options)
# Timeout for an element to be found.
driver.implicitly_wait(30)
driver.set_page_load_timeout(30)
driver.get('https://my.queensu.ca')
# Sometimes, Selenium errors out when searching for certain fields.
# Retry this routine until it succeeds.
run_selenium_routine(
lambda: driver.find_element_by_id('username').send_keys(
QUEENS_USERNAME
)
)
run_selenium_routine(
lambda: driver.find_element_by_id('password').send_keys(
QUEENS_PASSWORD
)
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('form-button').click()
)
run_selenium_routine(
lambda: driver.find_element_by_class_name('solus-tab').click()
)
iframe = run_selenium_routine(
lambda: driver.find_element_by_id('ptifrmtgtframe')
)
driver.switch_to_frame(iframe)
run_selenium_routine(
lambda: driver.find_element_by_link_text('Search').click()
)
session_cookies = {}
for cookie in driver.get_cookies():
session_cookies[cookie['name']] = cookie['value']
driver.close()
self.logger.info('Webdriver authentication complete')
return session_cookies
def _request_page(self, params=None):
return self.scraper.http_request(
url=self.host,
params=params,
cookies=self.cookies
)
def _get_hidden_params(self, soup):
# Parses HTML for hidden values that represent SOLUS parameters. SOLUS
# uses dynamic parameters to represent user state given certain actions
# taken.
params = {}
hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
if not hidden:
hidden = soup.find(
'field', id=re.compile(r'win\ddivPSHIDDENFIELDS'))
params.update({
x.get('name'): x.get('value') for x in hidden.find_all('input')
})
return params
def _get_departments(self, soup, letter):
# Click and expand a certain letter to see departments.
# E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc.
def update_params_and_make_request(soup, ic_action):
"""Update payload with hidden params and request page."""
payload = self._get_hidden_params(soup)
payload.update(ic_action)
soup = self._request_page(payload)
return soup
# Get all departments for a certain letter.
ic_action = {
'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter)
}
soup = update_params_and_make_request(soup, ic_action)
# Expand all department courses.
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'}
soup = update_params_and_make_request(soup, ic_action)
departments = soup.find_all(
'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1')
)
return departments
def _get_sections(self, soup):
return [sec['id'] for sec in soup.find_all(
'a', id=re.compile(r'CLASS_SECTION\$'))]
def _has_multiple_course_offerings(self, soup):
return soup.find('table', id='CRSE_OFFERINGS$scroll$0')
def _has_course_sections(self, soup):
return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO')
def _is_view_sections_closed(self, soup):
view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0')
return view_all_tab and 'View All' in view_all_tab
def _get_academic_levels(self, soup):
return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))]
def _parse_department_data(self, department):
regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$')
dept_str = department.find('span', id=regex_title).text.strip()
self.logger.debug('Department: %s', dept_str)
# Some departments have more than one hyphen, such as
# "MEI - Entrepreneur & Innov - Masters".
# Find first index of '-' to split code from name.
name_idx = dept_str.find('-')
code = dept_str[:name_idx].strip()
name = dept_str[name_idx + 2:].strip()
data = {
'id': code,
'code': code,
'name': name,
}
return data
def _parse_course_data(self, soup):
# All HTML IDs used via regular expressions.
regex_title = re.compile('DERIVED_CRSECAT_DESCR200')
regex_campus = re.compile('CAMPUS_TBL_DESCR')
regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG')
regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE')
regex_basis = re.compile('SSR | try:
term_number = int(term['value'])
self.logger.debug('Starting term: %s (%s)',
term.text.strip(), term_number)
payload = {
'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$',
'DERIVED_SAA_CRS_TERM_ALT': term_number,
}
soup = self._request_page(payload)
# NOTE: PeopleSoft maintains state of 'View All' for
# sections per every other new section you select.
# This means it only needs to be expanded ONCE.
if self._is_view_sections_closed(soup):
self.logger.debug(
"'View All' tab is minimized. "
"Requesting 'View All' for current term...")
| conditional_block |
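Every "click" in these rows is emulated by re-sending the hidden state plus one ICAction id; a condensed, requests-based sketch of that round trip (function and variable names are placeholders, and the real class routes this through self.scraper.http_request):

import requests


def post_ic_action(host, hidden_params, ic_action, cookies):
    """Replay the PeopleSoft hidden state together with a single ICAction 'click'."""
    payload = dict(hidden_params)  # hidden fields scraped from the previous page
    payload['ICAction'] = ic_action
    # The scraper appears to use GET with query params; adjust if the real app expects POST.
    return requests.get(host, params=payload, cookies=cookies, timeout=30)


# usage sketch: expand the 'View All' tab for the current term
# resp = post_ic_action(host, params, 'CLASS_TBL_VW5$hviewall$0', cookies)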
leetcodeFunc.go | temp := nums[i] + jw
if jw > 0 {
jw--
}
if temp >= 10 {
temp = temp - 10
jw++
}
r = append(r, temp)
}
}
} else {
for i := 0; i < lg1; i++ {
if i < lg {
temp := nums[i] + nums1[i] + jw
if jw > 0 {
jw--
}
if temp >= 10 {
temp = temp - 10
jw++
}
r = append(r, temp)
} else {
temp := nums1[i] + jw
if jw > 0 {
jw--
}
if temp >= 10 {
temp = temp - 10
jw++
}
r = append(r, temp)
}
}
}
if jw > 0 {
r = append(r, jw)
jw--
}
return SetRListNode(r)
}
type ListNode struct {
Val int
Next *ListNode
}
func SetRListNode(nums []int) (n *ListNode) {
n = &ListNode{
Val: nums[len(nums)-1],
Next: nil,
}
for i := len(nums) - 2; i >= 0; i-- {
insertEndNode(n, nums[i])
}
return
}
func SetListNode(nums []int) (n *ListNode) {
n = &ListNode{
Val: nums[0],
Next: nil,
}
for i := 1; i < len(nums); i++ {
insertEndNode(n, nums[i])
}
return
}
func insertEndNode(l *ListNode, val int) {
if l.Next == nil {
l.Next = &ListNode{
Val: val,
Next: nil,
}
return
}
insertEndNode(l.Next, val)
}
func changeToNums(n *ListNode) (nums []int) {
if n.Next == nil {
nums = append(nums, n.Val)
return
}
for {
nums = append(nums, findIntFromListNode(n))
if n.Next == nil {
nums = append(nums, n.Val)
break
}
}
return
}
func findIntFromListNode(n *ListNode) int {
if n.Next == nil {
return n.Val
}
if n.Next.Next == nil {
a := n.Next.Val
n.Next = nil
return a
}
return findIntFromListNode(n.Next)
}
/**
剑指 Offer 61. Straight in a deck of playing cards
*/
func isStraight(nums []int) bool {
l := len(nums)
count := 0
if l != 5 {
return false
}
sort.Ints(nums)
for i := 0; i < l-1; i++ {
if nums[i] == 0 {
count++
} else {
if nums[i+1]-nums[i] > 1 {
count = count - (nums[i+1] - nums[i] - 1)
}
if nums[i+1]-nums[i] == 0 {
return false
}
}
}
if count < 0 {
return false
}
return true
}
/**
1343. Number of sub-arrays of size K whose average is greater than or equal to the threshold
*/
func numOfSubarrays(arr []int, k int, threshold int) int {
count := 0
sum := 0
num := k * threshold
for i := 0; i < len(arr); i++ {
if i < k {
sum += arr[i]
} else {
if i == k {
if sum >= num {
count++
}
}
sum = sum - arr[i-k] + arr[i]
if sum >= num {
count++
}
}
}
if k == len(arr) {
if sum >= num {
count++
}
}
return count
}
/**
1330. Reverse a subarray to maximize the array value
Illustrated on a number line:
a-----b
c------d
The gain is |c-b|*2, so b should be as small and c as large as possible to maximize the value
*/
func maxValueAfterReverse(nums []int) int {
sum := 0
length := len(nums)
a := -100000 // max over adjacent pairs of the smaller endpoint
b := 100000  // min over adjacent pairs of the larger endpoint
for i := 0; i < length-1; i++ {
sum += IntAbs(nums[i] - nums[i+1])
a = IntMax(a, IntMin(nums[i], nums[i+1]))
b = IntMin(b, IntMax(nums[i], nums[i+1]))
}
ans := sum
ans = IntMax(ans, 2*(a-b)+sum)
for i := 0; i < length-1; i++ {
if i > 0 {
minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1])
ans = IntMax(ans, sum+minus)
minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i])
ans = IntMax(ans, sum+minus)
}
//for j:=i+1;j<length-1;j++ {
// minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1]))
// ans = IntMax(ans,sum+minus)
//}
}
return ans
}
func IntAbs(a int) int {
if a < 0 {
return -a
}
return a
}
func IntMin(a, b int) int {
if a > b {
return b
}
return a
}
func IntMax(a, b int) int {
if a < b {
return b
}
return a
}
/**
1258. Find the numbers with an even number of digits
*/
func findNumbers(nums []int) int {
count := 0
for _, value := range nums {
valueStr := strconv.Itoa(value)
if len(valueStr)%2 == 0 {
count++
}
}
return count
}
/*
998. Maximum Binary Tree II
*/
func insertIntoMaxTree(root *TreeNode, val int) *TreeNode {
// nil subtree: return a new node holding val
if root == nil {
return &TreeNode{
Val: val,
Left: nil,
Right: nil,
}
}
// val is larger than the root: the old tree becomes the left child and the new node becomes the root
if root.Val < val {
return &TreeNode{
Val: val,
Left: root,
Right: nil,
}
}
root.Right = insertIntoMaxTree(root.Right, val)
return root
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
/**
654. Maximum Binary Tree, recursive solution
*/
func constructMaximumBinaryTree(nums []int) *TreeNode {
return construct(nums, 0, len(nums))
}
func construct(nums []int, l, r int) *TreeNode {
if l == r {
return nil
}
max_index := max(nums, l, r)
root := &TreeNode{
Val: nums[max_index],
Left: construct(nums, l, max_index),
Right: construct(nums, max_index+1, r),
}
return root
}
func max(nums []int, l, r int) int {
max_index := l
for i := l; i < r; i++ {
if nums[i] > nums[max_index] {
max_index = i
}
}
return max_index
}
/*
1300. Sum of Mutated Array Closest to Target
*/
func findBestValue(arr []int, target int) int {
// sort in ascending order
sort.Ints(arr)
length := len(arr)
presum := 0
endLen := length
for index, value := range arr {
k := endLen - index
// compare (sum of the unchanged prefix) + value * (number of remaining items) against target
d := presum + value*k - target
if d >= 0 {
//fmt.Println(d,value,endLen,index)
// rounding: a fractional part <= 0.5 rounds down, above 0.5 rounds up
c := value - (d+k/2)/k
return c
}
presum += value
}
return arr[length-1]
}
/**
1052. Grumpy Bookstore Owner | mp := nums[i] + nums1[i] + jw
if jw > 0 {
jw--
}
if temp >= 10 {
temp = temp - 10
jw++
}
r = append(r, temp)
} else {
| conditional_block |
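numOfSubarrays above keeps one running window sum instead of re-adding k elements at every position; the same sliding-window idea as a compact Python sketch (function name and test data are illustrative):

def count_windows_at_least(arr, k, threshold):
    """Count length-k windows whose sum is at least k * threshold."""
    if len(arr) < k:
        return 0
    need = k * threshold
    window = sum(arr[:k])
    count = 1 if window >= need else 0
    for i in range(k, len(arr)):
        window += arr[i] - arr[i - k]  # slide: add the new element, drop the oldest
        if window >= need:
            count += 1
    return count


print(count_windows_at_least([2, 2, 2, 2, 5, 5, 5, 8], 3, 4))  # 3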
|
leetcodeFunc.go | if sum >= num {
count++
}
}
}
if k == len(arr) {
if sum >= num {
count++
}
}
return count
}
/**
1330. Reverse a subarray to maximize the array value
Illustrated on a number line:
a-----b
c------d
The gain is |c-b|*2, so b should be as small and c as large as possible to maximize the value
*/
func maxValueAfterReverse(nums []int) int {
sum := 0
length := len(nums)
a := -100000 // max over adjacent pairs of the smaller endpoint
b := 100000  // min over adjacent pairs of the larger endpoint
for i := 0; i < length-1; i++ {
sum += IntAbs(nums[i] - nums[i+1])
a = IntMax(a, IntMin(nums[i], nums[i+1]))
b = IntMin(b, IntMax(nums[i], nums[i+1]))
}
ans := sum
ans = IntMax(ans, 2*(a-b)+sum)
for i := 0; i < length-1; i++ {
if i > 0 {
minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1])
ans = IntMax(ans, sum+minus)
minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i])
ans = IntMax(ans, sum+minus)
}
//for j:=i+1;j<length-1;j++ {
// minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1]))
// ans = IntMax(ans,sum+minus)
//}
}
return ans
}
func IntAbs(a int) int {
if a < 0 {
return -a
}
return a
}
func IntMin(a, b int) int {
if a > b {
return b
}
return a
}
func IntMax(a, b int) int {
if a < b {
return b
}
return a
}
/**
1258. Find the numbers with an even number of digits
*/
func findNumbers(nums []int) int {
count := 0
for _, value := range nums {
valueStr := strconv.Itoa(value)
if len(valueStr)%2 == 0 {
count++
}
}
return count
| Maximum Binary Tree II
*/
func insertIntoMaxTree(root *TreeNode, val int) *TreeNode {
// nil subtree: return a new node holding val
if root == nil {
return &TreeNode{
Val: val,
Left: nil,
Right: nil,
}
}
// val is larger than the root: the old tree becomes the left child and the new node becomes the root
if root.Val < val {
return &TreeNode{
Val: val,
Left: root,
Right: nil,
}
}
root.Right = insertIntoMaxTree(root.Right, val)
return root
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
/**
654. Maximum Binary Tree, recursive solution
*/
func constructMaximumBinaryTree(nums []int) *TreeNode {
return construct(nums, 0, len(nums))
}
func construct(nums []int, l, r int) *TreeNode {
if l == r {
return nil
}
max_index := max(nums, l, r)
root := &TreeNode{
Val: nums[max_index],
Left: construct(nums, l, max_index),
Right: construct(nums, max_index+1, r),
}
return root
}
func max(nums []int, l, r int) int {
max_index := l
for i := l; i < r; i++ {
if nums[i] > nums[max_index] {
max_index = i
}
}
return max_index
}
/*
1300. Sum of Mutated Array Closest to Target
*/
func findBestValue(arr []int, target int) int {
// sort in ascending order
sort.Ints(arr)
length := len(arr)
presum := 0
endLen := length
for index, value := range arr {
k := endLen - index
// compare (sum of the unchanged prefix) + value * (number of remaining items) against target
d := presum + value*k - target
if d >= 0 {
//fmt.Println(d,value,endLen,index)
// rounding: a fractional part <= 0.5 rounds down, above 0.5 rounds up
c := value - (d+k/2)/k
return c
}
presum += value
}
return arr[length-1]
}
/**
1052. Grumpy Bookstore Owner
*/
func maxSatisfied(customers []int, grumpy []int, X int) int {
count := 0
// baseline: customers served while the owner is not grumpy
for i := 0; i < len(customers); i++ {
if grumpy[i] == 0 {
count += customers[i]
// set to 0 so the window only counts grumpy minutes
customers[i] = 0
}
}
max := 0
temp := 0
for i := 0; i < len(customers); i++ {
//for j:=0 ;j<X;j++{
// if grumpy[i+j]==1{
// temp+=customers[i+j]
// }
//}
if i < X {
max += customers[i]
if temp < max {
temp = max
}
} else {
temp = temp + customers[i] - customers[i-X]
if temp > max {
max = temp
}
}
}
return count + max
}
/**
747. Largest Number At Least Twice of Others
*/
func dominantIndex(nums []int) int {
if len(nums) <= 1 {
return 0
}
big := nums[0]
secdbig := nums[1]
if nums[1] >= nums[0] {
big = nums[0]
secdbig = nums[1]
}
count := 0
for i := 1; i < len(nums); i++ {
if big < nums[i] {
secdbig = big
big = nums[i]
count = i
}
if big > nums[i] && nums[i] > secdbig {
secdbig = nums[i]
}
}
if secdbig*2 <= big {
return count
}
return -1
}
//221. Maximal Square
func maximalSquare(matrix [][]byte) int {
side := 0
for i := 0; i < len(matrix); i++ {
for j := 0; j < len(matrix[i]); j++ {
matrix[i][j] = byte(int(matrix[i][j]) % 48)
if i < 1 || j < 1 {
if matrix[i][j] == 1 && side < 1 {
side = 1
}
continue
} else {
if matrix[i][j] == 1 {
temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1
matrix[i][j] = byte(temp)
if temp > side {
side = temp
}
}
}
}
}
return side * side
}
//1201. Ugly Number III
//Design a program that finds the n-th ugly number,
//where an ugly number is a positive integer divisible by a, b, or c.
// x/a + x/b + x/c - x/ab - x/ac - x/bc + x/abc
func nthUglyNumber3(n int, a int, b int, c int) int {
ab := int64(lcm(a, b))
ac := int64(lcm(a, c))
bc := int64(lcm(b, c))
abc := int64(lcm(lcm(a, b), c))
l := int64(min(a, min(b, c)))
r := int64(2 * 10e9)
//while (l < r)
for {
if l >= r {
break
}
// binary search: probe the midpoint, halving the range each step
m := l + (r-l)/2
count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc
// if the computed count is less than n, move l up to m+1
if count < int64(n) {
l = m + 1
} else {
// otherwise keep bisecting the lower half
r = m
}
}
return int(l)
}
func min(i int, j | }
/*
998. | identifier_name |
leetcodeFunc.go | if sum >= num {
count++
}
}
}
if k == len(arr) {
if sum >= num {
count++
}
}
return count
}
/**
1330. Reverse a subarray to maximize the array value
Illustrated on a number line:
a-----b
c------d
The gain is |c-b|*2, so b should be as small and c as large as possible to maximize the value
*/
func maxValueAfterReverse(nums []int) int {
sum := 0
length := len(nums)
a := -100000 // max over adjacent pairs of the smaller endpoint
b := 100000  // min over adjacent pairs of the larger endpoint
for i := 0; i < length-1; i++ {
sum += IntAbs(nums[i] - nums[i+1])
a = IntMax(a, IntMin(nums[i], nums[i+1]))
b = IntMin(b, IntMax(nums[i], nums[i+1]))
}
ans := sum
ans = IntMax(ans, 2*(a-b)+sum)
for i := 0; i < length-1; i++ {
if i > 0 {
minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1])
ans = IntMax(ans, sum+minus)
minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i])
ans = IntMax(ans, sum+minus)
}
//for j:=i+1;j<length-1;j++ {
// minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1]))
// ans = IntMax(ans,sum+minus)
//}
}
return ans
}
func IntAbs(a int) int {
if a < 0 {
return -a
}
return a
}
func IntMin(a, b int) int {
if a > b {
return b
}
return a
}
func IntMax(a, b int) int {
if a < b {
return b
}
return a
}
/**
1258. Find the numbers with an even number of digits
*/
func findNumbers(nums []int) int {
count := 0
for _, value := range nums {
valueStr := strconv.Itoa(value)
if len(valueStr)%2 == 0 {
count++
}
}
return count
}
/*
998. Maximum Binary Tree II
*/
func insertIntoMaxTree(root *TreeNode, val int) *TreeNode {
// nil subtree: return a new node holding val
if root == nil {
return &TreeNode{
Val: val,
Left: nil,
Right: nil,
}
}
// val is larger than the root: the old tree becomes the left child and the new node becomes the root
if root.Val < val {
return &TreeNode{
Val: val,
Left: root,
Right: nil,
}
}
root.Right = insertIntoMaxTree(root.Right, val)
return root
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
/**
654. Maximum Binary Tree, recursive solution
*/
func constructMaximumBinaryTree(nums []int) *TreeNode {
return construct(nums, 0, len(nums))
}
func construct(nums []int, l, r int) *TreeNode {
if l == r {
return nil
}
max_index := max(nums, l, r)
root := &TreeNode{
Val: nums[max_index],
Left: construct(nums, l, max_index),
Right: construct(nums, max_index+1, r),
}
return root
}
func max(nums []int, l, r int) int {
max_index := l
for i := l; i < r; i++ {
if nums[i] > nums[max_index] {
max_index = i
}
}
return max_index
}
/*
1300. Sum of Mutated Array Closest to Target
*/
func findBestValue(arr []int, target int) int {
// sort in ascending order
sort.Ints(arr)
length := len(arr)
| arget 的比较
d := presum + value*k - target
if d >= 0 {
//fmt.Println(d,value,endLen,index)
// rounding: a fractional part <= 0.5 rounds down, above 0.5 rounds up
c := value - (d+k/2)/k
return c
}
presum += value
}
return arr[length-1]
}
/**
1052. Grumpy Bookstore Owner
*/
func maxSatisfied(customers []int, grumpy []int, X int) int {
count := 0
// baseline: customers served while the owner is not grumpy
for i := 0; i < len(customers); i++ {
if grumpy[i] == 0 {
count += customers[i]
// set to 0 so the window only counts grumpy minutes
customers[i] = 0
}
}
max := 0
temp := 0
for i := 0; i < len(customers); i++ {
//for j:=0 ;j<X;j++{
// if grumpy[i+j]==1{
// temp+=customers[i+j]
// }
//}
if i < X {
max += customers[i]
if temp < max {
temp = max
}
} else {
temp = temp + customers[i] - customers[i-X]
if temp > max {
max = temp
}
}
}
return count + max
}
/**
747. Largest Number At Least Twice of Others
*/
func dominantIndex(nums []int) int {
if len(nums) <= 1 {
return 0
}
big := nums[0]
secdbig := nums[1]
if nums[1] >= nums[0] {
big = nums[0]
secdbig = nums[1]
}
count := 0
for i := 1; i < len(nums); i++ {
if big < nums[i] {
secdbig = big
big = nums[i]
count = i
}
if big > nums[i] && nums[i] > secdbig {
secdbig = nums[i]
}
}
if secdbig*2 <= big {
return count
}
return -1
}
//221. Maximal Square
func maximalSquare(matrix [][]byte) int {
side := 0
for i := 0; i < len(matrix); i++ {
for j := 0; j < len(matrix[i]); j++ {
matrix[i][j] = byte(int(matrix[i][j]) % 48)
if i < 1 || j < 1 {
if matrix[i][j] == 1 && side < 1 {
side = 1
}
continue
} else {
if matrix[i][j] == 1 {
temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1
matrix[i][j] = byte(temp)
if temp > side {
side = temp
}
}
}
}
}
return side * side
}
//1201. Ugly Number III
//Design a program that finds the n-th ugly number,
//where an ugly number is a positive integer divisible by a, b, or c.
// x/a + x/b + x/c - x/ab - x/ac - x/bc + x/abc
func nthUglyNumber3(n int, a int, b int, c int) int {
ab := int64(lcm(a, b))
ac := int64(lcm(a, c))
bc := int64(lcm(b, c))
abc := int64(lcm(lcm(a, b), c))
l := int64(min(a, min(b, c)))
r := int64(2 * 10e9)
//while (l < r)
for {
if l >= r {
break
}
// binary search: probe the midpoint, halving the range each step
m := l + (r-l)/2
count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc
// if the computed count is less than n, move l up to m+1
if count < int64(n) {
l = m + 1
} else {
// otherwise keep bisecting the lower half
r = m
}
}
return int(l)
}
func min(i int, | presum := 0
endLen := length
for index, value := range arr {
k := endLen - index
// 条件 未改变的和 + 当前 value*剩余的项 与 t | identifier_body |
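nthUglyNumber3 above binary-searches over a count computed by inclusion-exclusion, and it relies on lcm/gcd helpers that are not shown in this excerpt; a Python sketch of the same counting-plus-bisection idea (math.gcd stands in for whatever the Go file defines):

from math import gcd


def lcm(a, b):
    return a * b // gcd(a, b)


def divisible_up_to(x, a, b, c):
    """How many integers in [1, x] are divisible by a, b or c (inclusion-exclusion)."""
    return (x // a + x // b + x // c
            - x // lcm(a, b) - x // lcm(a, c) - x // lcm(b, c)
            + x // lcm(lcm(a, b), c))


def nth_ugly(n, a, b, c):
    lo, hi = 1, 2 * 10 ** 9
    while lo < hi:
        mid = (lo + hi) // 2
        if divisible_up_to(mid, a, b, c) < n:
            lo = mid + 1  # too few multiples below mid, the answer is larger
        else:
            hi = mid
    return lo


print(nth_ugly(3, 2, 3, 5))  # 4, since the sequence starts 2, 3, 4, ...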
leetcodeFunc.go | if sum >= num {
count++
}
}
}
if k == len(arr) {
if sum >= num {
count++
} | 1330. Reverse a subarray to maximize the array value
Illustrated on a number line:
a-----b
c------d
The gain is |c-b|*2, so b should be as small and c as large as possible to maximize the value
*/
func maxValueAfterReverse(nums []int) int {
sum := 0
length := len(nums)
a := -100000 // max over adjacent pairs of the smaller endpoint
b := 100000  // min over adjacent pairs of the larger endpoint
for i := 0; i < length-1; i++ {
sum += IntAbs(nums[i] - nums[i+1])
a = IntMax(a, IntMin(nums[i], nums[i+1]))
b = IntMin(b, IntMax(nums[i], nums[i+1]))
}
ans := sum
ans = IntMax(ans, 2*(a-b)+sum)
for i := 0; i < length-1; i++ {
if i > 0 {
minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1])
ans = IntMax(ans, sum+minus)
minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i])
ans = IntMax(ans, sum+minus)
}
//for j:=i+1;j<length-1;j++ {
// minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1]))
// ans = IntMax(ans,sum+minus)
//}
}
return ans
}
func IntAbs(a int) int {
if a < 0 {
return -a
}
return a
}
func IntMin(a, b int) int {
if a > b {
return b
}
return a
}
func IntMax(a, b int) int {
if a < b {
return b
}
return a
}
/**
1258. Find the numbers with an even number of digits
*/
func findNumbers(nums []int) int {
count := 0
for _, value := range nums {
valueStr := strconv.Itoa(value)
if len(valueStr)%2 == 0 {
count++
}
}
return count
}
/*
998. Maximum Binary Tree II
*/
func insertIntoMaxTree(root *TreeNode, val int) *TreeNode {
// nil subtree: return a new node holding val
if root == nil {
return &TreeNode{
Val: val,
Left: nil,
Right: nil,
}
}
// val is larger than the root: the old tree becomes the left child and the new node becomes the root
if root.Val < val {
return &TreeNode{
Val: val,
Left: root,
Right: nil,
}
}
root.Right = insertIntoMaxTree(root.Right, val)
return root
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
/**
654. Maximum Binary Tree, recursive solution
*/
func constructMaximumBinaryTree(nums []int) *TreeNode {
return construct(nums, 0, len(nums))
}
func construct(nums []int, l, r int) *TreeNode {
if l == r {
return nil
}
max_index := max(nums, l, r)
root := &TreeNode{
Val: nums[max_index],
Left: construct(nums, l, max_index),
Right: construct(nums, max_index+1, r),
}
return root
}
func max(nums []int, l, r int) int {
max_index := l
for i := l; i < r; i++ {
if nums[i] > nums[max_index] {
max_index = i
}
}
return max_index
}
/*
1300. Sum of Mutated Array Closest to Target
*/
func findBestValue(arr []int, target int) int {
// sort in ascending order
sort.Ints(arr)
length := len(arr)
presum := 0
endLen := length
for index, value := range arr {
k := endLen - index
// 条件 未改变的和 + 当前 value*剩余的项 与 target 的比较
d := presum + value*k - target
if d >= 0 {
//fmt.Println(d,value,endLen,index)
// c小于等于0.5那么取小 大于0.5 去取上值
c := value - (d+k/2)/k
return c
}
presum += value
}
return arr[length-1]
}
/**
1052. Grumpy Bookstore Owner
*/
func maxSatisfied(customers []int, grumpy []int, X int) int {
count := 0
// baseline: customers served while the owner is not grumpy
for i := 0; i < len(customers); i++ {
if grumpy[i] == 0 {
count += customers[i]
//设为0
customers[i] = 0
}
}
max := 0
temp := 0
for i := 0; i < len(customers); i++ {
//for j:=0 ;j<X;j++{
// if grumpy[i+j]==1{
// temp+=customers[i+j]
// }
//}
if i < X {
max += customers[i]
if temp < max {
temp = max
}
} else {
temp = temp + customers[i] - customers[i-X]
if temp > max {
max = temp
}
}
}
return count + max
}
/**
747. Largest Number At Least Twice of Others
*/
func dominantIndex(nums []int) int {
if len(nums) <= 1 {
return 0
}
big := nums[0]
secdbig := nums[1]
if nums[1] >= nums[0] {
big = nums[0]
secdbig = nums[1]
}
count := 0
for i := 1; i < len(nums); i++ {
if big < nums[i] {
secdbig = big
big = nums[i]
count = i
}
if big > nums[i] && nums[i] > secdbig {
secdbig = nums[i]
}
}
if secdbig*2 <= big {
return count
}
return -1
}
//221. Maximal Square
func maximalSquare(matrix [][]byte) int {
side := 0
for i := 0; i < len(matrix); i++ {
for j := 0; j < len(matrix[i]); j++ {
matrix[i][j] = byte(int(matrix[i][j]) % 48)
if i < 1 || j < 1 {
if matrix[i][j] == 1 && side < 1 {
side = 1
}
continue
} else {
if matrix[i][j] == 1 {
temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1
matrix[i][j] = byte(temp)
if temp > side {
side = temp
}
}
}
}
}
return side * side
}
//1201. Ugly Number III
//Design a program that finds the n-th ugly number,
//where an ugly number is a positive integer divisible by a, b, or c.
// x/a + x/b + x/c - x/ab - x/ac - x/bc + x/abc
func nthUglyNumber3(n int, a int, b int, c int) int {
ab := int64(lcm(a, b))
ac := int64(lcm(a, c))
bc := int64(lcm(b, c))
abc := int64(lcm(lcm(a, b), c))
l := int64(min(a, min(b, c)))
r := int64(2 * 10e9)
//while (l < r)
for {
if l >= r {
break
}
// binary search: probe the midpoint, halving the range each step
m := l + (r-l)/2
count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc
// if the computed count is less than n, move l up to m+1
if count < int64(n) {
l = m + 1
} else {
// otherwise keep bisecting the lower half
r = m
}
}
return int(l)
}
func min(i int, j | }
return count
}
/** | random_line_split |
types.go | specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
)
// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
// plugins. From this config, the controller-manager binary will create many instances of
// volume.VolumeConfig, each containing only the configuration needed for that plugin which
// are then passed to the appropriate plugin. The ControllerManager binary is the only part
// of the code which knows what plugins are supported and which flags correspond to each plugin.
type VolumeConfiguration struct {
// enableHostPathProvisioning enables HostPath PV provisioning when running without a
// cloud provider. This allows testing and development of provisioning features. HostPath
// provisioning is not supported in any way, won't work in a multi-node cluster, and
// should not be used for anything other than testing or development.
EnableHostPathProvisioning *bool
// enableDynamicProvisioning enables the provisioning of volumes when running within an environment
// that supports dynamic provisioning. Defaults to true.
EnableDynamicProvisioning *bool
// volumePluginDir is the full path of the directory in which the flex
// volume plugin should search for additional third party volume plugins
FlexVolumePluginDir string
}
// GroupResource describes an group resource.
type GroupResource struct {
// group is the group portion of the GroupResource.
Group string
// resource is the resource portion of the GroupResource.
Resource string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeControllerManagerConfiguration contains elements describing kube-controller manager.
type KubeControllerManagerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Generic holds configuration for a generic controller-manager
Generic GenericControllerManagerConfiguration
// KubeCloudSharedConfiguration holds configuration for shared related features
// both in cloud controller manager and kube-controller manager.
KubeCloudShared KubeCloudSharedConfiguration
// CSRSigningControllerConfiguration holds configuration for
// CSRSigningController related features.
CSRSigningController CSRSigningControllerConfiguration
// DeprecatedControllerConfiguration holds configuration for some deprecated
// features.
DeprecatedController DeprecatedControllerConfiguration
// EndpointControllerConfiguration holds configuration for EndpointController
// related features. | EndpointController EndpointControllerConfiguration
// GarbageCollectorControllerConfiguration holds configuration for
// GarbageCollectorController related features.
GarbageCollectorController GarbageCollectorControllerConfiguration
// NamespaceControllerConfiguration holds configuration for NamespaceController
// related features.
NamespaceController NamespaceControllerConfiguration
// PodGCControllerConfiguration holds configuration for PodGCController
// related features.
PodGCController PodGCControllerConfiguration
// ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features.
ReplicaSetController ReplicaSetControllerConfiguration
// SAControllerConfiguration holds configuration for ServiceAccountController
// related features.
SAController SAControllerConfiguration
// TenantControllerConfiguration holds configuration for TenantController
// related features.
TenantController TenantControllerConfiguration `json:"tenantController,omitempty"`
}
// GenericControllerManagerConfiguration holds configuration for a generic controller-manager.
type GenericControllerManagerConfiguration struct {
// port is the port that the controller-manager's http service runs on.
Port int32
// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
Address string
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod.
MinResyncPeriod metav1.Duration
// ClientConnection specifies the kubeconfig file and client connection
// settings for the proxy server to use when communicating with the apiserver.
ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration
// How long to wait between starting controller managers
ControllerStartInterval metav1.Duration
// leaderElection defines the configuration of leader election client.
LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration
// Controllers is the list of controllers to enable or disable
// '*' means "all enabled by default controllers"
// 'foo' means "enable 'foo'"
// '-foo' means "disable 'foo'"
// first item for a particular name wins
Controllers []string
// DebuggingConfiguration holds configuration for Debugging related features.
Debugging componentbaseconfigv1alpha1.DebuggingConfiguration
}
// KubeCloudSharedConfiguration contains elements shared by both kube-controller manager
// and cloud-controller manager, but not genericconfig.
type KubeCloudSharedConfiguration struct {
// CloudProviderConfiguration holds configuration for CloudProvider related features.
CloudProvider CloudProviderConfiguration
// externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
// It is currently used by the in repo cloud providers to handle node and volume control in the KCM.
ExternalCloudVolumePlugin string
// useServiceAccountCredentials indicates whether controllers should be run with
// individual service account credentials.
UseServiceAccountCredentials bool
// run with untagged cloud instances
AllowUntaggedCloud bool
// routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.
RouteReconciliationPeriod metav1.Duration
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod metav1.Duration
// clusterName is the instance prefix for the cluster.
ClusterName string
// clusterCIDR is CIDR Range for Pods in cluster.
ClusterCIDR string
// AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// ConfigureCloudRoutes is true, to be set on the cloud provider.
AllocateNodeCIDRs bool
// CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
CIDRAllocatorType string
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes *bool
// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
// periods will result in fewer calls to cloud provider, but may delay addition
// of new nodes to cluster.
NodeSyncPeriod metav1.Duration
}
// CloudProviderConfiguration contains basically elements about cloud provider.
type CloudProviderConfiguration struct {
// Name is the provider for cloud services.
Name string
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string
}
// CSRSigningControllerConfiguration contains elements describing CSRSigningController.
type CSRSigningControllerConfiguration struct {
// clusterSigningCertFile is the filename containing a PEM-encoded
// X509 CA certificate used to issue cluster-scoped certificates
ClusterSigningCertFile string
// clusterSigningCertFile is the filename containing a PEM-encoded
// RSA or ECDSA private key used to issue cluster-scoped certificates
ClusterSigningKeyFile string
// clusterSigningDuration is the length of duration signed certificates
// will be given.
ClusterSigningDuration metav1.Duration
}
// DeprecatedControllerConfiguration contains elements be deprecated.
type DeprecatedControllerConfiguration struct {
// DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQPS float32
// DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int32
}
// EndpointControllerConfiguration contains elements describing EndpointController.
type EndpointControllerConfiguration struct {
// concurrentEndpointSyncs is the number of endpoint syncing operations
// that will be done concurrently. Larger number = faster endpoint updating,
// but more CPU (and network) load.
ConcurrentEndpointSyncs int32
}
// GarbageCollectorControllerConfiguration contains elements describing GarbageCollectorController.
type GarbageCollectorControllerConfiguration struct {
// enables the generic garbage collector. MUST be synced with the
// corresponding flag of the kube-apiserver. WARNING: the generic garbage
// collector is an alpha feature.
EnableGarbageCollector *bool
// concurrentGCSyncs is the number of garbage collector workers that are
// allowed to sync concurrently.
ConcurrentGCSyncs int32
// gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
GCIgnoredResources []GroupResource
}
// NamespaceControllerConfiguration contains elements describing NamespaceController.
type NamespaceControllerConfiguration struct {
// namespaceSyncPeriod is the period for syncing namespace life-cycle
// updates.
NamespaceSyncPeriod metav1.Duration
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int32
}
// PodGCControllerConfiguration contains elements describing PodGCController.
type PodGCControllerConfiguration struct {
// terminatedPodGCThreshold is the number of terminated pods that can exist
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold int32
}
// ReplicaSetControllerConfiguration contains elements describing ReplicaSetController.
type ReplicaSetControllerConfiguration struct {
// concurrentRSSyncs is the number of replica sets that are allowed to | random_line_split |
|
html_tools.py | (out_dir, chapter_dirs, static_host):
build_dir = os.path.dirname(out_dir)
if static_host and not static_host.endswith('/'):
static_host += '/'
for path in _walk(build_dir):
rewrite_file_links(path, out_dir, chapter_dirs, static_host)
def rewrite_file_links(path, root, chapter_dirs, static_host):
content = _read_file(path)
link_elements = [
('a', 'href'),
]
other_elements = [
('img', 'src'),
('script', 'src'),
('iframe', 'src'),
('link', 'href'),
('video', 'poster'),
('source', 'src'),
]
if path.endswith(".yaml"):
# YAML files are handled separately because rewriting links with
# a regexp could add YAML syntax errors to the file if quotes are not
# escaped properly. Escaping is now taken care of by the YAML module.
yaml_data_dict = yaml.safe_load(content)
recursive_rewrite_links(
yaml_data_dict,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')),
)
# _rst_srcpath is an internal value stored in the YAML file.
# It is the path of the RST source file that contains the exercise.
# The path is needed for fixing relative URLs, usually links pointing
# to other chapters and exercises. It may have multiple values for
# different languages in multilingual courses or only one string value
# in monolingual courses.
content = yaml.safe_dump(yaml_data_dict, default_flow_style=False,
allow_unicode=True)
else:
content = rewrite_links(
content,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
)
_write_file(path, content)
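# A minimal illustrative sketch of the YAML round-trip used above: parse the
# exercise config, rewrite plain Python values, and dump it back so quoting
# stays valid. The data and the transform here are hypothetical.
def _demo_yaml_roundtrip_rewrite():
    import yaml
    raw = 'title: "Exercise 1"\nurl: "module01/chapter.html"\n'
    data = yaml.safe_load(raw)          # parse once instead of regex-editing text
    data['url'] = '../' + data['url']   # mutate values as ordinary Python objects
    return yaml.safe_dump(data, default_flow_style=False, allow_unicode=True)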
def rewrite_links(content, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append,
rst_src_path=None):
q1 = re.compile(r'^(\w+:|//|#)') # Starts with "https:", "//" or "#".
q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)') # Starts with a module directory name.
for tag, attr in link_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, q2, chapter_append,
yaml_append, rst_src_path)
for tag, attr in other_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, None, None, yaml_append,
rst_src_path)
return content
def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, append,
yaml_append, rst_src_path=None):
dir_name = os.path.dirname(path)
out = ""
p = re.compile(
r'<' + tag + r'\s+[^<>]*'
r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)'
)
i = 0
for m in p.finditer(content):
val = m.group('val')
if val and not q1.search(val):
# Add content up to attribute.
j = m.start('attr')
out += content[i:j]
i = j
full = ''
if path.endswith('.yaml'):
# content in yaml file
# rst_src_path: The RST source file path is needed for fixing
# relative URLs in the exercise description.
# It should have been saved in the YAML data by the exercise directive.
if rst_src_path:
full = os.path.realpath(os.path.join(
root,
os.path.dirname(rst_src_path),
val
))
else:
# We don't know which directory the relative path starts from,
# so just assume the build root. It is likely incorrect.
full = os.path.realpath(os.path.join(root, val))
else:
# content in html file
# dir_name points to either _build/html or _build/html/<round>
full = os.path.realpath(os.path.join(dir_name, val))
if full.startswith(root): # NB: root ends with "_build/html"
val_path_from_root = full[len(root)+1:].replace('\\', '/')
# Replace Windows path separator backslash to the forward slash.
# Links to chapters.
if q2 and q2.search(val_path_from_root):
if not out.endswith(append):
# Directory depth (starting from _build/html) of the source file
# that contains the link val.
if path.endswith('.yaml'):
# yaml files are always directly under _build/yaml,
# but A+ can fix the URL when we prepend "../" once.
# Most courses place chapters and exercises directly
# under the module directory, in which case one
# "../" is logical.
dir_depth = 1
else:
dir_depth = path[len(root)+1:].count(os.sep)
val_path_from_root = ('../' * dir_depth) + val_path_from_root
j = m.start('val')
out += append + content[i:j] + val_path_from_root
i = m.end('val')
# Other links.
elif static_host:
j = m.start('val')
out += content[i:j] + static_host + val_path_from_root
i = m.end('val')
elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append):
# Sphinx sets URLs to local files as relative URLs that work in
# the local filesystem (e.g., ../_images/myimage.png).
# The A+ frontend converts the URLs correctly when they are in
# the chapter content. (The URL must be converted to an absolute
# URL that refers to the MOOC grader course static files.)
# However, the conversion does not work for URLs in exercise
# descriptions because unlike for chapters, the service URL of
# an exercise does not refer to the course static files.
# Therefore, we add the attribute data-aplus-path="/static/{course}"
# that A+ frontend uses to set the correct URL path.
# Unfortunately, we must hardcode the MOOC grader static URL
# (/static) here.
out += yaml_append
out += content[i:]
return out
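# A small self-contained sketch of the chapter-link rewrite performed by
# rewrite_elements(). The paths, the chapter directory name and the HTML
# snippet are hypothetical; the expected result is indicative only.
def _demo_chapter_link_rewrite():
    import re
    html = '<a href="module01/chapter.html">Chapter</a>'
    q1 = re.compile(r'^(\w+:|//|#)')
    q2 = re.compile(r'^(module01)(/|\\)')
    # typically -> '<a data-aplus-chapter href="module01/chapter.html">Chapter</a>'
    return rewrite_elements(
        html, 'a', 'href',
        path='/srv/course/_build/html/index.html',
        root='/srv/course/_build/html',
        q1=q1, static_host=None, q2=q2,
        append='data-aplus-chapter ', yaml_append=None,
    )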
def _walk(html_dir):
files = []
for root, dirnames, filenames in os.walk(html_dir):
for filename in fnmatch.filter(filenames, '*.html'):
files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.yaml'):
files.append(os.path.join(root, filename))
return files
def _read_file(file_path):
with io.open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def _write_file(file_path, content):
|
def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path,
lang_key=False, lang=None):
'''Rewrite links in the string values inside the data_dict.'''
# YAML file may have a list or a dictionary in the topmost level.
# lang_key and lang are used to pick the correct language from rst_src_path.
if isinstance(data_dict, dict):
for key, val in data_dict.items():
if lang_key:
# data_dict is the value for a key that had the ending "|i18n",
# so now key is a language code.
lang = key
if isinstance(val, dict) or isinstance(val, list):
recursive_rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, rst_src_path, key.endswith('|i18n'), lang)
# lang_key: if key is, e.g., "title|i18n", then the val dict
# contains keys like "en" and "fi".
elif isinstance(val, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst_src_path
data_dict[key] = rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, lang_rst_src_path)
elif isinstance(data_dict, list):
for i, a in enumerate(data_dict):
if isinstance(a, dict) or isinstance(a, list):
recursive_rewrite_links(a, path, root, link_elements,
other_elements, static_host, chapter_dirs,
chapter_append, yaml_append, rst_src_path, lang_key, lang)
elif isinstance(a, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst | with io.open(file_path, 'w', encoding='utf-8') as f:
f.write(content) | identifier_body |
html_tools.py | (out_dir, chapter_dirs, static_host):
build_dir = os.path.dirname(out_dir)
if static_host and not static_host.endswith('/'):
static_host += '/'
for path in _walk(build_dir):
rewrite_file_links(path, out_dir, chapter_dirs, static_host)
def rewrite_file_links(path, root, chapter_dirs, static_host):
content = _read_file(path)
link_elements = [
('a', 'href'),
]
other_elements = [
('img', 'src'),
('script', 'src'),
('iframe', 'src'),
('link', 'href'),
('video', 'poster'),
('source', 'src'),
]
if path.endswith(".yaml"):
# YAML files are handled separately because rewriting links with
# a regexp could add YAML syntax errors to the file if quotes are not
# escaped properly. Escaping is now taken care of by the YAML module.
yaml_data_dict = yaml.safe_load(content)
recursive_rewrite_links(
yaml_data_dict,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')),
)
# _rst_srcpath is an internal value stored in the YAML file.
# It is the path of the RST source file that contains the exercise.
# The path is needed for fixing relative URLs, usually links pointing
# to other chapters and exercises. It may have multiple values for
# different languages in multilingual courses or only one string value
# in monolingual courses.
content = yaml.safe_dump(yaml_data_dict, default_flow_style=False,
allow_unicode=True)
else:
content = rewrite_links(
content,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
)
_write_file(path, content)
def | (content, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append,
rst_src_path=None):
q1 = re.compile(r'^(\w+:|//|#)') # Starts with "https:", "//" or "#".
q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)') # Starts with a module directory name.
for tag, attr in link_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, q2, chapter_append,
yaml_append, rst_src_path)
for tag, attr in other_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, None, None, yaml_append,
rst_src_path)
return content
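# Illustrative sketch of the two prefilters compiled above: q1 leaves
# absolute, protocol-relative and fragment URLs untouched, while q2 marks
# paths that start with a known chapter directory. The chapter_dirs values
# and URLs below are hypothetical.
def _demo_url_prefilters():
    import re
    q1 = re.compile(r'^(\w+:|//|#)')
    q2 = re.compile(r'^(module01|module02)(/|\\)')
    urls = ['https://example.org/x', '//cdn.example.org/a.js', '#section-2',
            'module01/chapter.html', '_static/style.css']
    skipped = [u for u in urls if q1.search(u)]     # left as-is by the rewriter
    chapters = [u for u in urls if q2.search(u)]    # get the data-aplus-chapter prefix
    return skipped, chapters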
def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, append,
yaml_append, rst_src_path=None):
dir_name = os.path.dirname(path)
out = ""
p = re.compile(
r'<' + tag + r'\s+[^<>]*'
r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)'
)
i = 0
for m in p.finditer(content):
val = m.group('val')
if val and not q1.search(val):
# Add content up to attribute.
j = m.start('attr')
out += content[i:j]
i = j
full = ''
if path.endswith('.yaml'):
# content in yaml file
# rst_src_path: The RST source file path is needed for fixing
# relative URLs in the exercise description.
# It should have been saved in the YAML data by the exercise directive.
if rst_src_path:
full = os.path.realpath(os.path.join(
root,
os.path.dirname(rst_src_path),
val
))
else:
# We don't know which directory the relative path starts from,
# so just assume the build root. It is likely incorrect.
full = os.path.realpath(os.path.join(root, val))
else:
# content in html file
# dir_name points to either _build/html or _build/html/<round>
full = os.path.realpath(os.path.join(dir_name, val))
if full.startswith(root): # NB: root ends with "_build/html"
val_path_from_root = full[len(root)+1:].replace('\\', '/')
# Replace Windows path separator backslash to the forward slash.
# Links to chapters.
if q2 and q2.search(val_path_from_root):
if not out.endswith(append):
# Directory depth (starting from _build/html) of the source file
# that contains the link val.
if path.endswith('.yaml'):
# yaml files are always directly under _build/yaml,
# but A+ can fix the URL when we prepend "../" once.
# Most courses place chapters and exercises directly
# under the module directory, in which case one
# "../" is logical.
dir_depth = 1
else:
dir_depth = path[len(root)+1:].count(os.sep)
val_path_from_root = ('../' * dir_depth) + val_path_from_root
j = m.start('val')
out += append + content[i:j] + val_path_from_root
i = m.end('val')
# Other links.
elif static_host:
j = m.start('val')
out += content[i:j] + static_host + val_path_from_root
i = m.end('val')
elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append):
# Sphinx sets URLs to local files as relative URLs that work in
# the local filesystem (e.g., ../_images/myimage.png).
# The A+ frontend converts the URLs correctly when they are in
# the chapter content. (The URL must be converted to an absolute
# URL that refers to the MOOC grader course static files.)
# However, the conversion does not work for URLs in exercise
# descriptions because unlike for chapters, the service URL of
# an exercise does not refer to the course static files.
# Therefore, we add the attribute data-aplus-path="/static/{course}"
# that A+ frontend uses to set the correct URL path.
# Unfortunately, we must hardcode the MOOC grader static URL
# (/static) here.
out += yaml_append
out += content[i:]
return out
def _walk(html_dir):
files = []
for root, dirnames, filenames in os.walk(html_dir):
for filename in fnmatch.filter(filenames, '*.html'):
files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.yaml'):
files.append(os.path.join(root, filename))
return files
def _read_file(file_path):
with io.open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def _write_file(file_path, content):
with io.open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path,
lang_key=False, lang=None):
'''Rewrite links in the string values inside the data_dict.'''
# YAML file may have a list or a dictionary in the topmost level.
# lang_key and lang are used to pick the correct language from rst_src_path.
if isinstance(data_dict, dict):
for key, val in data_dict.items():
if lang_key:
# data_dict is the value for a key that had the ending "|i18n",
# so now key is a language code.
lang = key
if isinstance(val, dict) or isinstance(val, list):
recursive_rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, rst_src_path, key.endswith('|i18n'), lang)
# lang_key: if key is, e.g., "title|i18n", then the val dict
# contains keys like "en" and "fi".
elif isinstance(val, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst_src_path
data_dict[key] = rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, lang_rst_src_path)
elif isinstance(data_dict, list):
for i, a in enumerate(data_dict):
if isinstance(a, dict) or isinstance(a, list):
recursive_rewrite_links(a, path, root, link_elements,
other_elements, static_host, chapter_dirs,
chapter_append, yaml_append, rst_src_path, lang_key, lang)
elif isinstance(a, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst | rewrite_links | identifier_name |
html_tools.py | (out_dir, chapter_dirs, static_host):
build_dir = os.path.dirname(out_dir)
if static_host and not static_host.endswith('/'):
static_host += '/'
for path in _walk(build_dir):
rewrite_file_links(path, out_dir, chapter_dirs, static_host)
def rewrite_file_links(path, root, chapter_dirs, static_host):
content = _read_file(path)
link_elements = [
('a', 'href'),
]
other_elements = [
('img', 'src'),
('script', 'src'),
('iframe', 'src'),
('link', 'href'),
('video', 'poster'),
('source', 'src'),
]
if path.endswith(".yaml"):
# YAML files are handled separately because rewriting links with
# a regexp could add YAML syntax errors to the file if quotes are not
# escaped properly. Escaping is now taken care of by the YAML module.
yaml_data_dict = yaml.safe_load(content)
recursive_rewrite_links(
yaml_data_dict,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')),
)
# _rst_srcpath is an internal value stored in the YAML file.
# It is the path of the RST source file that contains the exercise.
# The path is needed for fixing relative URLs, usually links pointing
# to other chapters and exercises. It may have multiple values for
# different languages in multilingual courses or only one string value
# in monolingual courses.
content = yaml.safe_dump(yaml_data_dict, default_flow_style=False,
allow_unicode=True)
else:
content = rewrite_links(
content,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
)
_write_file(path, content)
def rewrite_links(content, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append,
rst_src_path=None):
q1 = re.compile(r'^(\w+:|//|#)') # Starts with "https:", "//" or "#".
q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)') # Starts with a module directory name.
for tag, attr in link_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, q2, chapter_append,
yaml_append, rst_src_path)
for tag, attr in other_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, None, None, yaml_append,
rst_src_path)
return content
def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, append,
yaml_append, rst_src_path=None):
dir_name = os.path.dirname(path)
out = ""
p = re.compile(
r'<' + tag + r'\s+[^<>]*'
r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)'
)
i = 0
for m in p.finditer(content):
val = m.group('val')
if val and not q1.search(val):
# Add content up to attribute.
j = m.start('attr')
out += content[i:j]
i = j
full = ''
if path.endswith('.yaml'):
# content in yaml file
# rst_src_path: The RST source file path is needed for fixing
# relative URLs in the exercise description.
# It should have been saved in the YAML data by the exercise directive.
if rst_src_path:
full = os.path.realpath(os.path.join(
root,
os.path.dirname(rst_src_path),
val
))
else:
# We don't know which directory the relative path starts from,
# so just assume the build root. It is likely incorrect.
full = os.path.realpath(os.path.join(root, val))
else:
# content in html file
# dir_name points to either _build/html or _build/html/<round>
full = os.path.realpath(os.path.join(dir_name, val))
if full.startswith(root): # NB: root ends with "_build/html"
val_path_from_root = full[len(root)+1:].replace('\\', '/')
# Replace Windows path separator backslash to the forward slash.
# Links to chapters.
if q2 and q2.search(val_path_from_root):
if not out.endswith(append):
# Directory depth (starting from _build/html) of the source file
# that contains the link val.
if path.endswith('.yaml'):
# yaml files are always directly under _build/yaml,
# but A+ can fix the URL when we prepend "../" once.
# Most courses place chapters and exercises directly
# under the module directory, in which case one
# "../" is logical.
dir_depth = 1
else:
dir_depth = path[len(root)+1:].count(os.sep)
val_path_from_root = ('../' * dir_depth) + val_path_from_root
j = m.start('val')
out += append + content[i:j] + val_path_from_root
i = m.end('val')
# Other links.
elif static_host:
j = m.start('val')
out += content[i:j] + static_host + val_path_from_root
i = m.end('val')
elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append):
# Sphinx sets URLs to local files as relative URLs that work in
# the local filesystem (e.g., ../_images/myimage.png).
# The A+ frontend converts the URLs correctly when they are in
# the chapter content. (The URL must be converted to an absolute
# URL that refers to the MOOC grader course static files.)
# However, the conversion does not work for URLs in exercise
# descriptions because unlike for chapters, the service URL of
# an exercise does not refer to the course static files.
# Therefore, we add the attribute data-aplus-path="/static/{course}"
# that A+ frontend uses to set the correct URL path.
# Unfortunately, we must hardcode the MOOC grader static URL
# (/static) here.
out += yaml_append |
out += content[i:]
return out
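# Worked example (hypothetical paths) of the dir_depth logic above: one "../"
# is prepended per directory level between the build root and the HTML file,
# so chapter links stay relative to that file.
def _demo_dir_depth_prefix():
    import os
    root = '/srv/course/_build/html'
    path = '/srv/course/_build/html/module01/chapter01.html'
    dir_depth = path[len(root) + 1:].count(os.sep)          # 1 level below the root
    return ('../' * dir_depth) + 'module02/chapter02.html'  # -> '../module02/chapter02.html'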
def _walk(html_dir):
files = []
for root, dirnames, filenames in os.walk(html_dir):
for filename in fnmatch.filter(filenames, '*.html'):
files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.yaml'):
files.append(os.path.join(root, filename))
return files
def _read_file(file_path):
with io.open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def _write_file(file_path, content):
with io.open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path,
lang_key=False, lang=None):
'''Rewrite links in the string values inside the data_dict.'''
# YAML file may have a list or a dictionary in the topmost level.
# lang_key and lang are used to pick the correct language from rst_src_path.
if isinstance(data_dict, dict):
for key, val in data_dict.items():
if lang_key:
# data_dict is the value for a key that had the ending "|i18n",
# so now key is a language code.
lang = key
if isinstance(val, dict) or isinstance(val, list):
recursive_rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, rst_src_path, key.endswith('|i18n'), lang)
# lang_key: if key is, e.g., "title|i18n", then the val dict
# contains keys like "en" and "fi".
elif isinstance(val, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst_src_path
data_dict[key] = rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, lang_rst_src_path)
elif isinstance(data_dict, list):
for i, a in enumerate(data_dict):
if isinstance(a, dict) or isinstance(a, list):
recursive_rewrite_links(a, path, root, link_elements,
other_elements, static_host, chapter_dirs,
chapter_append, yaml_append, rst_src_path, lang_key, lang)
elif isinstance(a, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst_src | random_line_split |
|
html_tools.py | (out_dir, chapter_dirs, static_host):
build_dir = os.path.dirname(out_dir)
if static_host and not static_host.endswith('/'):
static_host += '/'
for path in _walk(build_dir):
rewrite_file_links(path, out_dir, chapter_dirs, static_host)
def rewrite_file_links(path, root, chapter_dirs, static_host):
content = _read_file(path)
link_elements = [
('a', 'href'),
]
other_elements = [
('img', 'src'),
('script', 'src'),
('iframe', 'src'),
('link', 'href'),
('video', 'poster'),
('source', 'src'),
]
if path.endswith(".yaml"):
# YAML files are handled separately because rewriting links with
# a regexp could add YAML syntax errors to the file if quotes are not
# escaped properly. Escaping is now taken care of by the YAML module.
yaml_data_dict = yaml.safe_load(content)
recursive_rewrite_links(
yaml_data_dict,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')),
)
# _rst_srcpath is an internal value stored in the YAML file.
# It is the path of the RST source file that contains the exercise.
# The path is needed for fixing relative URLs, usually links pointing
# to other chapters and exercises. It may have multiple values for
# different languages in multilingual courses or only one string value
# in monolingual courses.
content = yaml.safe_dump(yaml_data_dict, default_flow_style=False,
allow_unicode=True)
else:
content = rewrite_links(
content,
path,
root,
link_elements,
other_elements,
static_host,
chapter_dirs,
'data-aplus-chapter ',
'data-aplus-path="/static/{course}" ',
)
_write_file(path, content)
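# Sketch of the _rst_srcpath lookup, assuming hypothetical data. In a
# multilingual course the YAML may store the RST source path per language,
# e.g. _rst_srcpath|i18n: {en: module01/chap.en.rst, fi: module01/chap.fi.rst},
# while a monolingual course stores a single string under _rst_srcpath.
def _demo_rst_srcpath_lookup(rst_src_path, lang=None):
    # Mirrors the per-language selection done in recursive_rewrite_links().
    if isinstance(rst_src_path, dict):
        return rst_src_path.get(lang if lang else 'en')
    return rst_src_path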
def rewrite_links(content, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append,
rst_src_path=None):
q1 = re.compile(r'^(\w+:|//|#)') # Starts with "https:", "//" or "#".
q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)') # Starts with a module directory name.
for tag, attr in link_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, q2, chapter_append,
yaml_append, rst_src_path)
for tag, attr in other_elements:
content = rewrite_elements(content, tag, attr, path, root,
q1, static_host, None, None, yaml_append,
rst_src_path)
return content
def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, append,
yaml_append, rst_src_path=None):
dir_name = os.path.dirname(path)
out = ""
p = re.compile(
r'<' + tag + r'\s+[^<>]*'
r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)'
)
i = 0
for m in p.finditer(content):
val = m.group('val')
if val and not q1.search(val):
# Add content up to attribute.
j = m.start('attr')
out += content[i:j]
i = j
full = ''
if path.endswith('.yaml'):
# content in yaml file
# rst_src_path: The RST source file path is needed for fixing
# relative URLs in the exercise description.
# It should have been saved in the YAML data by the exercise directive.
if rst_src_path:
full = os.path.realpath(os.path.join(
root,
os.path.dirname(rst_src_path),
val
))
else:
# We don't know which directory the relative path starts from,
# so just assume the build root. It is likely incorrect.
full = os.path.realpath(os.path.join(root, val))
else:
# content in html file
# dir_name points to either _build/html or _build/html/<round>
full = os.path.realpath(os.path.join(dir_name, val))
if full.startswith(root): # NB: root ends with "_build/html"
val_path_from_root = full[len(root)+1:].replace('\\', '/')
# Replace Windows path separator backslash to the forward slash.
# Links to chapters.
if q2 and q2.search(val_path_from_root):
if not out.endswith(append):
# Directory depth (starting from _build/html) of the source file
# that contains the link val.
if path.endswith('.yaml'):
# yaml files are always directly under _build/yaml,
# but A+ can fix the URL when we prepend "../" once.
# Most courses place chapters and exercises directly
# under the module directory, in which case one
# "../" is logical.
dir_depth = 1
else:
dir_depth = path[len(root)+1:].count(os.sep)
val_path_from_root = ('../' * dir_depth) + val_path_from_root
j = m.start('val')
out += append + content[i:j] + val_path_from_root
i = m.end('val')
# Other links.
elif static_host:
j = m.start('val')
out += content[i:j] + static_host + val_path_from_root
i = m.end('val')
elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append):
# Sphinx sets URLs to local files as relative URLs that work in
# the local filesystem (e.g., ../_images/myimage.png).
# The A+ frontend converts the URLs correctly when they are in
# the chapter content. (The URL must be converted to an absolute
# URL that refers to the MOOC grader course static files.)
# However, the conversion does not work for URLs in exercise
# descriptions because unlike for chapters, the service URL of
# an exercise does not refer to the course static files.
# Therefore, we add the attribute data-aplus-path="/static/{course}"
# that A+ frontend uses to set the correct URL path.
# Unfortunately, we must hardcode the MOOC grader static URL
# (/static) here.
out += yaml_append
out += content[i:]
return out
def _walk(html_dir):
files = []
for root, dirnames, filenames in os.walk(html_dir):
for filename in fnmatch.filter(filenames, '*.html'):
files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.yaml'):
files.append(os.path.join(root, filename))
return files
def _read_file(file_path):
with io.open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def _write_file(file_path, content):
with io.open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements,
static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path,
lang_key=False, lang=None):
'''Rewrite links in the string values inside the data_dict.'''
# YAML file may have a list or a dictionary in the topmost level.
# lang_key and lang are used to pick the correct language from rst_src_path.
if isinstance(data_dict, dict):
for key, val in data_dict.items():
if lang_key:
# data_dict is the value for a key that had the ending "|i18n",
# so now key is a language code.
lang = key
if isinstance(val, dict) or isinstance(val, list):
|
elif isinstance(val, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst_src_path
data_dict[key] = rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, lang_rst_src_path)
elif isinstance(data_dict, list):
for i, a in enumerate(data_dict):
if isinstance(a, dict) or isinstance(a, list):
recursive_rewrite_links(a, path, root, link_elements,
other_elements, static_host, chapter_dirs,
chapter_append, yaml_append, rst_src_path, lang_key, lang)
elif isinstance(a, str):
if isinstance(rst_src_path, dict):
lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
else:
lang_rst_src_path = rst | recursive_rewrite_links(val, path, root, link_elements,
other_elements, static_host, chapter_dirs, chapter_append,
yaml_append, rst_src_path, key.endswith('|i18n'), lang)
# lang_key: if key is, e.g., "title|i18n", then the val dict
# contains keys like "en" and "fi". | conditional_block |
common.js | navigator.userAgent.match(/BlackBerry/i)
|| navigator.userAgent.match(/Windows Phone/i)
){
return true;
}
else {
return false;
}
}
function getScroll() {
var data, scrOfX = 0, scrOfY = 0;
if( typeof( window.pageYOffset ) == 'number' ) {
//Netscape compliant
scrOfY = window.pageYOffset;
scrOfX = window.pageXOffset;
} else if( document.body && ( document.body.scrollLeft || document.body.scrollTop ) ) {
//DOM compliant
scrOfY = document.body.scrollTop;
scrOfX = document.body.scrollLeft;
} else if( document.documentElement && ( document.documentElement.scrollLeft || document.documentElement.scrollTop ) ) {
//IE6 Strict
scrOfY = document.documentElement.scrollTop;
scrOfX = document.documentElement.scrollLeft;
}
data = {'left': scrOfX, 'top' : scrOfY};
return data;
}
function initMap(){
// text overlay proto
function TxtOverlay(pos, txt, cls, map) {
this.pos = pos;
this.txt_ = txt;
this.cls_ = cls;
this.map_ = map;
this.div_ = null;
this.setMap(map);
}
TxtOverlay.prototype = new google.maps.OverlayView();
TxtOverlay.prototype.onAdd = function() {
var div = document.createElement('DIV');
div.className = this.cls_;
div.innerHTML = this.txt_;
this.div_ = div;
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
var panes = this.getPanes();
panes.floatPane.appendChild(div);
}
TxtOverlay.prototype.draw = function() {
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
var div = this.div_;
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
console.log(position)
}
// create map
var point = new google.maps.LatLng(55.774210, 37.520200);
var tooltipTemplate = '4-я Магистральная, дом 5, подъезд 3, этаж 3';
var myMapOptions = {
zoom: 16,
center: point,
mapTypeId: google.maps.MapTypeId.TERRAIN
};
var map = new google.maps.Map(document.getElementById("map_canvas"),myMapOptions);
var image = new google.maps.MarkerImage(
'img/map-image.png',
new google.maps.Size(61,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shadow = new google.maps.MarkerImage(
'img/map-shadow.png',
new google.maps.Size(106,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shape = {
coord: [37,1,40,2,43,3,45,4,46,5,48,6,49,7,50,8,51,9,52,10,53,11,54,12,54,13,55,14,56,15,56,16,57,17,57,18,57,19,58,20,58,21,58,22,59,23,60,24,60,25,60,26,60,27,60,28,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65,17,64,17,63,16,62,15,61,14,60,14,59,13,58,12,57,11,56,11,55,10,54,9,53,9,52,8,51,7,50,7,49,6,48,6,47,5,46,4,45,4,44,3,43,3,42,2,41,2,40,2,39,2,38,1,37,1,36,1,35,1,34,1,33,1,32,1,31,1,30,1,29,1,28,1,27,1,26,1,25,1,24,1,23,2,22,2,21,2,20,3,19,3,18,3,17,4,16,4,15,5,14,6,13,6,12,7,11,8,10,9,9,10,8,11,7,12,6,14,5,15,4,17,3,20,2,23,1,37,1],
type: 'poly'
};
var marker = new google.maps.Marker({
draggable: true,
raiseOnDrag: false,
icon: image,
shadow: shadow,
shape: shape,
map: map,
position: point
});
txt = new TxtOverlay(point, tooltipTemplate, "map_label", map)
}
var forms = {
errors: {empty: 'Поле не заполнено', email: 'Это не почта' },
checkemail: function(emailAddress) {
var pattern = new RegExp(/^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i);
return pattern.test(emailAddress);
},
prevalidate: function () {
$('[name="name"], [name="email"]').css('border', 'rgba(0, 0, 0, 0.2) 1px solid');
},
makeerrfield: function(name) {
$('[name="'+name+'"]').css('border', '#ff6850 1px solid');
// $('[name="'+name+'"]').css('color', '#ff6850'); | },
showError: function( name, msg ) { | random_line_split |
|
common.js | img.src+')'});
}
syncBg();
$('.overlay').removeClass('show');
}
}
// if mobile
else {
$section.backstretch({url: img.src, alignY: 0});
$bg.backstretch({url: img.src, alignY: 0});
}
}
function syncBg() {
// if no mobile not use alg
if(!detectmob()) {
return false;
}
$.each( $('.menu-bg.img-bg'), function(i) {
var $bg = $(this);
var $section = $bg.parent();
var s = getScroll();
var is_top = ($section.offset().top - s.top);
// for empty bgbg
if( $bg.hasClass('empty-bg') ) {
if ( is_top < 0 ) {
$bg.css({opacity: 1});
}
else {
$bg.css({opacity: 0});
}
}
// for bg
else {
if ( is_top < 0 ) {
$section.children('.backstretch').css({position: 'fixed'});
$section.find('.menu-bg').css({position: 'fixed'});
}
else {
$section.children('.backstretch').css({position: 'absolute'});
$section.find('.menu-bg').css({position: 'absolute',top: 0});
}
}
})
}
function listIndicate() {
var $section = $('.faq');
if( !$section.length ) return false;
var $i = $('.faq .list');
var $pointer = $('.list-indicator');
var s = getScroll();
var is_top = ($section.offset().top - s.top);
var faq_height = ($section.height() - 100);
var faq_nav_height = $i.height() ;
var faq_pointer_height = $pointer.height() ;
console.log(faq_height + ' ' +is_top)
var bottomstopeffect = -450;
var correct = -110;
if ( is_top < 0 && is_top > bottomstopeffect ) {
$i.css({position: 'fixed', 'top': 120, 'width': $section.width() / 4});
console.log( (faq_height + is_top) / faq_height )
$pointer.css({position: 'fixed', 'top': 120 + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height )});
}
else if(is_top < bottomstopeffect) {
$i.css({position: 'absolute', 'top': faq_nav_height + correct});
var ptop = faq_nav_height + correct + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height );
if(ptop > 2*faq_nav_height + correct - faq_pointer_height) {
ptop = 2*faq_nav_height + correct - faq_pointer_height;
}
$pointer.css({position: 'absolute',top: ptop });
}
else {
$i.css({position: 'absolute', 'top': 0});
$pointer.css({position: 'absolute',top: 0});
}
}
function test(text) {
$('.test').text(text);
}
function detectmob() {
return true;
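// NOTE: the unconditional "return true;" above short-circuits the user-agent
// checks below, so the mobile code path is always taken; the remainder of
// this function is effectively dead code unless that line is removed.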
if( navigator.userAgent.match(/Android/i)
|| navigator.userAgent.match(/webOS/i)
|| navigator.userAgent.match(/iPhone/i)
|| navigator.userAgent.match(/iPad/i)
|| navigator.userAgent.match(/iPod/i)
|| navigator.userAgent.match(/BlackBerry/i)
|| navigator.userAgent.match(/Windows Phone/i)
){
return true;
}
else {
return false;
}
}
function getScroll() |
function initMap(){
// text overlay proto
function TxtOverlay(pos, txt, cls, map) {
this.pos = pos;
this.txt_ = txt;
this.cls_ = cls;
this.map_ = map;
this.div_ = null;
this.setMap(map);
}
TxtOverlay.prototype = new google.maps.OverlayView();
TxtOverlay.prototype.onAdd = function() {
var div = document.createElement('DIV');
div.className = this.cls_;
div.innerHTML = this.txt_;
this.div_ = div;
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
var panes = this.getPanes();
panes.floatPane.appendChild(div);
}
TxtOverlay.prototype.draw = function() {
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
var div = this.div_;
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
console.log(position)
}
// create map
var point = new google.maps.LatLng(55.774210, 37.520200);
var tooltipTemplate = '4-я Магистральная, дом 5, подъезд 3, этаж 3';
var myMapOptions = {
zoom: 16,
center: point,
mapTypeId: google.maps.MapTypeId.TERRAIN
};
var map = new google.maps.Map(document.getElementById("map_canvas"),myMapOptions);
var image = new google.maps.MarkerImage(
'img/map-image.png',
new google.maps.Size(61,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shadow = new google.maps.MarkerImage(
'img/map-shadow.png',
new google.maps.Size(106,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shape = {
coord: [37,1,40,2,43,3,45,4,46,5,48,6,49,7,50,8,51,9,52,10,53,11,54,12,54,13,55,14,56,15,56,16,57,17,57,18,57,19,58,20,58,21,58,22,59,23,60,24,60,25,60,26,60,27,60,28,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65, | {
var data, scrOfX = 0, scrOfY = 0;
if( typeof( window.pageYOffset ) == 'number' ) {
//Netscape compliant
scrOfY = window.pageYOffset;
scrOfX = window.pageXOffset;
} else if( document.body && ( document.body.scrollLeft || document.body.scrollTop ) ) {
//DOM compliant
scrOfY = document.body.scrollTop;
scrOfX = document.body.scrollLeft;
} else if( document.documentElement && ( document.documentElement.scrollLeft || document.documentElement.scrollTop ) ) {
//IE6 Strict
scrOfY = document.documentElement.scrollTop;
scrOfX = document.documentElement.scrollLeft;
}
data = {'left': scrOfX, 'top' : scrOfY};
return data;
} | identifier_body |
common.js | ,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65,17,64,17,63,16,62,15,61,14,60,14,59,13,58,12,57,11,56,11,55,10,54,9,53,9,52,8,51,7,50,7,49,6,48,6,47,5,46,4,45,4,44,3,43,3,42,2,41,2,40,2,39,2,38,1,37,1,36,1,35,1,34,1,33,1,32,1,31,1,30,1,29,1,28,1,27,1,26,1,25,1,24,1,23,2,22,2,21,2,20,3,19,3,18,3,17,4,16,4,15,5,14,6,13,6,12,7,11,8,10,9,9,10,8,11,7,12,6,14,5,15,4,17,3,20,2,23,1,37,1],
type: 'poly'
};
var marker = new google.maps.Marker({
draggable: true,
raiseOnDrag: false,
icon: image,
shadow: shadow,
shape: shape,
map: map,
position: point
});
txt = new TxtOverlay(point, tooltipTemplate, "map_label", map)
}
var forms = {
errors: {empty: 'Поле не заполнено', email: 'Это не почта' },
checkemail: function(emailAddress) {
var pattern = new RegExp(/^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i);
return pattern.test(emailAddress);
},
prevalidate: function () {
$('[name="name"], [name="email"]').css('border', 'rgba(0, 0, 0, 0.2) 1px solid');
},
makeerrfield: function(name) {
$('[name="'+name+'"]').css('border', '#ff6850 1px solid');
// $('[name="'+name+'"]').css('color', '#ff6850');
},
showError: function( name, msg ) {
forms.makeerrfield(name);
$("#error_mes").html(msg);
$('[name="'+name+'"]').after( $("#error_mes") );
$("#error_mes").fadeIn("slow");
},
validate: function() {
forms.prevalidate();
if( !$('[name="name"]').val() ) {
forms.showError('name', forms.errors.empty );
return false;
}
if( !$('[name="email"]').val() ) {
forms.showError('email', forms.errors.empty );
return false;
}
if( !forms.checkemail( $('[name="email"]').val() ) ) {
forms.showError('email', forms.errors.email );
return false;
}
return true;
}
}
function validate() {
return forms.validate();
}
/*
$(function(){
var field = new Array("name", "email");
$("form").submit(function() {
var error=0;
$("form").find(":input").each(function() {
for(var i=0;i<field.length;i++){
if($(this).attr("name")==field[i]){
if(!$(this).val()){
$(this).css('border', '#ff6850 1px solid');
$(this).css('color', '#ff6850');
error=1;
}
else{
$(this).css('border', 'rgba(0, 0, 0, 0.2) 1px solid');
}
}
}
})
var email = $("#email").val();
if(!isValidEmailAddress(email)){
error=2;
$("#email").css('border', '#ff6850 1px solid');
}
if(error==0){
return true;
}
else{
if(error==1) err_text="Поле не заполнено";
if(error==2) err_text="Это не почта";
$("#error_mes").html(err_text);
$("#error_mes").fadeIn("slow");
return false;
}
})
});
function isValidEmailAddress(emailAddress) {
var pattern = new RegExp(/^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i);
return pattern.test(emailAddress);
}
*/
var fixObj = {
fix: function(wrapper, obj, w) {
if(wrapper.length && obj.length){
this.minTop = wrapper.offset().top;
if (obj.attr("id") == "js-home-howto-pass-btn") {
this.maxTop = ($("#js-home-test-wrapper").offset().top + 300) - $(window).height();
} else {
this.maxTop = wrapper.offset().top - 50 + (wrapper.height() - obj.height());
}
if ($(window).scrollTop() >= this.minTop && $(window).scrollTop() < this.maxTop) {
obj.removeClass("fixed_bottom").addClass("fixed_top");
} else | if ($(window).scrollTop() >= this.maxTop) {
obj.removeClass("fix | conditional_block |
|
common.js | ('+img.src+')'});
}
syncBg();
$('.overlay').removeClass('show');
}
}
// if mobile
else {
$section.backstretch({url: img.src, alignY: 0});
$bg.backstretch({url: img.src, alignY: 0});
}
}
function syncBg() {
// if no mobile not use alg
if(!detectmob()) {
return false;
}
$.each( $('.menu-bg.img-bg'), function(i) {
var $bg = $(this);
var $section = $bg.parent();
var s = getScroll();
var is_top = ($section.offset().top - s.top);
// for empty bgbg
if( $bg.hasClass('empty-bg') ) {
if ( is_top < 0 ) {
$bg.css({opacity: 1});
}
else {
$bg.css({opacity: 0});
}
}
// for bg
else {
if ( is_top < 0 ) {
$section.children('.backstretch').css({position: 'fixed'});
$section.find('.menu-bg').css({position: 'fixed'});
}
else {
$section.children('.backstretch').css({position: 'absolute'});
$section.find('.menu-bg').css({position: 'absolute',top: 0});
}
}
})
}
function | () {
var $section = $('.faq');
if( !$section.length ) return false;
var $i = $('.faq .list');
var $pointer = $('.list-indicator');
var s = getScroll();
var is_top = ($section.offset().top - s.top);
var faq_height = ($section.height() - 100);
var faq_nav_height = $i.height() ;
var faq_pointer_height = $pointer.height() ;
console.log(faq_height + ' ' +is_top)
var bottomstopeffect = -450;
var correct = -110;
if ( is_top < 0 && is_top > bottomstopeffect ) {
$i.css({position: 'fixed', 'top': 120, 'width': $section.width() / 4});
console.log( (faq_height + is_top) / faq_height )
$pointer.css({position: 'fixed', 'top': 120 + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height )});
}
else if(is_top < bottomstopeffect) {
$i.css({position: 'absolute', 'top': faq_nav_height + correct});
var ptop = faq_nav_height + correct + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height );
if(ptop > 2*faq_nav_height + correct - faq_pointer_height) {
ptop = 2*faq_nav_height + correct - faq_pointer_height;
}
$pointer.css({position: 'absolute',top: ptop });
}
else {
$i.css({position: 'absolute', 'top': 0});
$pointer.css({position: 'absolute',top: 0});
}
}
function test(text) {
$('.test').text(text);
}
function detectmob() {
return true;
if( navigator.userAgent.match(/Android/i)
|| navigator.userAgent.match(/webOS/i)
|| navigator.userAgent.match(/iPhone/i)
|| navigator.userAgent.match(/iPad/i)
|| navigator.userAgent.match(/iPod/i)
|| navigator.userAgent.match(/BlackBerry/i)
|| navigator.userAgent.match(/Windows Phone/i)
){
return true;
}
else {
return false;
}
}
function getScroll() {
var data, scrOfX = 0, scrOfY = 0;
if( typeof( window.pageYOffset ) == 'number' ) {
//Netscape compliant
scrOfY = window.pageYOffset;
scrOfX = window.pageXOffset;
} else if( document.body && ( document.body.scrollLeft || document.body.scrollTop ) ) {
//DOM compliant
scrOfY = document.body.scrollTop;
scrOfX = document.body.scrollLeft;
} else if( document.documentElement && ( document.documentElement.scrollLeft || document.documentElement.scrollTop ) ) {
//IE6 Strict
scrOfY = document.documentElement.scrollTop;
scrOfX = document.documentElement.scrollLeft;
}
data = {'left': scrOfX, 'top' : scrOfY};
return data;
}
function initMap(){
// text overlay proto
function TxtOverlay(pos, txt, cls, map) {
this.pos = pos;
this.txt_ = txt;
this.cls_ = cls;
this.map_ = map;
this.div_ = null;
this.setMap(map);
}
TxtOverlay.prototype = new google.maps.OverlayView();
TxtOverlay.prototype.onAdd = function() {
var div = document.createElement('DIV');
div.className = this.cls_;
div.innerHTML = this.txt_;
this.div_ = div;
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
var panes = this.getPanes();
panes.floatPane.appendChild(div);
}
TxtOverlay.prototype.draw = function() {
var overlayProjection = this.getProjection();
var position = overlayProjection.fromLatLngToDivPixel(this.pos);
var div = this.div_;
div.style.left = position.x + 'px';
div.style.top = position.y + 'px';
console.log(position)
}
// create map
var point = new google.maps.LatLng(55.774210, 37.520200);
var tooltipTemplate = '4-я Магистральная, дом 5, подъезд 3, этаж 3';
var myMapOptions = {
zoom: 16,
center: point,
mapTypeId: google.maps.MapTypeId.TERRAIN
};
var map = new google.maps.Map(document.getElementById("map_canvas"),myMapOptions);
var image = new google.maps.MarkerImage(
'img/map-image.png',
new google.maps.Size(61,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shadow = new google.maps.MarkerImage(
'img/map-shadow.png',
new google.maps.Size(106,82),
new google.maps.Point(0,0),
new google.maps.Point(31,82)
);
var shape = {
coord: [37,1,40,2,43,3,45,4,46,5,48,6,49,7,50,8,51,9,52,10,53,11,54,12,54,13,55,14,56,15,56,16,57,17,57,18,57,19,58,20,58,21,58,22,59,23,60,24,60,25,60,26,60,27,60,28,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65, | listIndicate | identifier_name |
dsm2dtm.py |
def extract_dtm(dsm_path, ground_dem_path, non_ground_dem_path, radius, terrain_slope):
"""
Generates a ground DEM and non-ground DEM raster from the input DSM raster.
Input:
dsm_path: {string} path to the DSM raster
radius: {int} Search radius of kernel in cells.
terrain_slope: {float} average slope of the input terrain
Output:
ground_dem_path: {string} path to the generated ground DEM raster
non_ground_dem_path: {string} path to the generated non-ground DEM raster
"""
cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format(
dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path
)
os.system(cmd)
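# Illustrative invocation (hypothetical paths; nothing here ships with the
# script). extract_dtm() shells out to SAGA's slope-based DTM filter
# (grid_filter tool 7), so the command it runs looks roughly like:
#   saga_cmd grid_filter 7 -INPUT dsm.tif -RADIUS 40 -TERRAINSLOPE 15.0 \
#       -GROUND ground.sdat -NONGROUND nonground.sdat
# e.g. extract_dtm("data/dsm.tif", "temp/ground.sdat", "temp/nonground.sdat",
#                  radius=40, terrain_slope=15.0)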
def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):
"""
Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster.
Replaces values in those pixels with No data Value (-99999.0)
Input:
ground_dem_path: {string} path to the generated ground DEM raster
ignore_value: {float} value in the ground raster to be treated as No Data and ignored when computing the mean and standard deviation
Output:
out_path: {string} path to the filtered ground DEM raster
"""
ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())
std = ground_np[ground_np != ignore_value].std()
mean = ground_np[ground_np != ignore_value].mean()
threshold_value = mean + 1.5 * std
ground_np[ground_np >= threshold_value] = -99999.0
save_array_as_geotif(ground_np, ground_dem_path, out_path)
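# Tiny numeric illustration (made-up values) of the mean + 1.5 * std cut-off
# used above: unusually high "ground" cells, e.g. roof fragments that survived
# the filter, end up above the threshold and are blanked to the NoData value.
def _demo_noise_threshold():
    import numpy as np
    cells = np.array([10.0, 11.0, 12.0, 11.5, 35.0])  # 35.0 is an outlier
    threshold = cells.mean() + 1.5 * cells.std()       # ~30.3 here
    return cells[cells < threshold]                    # the 35.0 cell is dropped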
def save_array_as_geotif(array, source_tif_path, out_path):
"""
Generates a geotiff raster from the input numpy array (height * width * depth)
Input:
array: {numpy array} numpy array to be saved as geotiff
source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted.
Output:
out_path: {string} path to the generated Geotiff raster
"""
if len(array.shape) > 2:
height, width, depth = array.shape
else:
height, width = array.shape
depth = 1
source_tif = gdal.Open(source_tif_path)
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32)
if depth != 1:
for i in range(depth):
dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i])
else:
dataset.GetRasterBand(1).WriteArray(array)
geotrans = source_tif.GetGeoTransform()
proj = source_tif.GetProjection()
dataset.SetGeoTransform(geotrans)
dataset.SetProjection(proj)
dataset.FlushCache()
dataset = None
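# Typical (hypothetical) use: write a processed array back to disk while
# copying the projection and geotransform from the raster it came from, e.g.
#   arr = np.array(gdal.Open("temp/ground.tif").ReadAsArray())
#   arr[arr < 0] = -99999.0
#   save_array_as_geotif(arr, "temp/ground.tif", "temp/ground_clean.tif")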
def sdat_to_gtiff(sdat_raster_path, out_gtiff_path):
gdal.Translate(
out_gtiff_path,
sdat_raster_path,
format="GTiff",
)
def close_gaps(in_path, out_path, threshold=0.1):
"""
Interpolates the holes (no data value) in the input raster.
Input:
in_path: {string} path to the input raster with holes
threshold: {float} Tension Threshold
Output:
out_path: {string} path to the generated raster with closed holes.
"""
cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format(
in_path, threshold, out_path
)
os.system(cmd)
def smoothen_raster(in_path, out_path, radius=2):
"""
Applies gaussian filter to the input raster.
Input:
in_path: {string} path to the input raster
radius: {int} kernel radius to be used for smoothing
Output:
out_path: {string} path to the generated smoothened raster
"""
cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format(
in_path, out_path, radius
)
os.system(cmd)
def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0):
cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format(
rasterA_path, rasterB_path, out_path, no_data_value
)
os.system(cmd)
def replace_values(
rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98
):
"""
Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB
Input:
rasterA_path: {string} path to the input rasterA
rasterB_path: {string} path to the input rasterB
Output:
out_path: {string} path to the generated raster
"""
cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format(
rasterA_path,
no_data_value,
rasterB_path,
out_path,
no_data_value,
threshold,
threshold,
)
os.system(cmd)
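# With hypothetical inputs the command assembled above expands to roughly:
#   gdal_calc.py -A smoothed_ground.tif --NoDataValue=-99999.0 \
#       -B nonground_share.tif --outfile ground_masked.tif \
#       --calc="-99999.0*(B>=0.98) + (A)*(B<0.98)"
# i.e. keep A where B < threshold and write NoData where B >= threshold,
# a per-cell select done entirely by gdal_calc.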
def expand_holes_in_raster(
in_path, search_window=7, no_data_value=-99999.0, threshold=50
):
"""
Expands holes (cells with no_data_value) in the input raster.
Input:
in_path: {string} path to the input raster
search_window: {int} kernel size to be used as window
threshold: {float} threshold on percentage of cells with no_data_value
Output:
np_raster: {numpy array} Returns the modified input raster's array
"""
np_raster = np.array(gdal.Open(in_path).ReadAsArray())
height, width = np_raster.shape[0], np_raster.shape[1]
for i in range(int((search_window - 1) / 2), width, 1):
for j in range(int((search_window - 1) / 2), height, 1):
window = np_raster[
int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2)
+ search_window,
int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2)
+ search_window,
]
if (
np.count_nonzero(window == no_data_value)
>= (threshold * search_window ** 2) / 100
):
try:
np_raster[i, j] = no_data_value
except:
pass
return np_raster
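# With the defaults above (search_window=7, threshold=50) a cell is blanked
# when at least half of the surrounding 7 x 7 = 49 cells are already NoData:
# np.count_nonzero(window == no_data_value) >= 50 * 49 / 100 = 24.5, i.e.
# 25 or more cells. This grows existing holes outward so later interpolation
# starts from cleaner hole edges.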
def get_raster_crs(raster_path):
"""
Returns the CRS (Coordinate Reference System) of the raster
Input:
raster_path: {string} path to the source tif image
"""
raster = rasterio.open(raster_path)
return raster.crs
def get_raster_resolution(raster_path):
raster = gdal.Open(raster_path)
raster_geotrans = raster.GetGeoTransform()
x_res = raster_geotrans[1]
y_res = -raster_geotrans[5]
return x_res, y_res
def get_res_and_downsample(dsm_path, temp_dir):
# check DSM resolution. Downsample if DSM is of very high resolution to save processing time.
x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters
dsm_name = dsm_path.split("/")[-1].split(".")[0]
dsm_crs = get_raster_crs(dsm_path)
if dsm_crs != 4326:
if x_res < 0.3 or y_res < 0.3:
target_res = 0.3 # downsample to this resolution (in meters)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
else:
if x_res < 2.514e-06 or y_res < 2.514e-06:
target_res = 2.514e-06 # downsample to this resolution (in degrees)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
down | np_raster = np.array(gdal.Open(raster_path).ReadAsArray())
return np_raster[np_raster != ignore_value].mean() | identifier_body |
|
dsm2dtm.py | to the generated ground DEM raster
non_ground_dem_path: {string} path to the generated non-ground DEM raster
"""
cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format(
dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path
)
os.system(cmd)
def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):
"""
Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster.
Replaces values in those pixels with No data Value (-99999.0)
Input:
ground_dem_path: {string} path to the generated ground DEM raster
ignore_value: {float} value in the ground raster to be treated as No Data and ignored when computing the mean and standard deviation
Output:
out_path: {string} path to the filtered ground DEM raster
"""
ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())
std = ground_np[ground_np != ignore_value].std()
mean = ground_np[ground_np != ignore_value].mean()
threshold_value = mean + 1.5 * std
ground_np[ground_np >= threshold_value] = -99999.0
save_array_as_geotif(ground_np, ground_dem_path, out_path)
def save_array_as_geotif(array, source_tif_path, out_path):
"""
Generates a geotiff raster from the input numpy array (height * width * depth)
Input:
array: {numpy array} numpy array to be saved as geotiff
source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted.
Output:
out_path: {string} path to the generated Geotiff raster
"""
if len(array.shape) > 2:
height, width, depth = array.shape
else:
height, width = array.shape
depth = 1
source_tif = gdal.Open(source_tif_path)
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32)
if depth != 1:
for i in range(depth):
dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i])
else:
dataset.GetRasterBand(1).WriteArray(array)
geotrans = source_tif.GetGeoTransform()
proj = source_tif.GetProjection()
dataset.SetGeoTransform(geotrans)
dataset.SetProjection(proj)
dataset.FlushCache()
dataset = None
def sdat_to_gtiff(sdat_raster_path, out_gtiff_path):
gdal.Translate(
out_gtiff_path,
sdat_raster_path,
format="GTiff",
)
def close_gaps(in_path, out_path, threshold=0.1):
"""
Interpolates the holes (no data value) in the input raster.
Input:
in_path: {string} path to the input raster with holes
threshold: {float} Tension Threshold
Output:
out_path: {string} path to the generated raster with closed holes.
"""
cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format(
in_path, threshold, out_path
)
os.system(cmd)
def smoothen_raster(in_path, out_path, radius=2):
"""
Applies gaussian filter to the input raster.
Input:
in_path: {string} path to the input raster
radius: {int} kernel radius to be used for smoothing
Output:
out_path: {string} path to the generated smoothened raster
"""
cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format(
in_path, out_path, radius
)
os.system(cmd)
def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0):
cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format(
rasterA_path, rasterB_path, out_path, no_data_value
)
os.system(cmd)
def replace_values(
rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98
):
"""
Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB
Input:
rasterA_path: {string} path to the input rasterA
rasterB_path: {string} path to the input rasterB
Output:
out_path: {string} path to the generated raster
"""
cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format(
rasterA_path,
no_data_value,
rasterB_path,
out_path,
no_data_value,
threshold,
threshold,
)
os.system(cmd)
def expand_holes_in_raster(
in_path, search_window=7, no_data_value=-99999.0, threshold=50
):
"""
Expands holes (cells with no_data_value) in the input raster.
Input:
in_path: {string} path to the input raster
search_window: {int} kernel size to be used as window
threshold: {float} threshold on percentage of cells with no_data_value
Output:
np_raster: {numpy array} Returns the modified input raster's array
"""
np_raster = np.array(gdal.Open(in_path).ReadAsArray())
height, width = np_raster.shape[0], np_raster.shape[1]
for i in range(int((search_window - 1) / 2), width, 1):
for j in range(int((search_window - 1) / 2), height, 1):
window = np_raster[
int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2)
+ search_window,
int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2)
+ search_window,
]
if (
np.count_nonzero(window == no_data_value)
>= (threshold * search_window ** 2) / 100
):
try:
np_raster[i, j] = no_data_value
except:
pass
return np_raster
def get_raster_crs(raster_path):
"""
Returns the CRS (Coordinate Reference System) of the raster
Input:
raster_path: {string} path to the source tif image
"""
raster = rasterio.open(raster_path)
return raster.crs
def get_raster_resolution(raster_path):
raster = gdal.Open(raster_path)
raster_geotrans = raster.GetGeoTransform()
x_res = raster_geotrans[1]
y_res = -raster_geotrans[5]
return x_res, y_res
def | (dsm_path, temp_dir):
# check DSM resolution. Downsample if DSM is of very high resolution to save processing time.
x_res, y_res = get_raster_resolution(dsm_path) # resolution in CRS units: meters for projected CRS, degrees for EPSG:4326
dsm_name = dsm_path.split("/")[-1].split(".")[0]
dsm_crs = get_raster_crs(dsm_path)
if dsm_crs != 4326:
if x_res < 0.3 or y_res < 0.3:
target_res = 0.3 # downsample to this resolution (in meters)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
else:
if x_res < 2.514e-06 or y_res < 2.514e-06:
target_res = 2.514e-06 # downsample to this resolution (in degrees)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
return dsm_path
def get_updated_params(dsm_path, search_radius, smoothen_radius):
# search_radius and smoothen_radius are set with respect to a 30 cm DSM
# returns updated parameters if DSM is of coarser resolution
x_res, y_res = get_raster_resolution(dsm | get_res_and_downsample | identifier_name |
dsm2dtm.py | to the generated ground DEM raster
non_ground_dem_path: {string} path to the generated non-ground DEM raster
"""
cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format(
dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path
)
os.system(cmd)
def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):
"""
Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster.
Replaces values in those pixels with No data Value (-99999.0)
Input:
ground_dem_path: {string} path to the generated ground DEM raster
ignore_value: {float} value in the ground raster to be treated as No Data Value
Output:
out_path: {string} path to the filtered ground DEM raster
"""
ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())
std = ground_np[ground_np != ignore_value].std()
mean = ground_np[ground_np != ignore_value].mean()
threshold_value = mean + 1.5 * std
ground_np[ground_np >= threshold_value] = -99999.0
save_array_as_geotif(ground_np, ground_dem_path, out_path)
def save_array_as_geotif(array, source_tif_path, out_path):
"""
Generates a geotiff raster from the input numpy array (height * width * depth)
Input:
array: {numpy array} numpy array to be saved as geotiff
source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted.
Output:
out_path: {string} path to the generated Geotiff raster
"""
if len(array.shape) > 2:
height, width, depth = array.shape
else:
height, width = array.shape
depth = 1
source_tif = gdal.Open(source_tif_path)
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32)
if depth != 1:
for i in range(depth):
dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i])
else:
dataset.GetRasterBand(1).WriteArray(array)
geotrans = source_tif.GetGeoTransform()
proj = source_tif.GetProjection()
dataset.SetGeoTransform(geotrans)
dataset.SetProjection(proj)
dataset.FlushCache()
dataset = None
def sdat_to_gtiff(sdat_raster_path, out_gtiff_path):
gdal.Translate(
out_gtiff_path,
sdat_raster_path,
format="GTiff",
)
def close_gaps(in_path, out_path, threshold=0.1):
"""
Interpolates the holes (no data value) in the input raster.
Input:
in_path: {string} path to the input raster with holes
threshold: {float} Tension Threshold
Output:
out_path: {string} path to the generated raster with closed holes.
"""
cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format(
in_path, threshold, out_path
)
os.system(cmd)
def smoothen_raster(in_path, out_path, radius=2):
"""
Applies gaussian filter to the input raster.
Input:
in_path: {string} path to the input raster
radius: {int} kernel radius to be used for smoothing
Output:
out_path: {string} path to the generated smoothened raster
"""
cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format(
in_path, out_path, radius
)
os.system(cmd)
def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0):
cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format(
rasterA_path, rasterB_path, out_path, no_data_value
)
os.system(cmd)
def replace_values(
rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98
):
"""
Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB
Input:
rasterA_path: {string} path to the input rasterA
rasterB_path: {string} path to the input rasterB
Output:
out_path: {string} path to the generated raster
"""
cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format(
rasterA_path,
no_data_value,
rasterB_path,
out_path,
no_data_value,
threshold,
threshold,
)
os.system(cmd)
def expand_holes_in_raster(
in_path, search_window=7, no_data_value=-99999.0, threshold=50
):
"""
Expands holes (cells with no_data_value) in the input raster.
Input:
in_path: {string} path to the input raster
search_window: {int} kernel size to be used as window
threshold: {float} threshold on percentage of cells with no_data_value
Output:
np_raster: {numpy array} Returns the modified input raster's array
"""
np_raster = np.array(gdal.Open(in_path).ReadAsArray())
height, width = np_raster.shape[0], np_raster.shape[1]
for i in range(int((search_window - 1) / 2), width, 1):
for j in range(int((search_window - 1) / 2), height, 1):
window = np_raster[
int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2)
+ search_window,
int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2)
+ search_window,
]
if (
np.count_nonzero(window == no_data_value)
>= (threshold * search_window ** 2) / 100
):
|
return np_raster
def get_raster_crs(raster_path):
"""
Returns the CRS (Coordinate Reference System) of the raster
Input:
raster_path: {string} path to the source tif image
"""
raster = rasterio.open(raster_path)
return raster.crs
def get_raster_resolution(raster_path):
raster = gdal.Open(raster_path)
raster_geotrans = raster.GetGeoTransform()
x_res = raster_geotrans[1]
y_res = -raster_geotrans[5]
return x_res, y_res
def get_res_and_downsample(dsm_path, temp_dir):
# check DSM resolution. Downsample if DSM is of very high resolution to save processing time.
x_res, y_res = get_raster_resolution(dsm_path) # resolution in CRS units: meters for projected CRS, degrees for EPSG:4326
dsm_name = dsm_path.split("/")[-1].split(".")[0]
dsm_crs = get_raster_crs(dsm_path)
if dsm_crs != 4326:
if x_res < 0.3 or y_res < 0.3:
target_res = 0.3 # downsample to this resolution (in meters)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
else:
if x_res < 2.514e-06 or y_res < 2.514e-06:
target_res = 2.514e-06 # downsample to this resolution (in degrees)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
return dsm_path
def get_updated_params(dsm_path, search_radius, smoothen_radius):
# search_radius and smoothen_radius are set with respect to a 30 cm DSM
# returns updated parameters if DSM is of coarser resolution
x_res, y_res = get_raster_resolution(dsm_path | try:
np_raster[i, j] = no_data_value
except:
pass | conditional_block |
dsm2dtm.py | to the generated ground DEM raster
non_ground_dem_path: {string} path to the generated non-ground DEM raster
"""
cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format(
dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path
)
os.system(cmd)
def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):
"""
Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster.
Replaces values in those pixels with No data Value (-99999.0)
Input:
ground_dem_path: {string} path to the generated ground DEM raster
ignore_value: {float} value in the ground raster to be treated as No Data Value
Output:
out_path: {string} path to the filtered ground DEM raster
"""
ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())
std = ground_np[ground_np != ignore_value].std()
mean = ground_np[ground_np != ignore_value].mean()
threshold_value = mean + 1.5 * std
ground_np[ground_np >= threshold_value] = -99999.0
save_array_as_geotif(ground_np, ground_dem_path, out_path)
def save_array_as_geotif(array, source_tif_path, out_path):
"""
Generates a geotiff raster from the input numpy array (height * width * depth)
Input:
array: {numpy array} numpy array to be saved as geotiff
source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted.
Output:
out_path: {string} path to the generated Geotiff raster
"""
if len(array.shape) > 2:
height, width, depth = array.shape
else:
height, width = array.shape
depth = 1
source_tif = gdal.Open(source_tif_path)
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32)
if depth != 1:
for i in range(depth):
dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i])
else:
dataset.GetRasterBand(1).WriteArray(array)
geotrans = source_tif.GetGeoTransform()
proj = source_tif.GetProjection()
dataset.SetGeoTransform(geotrans)
dataset.SetProjection(proj)
dataset.FlushCache()
dataset = None
def sdat_to_gtiff(sdat_raster_path, out_gtiff_path):
gdal.Translate(
out_gtiff_path,
sdat_raster_path,
format="GTiff",
)
def close_gaps(in_path, out_path, threshold=0.1):
"""
Interpolates the holes (no data value) in the input raster.
Input:
in_path: {string} path to the input raster with holes
threshold: {float} Tension Threshold
Output:
out_path: {string} path to the generated raster with closed holes.
"""
cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format(
in_path, threshold, out_path
)
os.system(cmd)
def smoothen_raster(in_path, out_path, radius=2):
"""
Applies gaussian filter to the input raster.
Input:
in_path: {string} path to the input raster
radius: {int} kernel radius to be used for smoothing
Output:
out_path: {string} path to the generated smoothened raster
"""
cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format(
in_path, out_path, radius
)
os.system(cmd)
def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0):
cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format(
rasterA_path, rasterB_path, out_path, no_data_value
)
os.system(cmd)
def replace_values(
rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98
):
"""
Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB
Input:
rasterA_path: {string} path to the input rasterA
rasterB_path: {string} path to the input rasterB
Output:
out_path: {string} path to the generated raster
"""
cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format(
rasterA_path,
no_data_value,
rasterB_path,
out_path,
no_data_value,
threshold,
threshold,
)
os.system(cmd)
def expand_holes_in_raster(
in_path, search_window=7, no_data_value=-99999.0, threshold=50
):
"""
Expands holes (cells with no_data_value) in the input raster.
Input:
in_path: {string} path to the input raster
search_window: {int} kernel size to be used as window
threshold: {float} threshold on percentage of cells with no_data_value
Output:
np_raster: {numpy array} Returns the modified input raster's array
"""
np_raster = np.array(gdal.Open(in_path).ReadAsArray())
height, width = np_raster.shape[0], np_raster.shape[1]
for i in range(int((search_window - 1) / 2), width, 1):
for j in range(int((search_window - 1) / 2), height, 1):
window = np_raster[
int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2)
+ search_window,
int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2)
+ search_window,
]
if (
np.count_nonzero(window == no_data_value)
>= (threshold * search_window ** 2) / 100
):
try:
np_raster[i, j] = no_data_value
except:
pass
return np_raster
def get_raster_crs(raster_path):
"""
Returns the CRS (Coordinate Reference System) of the raster
Input:
raster_path: {string} path to the source tif image
"""
raster = rasterio.open(raster_path)
return raster.crs
def get_raster_resolution(raster_path):
raster = gdal.Open(raster_path)
raster_geotrans = raster.GetGeoTransform()
x_res = raster_geotrans[1]
y_res = -raster_geotrans[5]
return x_res, y_res
def get_res_and_downsample(dsm_path, temp_dir):
# check DSM resolution. Downsample if DSM is of very high resolution to save processing time.
x_res, y_res = get_raster_resolution(dsm_path) # resolution in CRS units: meters for projected CRS, degrees for EPSG:4326
dsm_name = dsm_path.split("/")[-1].split(".")[0] | if x_res < 0.3 or y_res < 0.3:
target_res = 0.3 # downsample to this resolution (in meters)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
else:
if x_res < 2.514e-06 or y_res < 2.514e-06:
target_res = 2.514e-06 # downsample to this resolution (in degrees)
downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1]
downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif")
# Downsampling DSM
downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor)
dsm_path = downsampled_dsm_path
return dsm_path
def get_updated_params(dsm_path, search_radius, smoothen_radius):
# search_radius and smoothen_radius are set with respect to a 30 cm DSM
# returns updated parameters if DSM is of coarser resolution
x_res, y_res = get_raster_resolution(dsm_path | dsm_crs = get_raster_crs(dsm_path)
if dsm_crs != 4326: | random_line_split |
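Taken together, these dsm2dtm.py samples describe a DSM-to-DTM workflow: downsample very high resolution input, extract ground cells with SAGA's grid_filter 7, strip high outliers, close the resulting holes, and smooth. The sketch below is an editorial illustration of how the helpers shown above could be chained; it is not part of the dataset rows. `extract_ground_dem` is a hypothetical stand-in for the SAGA wrapper whose name is elided in the samples, `get_updated_params` is assumed to return the adjusted (search_radius, smoothen_radius) pair, and all file paths are invented.

# Illustrative sketch only -- chains the helpers from the samples above into a DSM -> DTM flow.
import os

def dsm_to_dtm_sketch(dsm_path, temp_dir, search_radius=40, smoothen_radius=45, terrain_slope=15.0):
    os.makedirs(temp_dir, exist_ok=True)
    # Downsample very high resolution DSMs to keep SAGA runtimes manageable.
    dsm_path = get_res_and_downsample(dsm_path, temp_dir)
    # Radii are tuned for 30 cm input; assumed to come back adjusted for coarser rasters.
    search_radius, smoothen_radius = get_updated_params(dsm_path, search_radius, smoothen_radius)
    ground_sdat = os.path.join(temp_dir, "ground.sdat")
    non_ground_sdat = os.path.join(temp_dir, "non_ground.sdat")
    # Hypothetical name for the saga_cmd grid_filter 7 wrapper shown in the samples.
    extract_ground_dem(dsm_path, search_radius, terrain_slope, ground_sdat, non_ground_sdat)
    ground_tif = os.path.join(temp_dir, "ground.tif")
    sdat_to_gtiff(ground_sdat, ground_tif)
    denoised_tif = os.path.join(temp_dir, "ground_denoised.tif")
    remove_noise(ground_tif, denoised_tif)
    filled_sdat = os.path.join(temp_dir, "dtm_filled.sdat")
    close_gaps(denoised_tif, filled_sdat)
    dtm_tif = os.path.join(temp_dir, "dtm.tif")
    sdat_to_gtiff(filled_sdat, dtm_tif)
    smooth_tif = os.path.join(temp_dir, "dtm_smooth.tif")
    smoothen_raster(dtm_tif, smooth_tif, smoothen_radius)
    return smooth_tif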
lib.rs | to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! // The WebSocket protocol allows for a utf8 reason for the closing state after the
//! // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//! // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection; sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! setup for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn connect<U, F, H>(url: U, factory: F) -> Result<()>
where
U: Borrow<str>,
F: FnMut(Sender) -> H,
H: Handler
{
let mut ws = try!(WebSocket::new(factory));
let parsed = try!(
url::Url::parse(url.borrow())
.map_err(|err| Error::new(
ErrorKind::Internal,
format!("Unable to parse {} as url due to {:?}", url.borrow(), err))));
try!(ws.connect(parsed));
try!(ws.run());
Ok(())
}
/// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections.
pub struct WebSocket<F>
where F: Factory
{
event_loop: io::Loop<F>,
handler: io::Handler<F>,
}
impl<F> WebSocket<F>
where F: Factory
{
/// Create a new WebSocket using the given Factory to create handlers.
pub fn new(mut factory: F) -> Result<WebSocket<F>> {
let max = factory.settings().max_connections;
let mut config = EventLoopConfig::new();
config.notify_capacity(max + 1000);
WebSocket::with_config(factory, config)
}
/// Create a new WebSocket with a Factory and use the event loop config to provide settings for
/// the event loop.
pub fn with_config(factory: F, config: EventLoopConfig) -> Result<WebSocket<F>> {
Ok(WebSocket {
event_loop: try!(io::Loop::configured(config)),
handler: io::Handler::new(factory),
})
}
/// Consume the WebSocket and listen for new connections on the specified address.
///
/// # Safety
///
/// This method will block until the event loop finishes running.
pub fn listen<A>(mut self, addr_spec: A) -> Result<WebSocket<F>>
where A: ToSocketAddrs + fmt::Debug
{
let mut result = Err(Error::new(ErrorKind::Internal, format!("Unable to listen on {:?}", addr_spec)));
for addr in try!(addr_spec.to_socket_addrs()) {
result = self.handler.listen(&mut self.event_loop, &addr).map(|_| ());
if result.is_ok() | {
return self.run()
} | conditional_block |
|
lib.rs | a message immediately when a WebSocket connection is
//! established, you will need to write a Handler that implements the `on_open` method. For
//! example:
//!
//! ```no_run
//! use ws::{connect, Handler, Sender, Handshake, Result, Message, CloseCode};
//!
//! // Our Handler struct.
//! // Here we explicitly indicate that the Client needs a Sender,
//! // whereas a closure captures the Sender for us automatically.
//! struct Client {
//! out: Sender,
//! }
//!
//! // We implement the Handler trait for Client so that we can get more
//! // fine-grained control of the connection.
//! impl Handler for Client {
//!
//! // `on_open` will be called only after the WebSocket handshake is successful
//! // so at this point we know that the connection is ready to send/receive messages.
//! // We ignore the `Handshake` for now, but you could also use this method to setup
//! // Handler state or reject the connection based on the details of the Request
//! // or Response, such as by checking cookies or Auth headers.
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`.
//! // If this call fails, it will only result in this connection disconnecting.
//! self.out.send("Hello WebSocket")
//! }
//!
//! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message`
//! // and returns a `Result<()>`.
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Close the connection when we get a response from the server
//! println!("Got message: {}", msg);
//! self.out.close(CloseCode::Normal)
//! }
//! }
//!
//! // Now, instead of a closure, the Factory returns a new instance of our Handler.
//! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap()
//! ```
//!
//! That is a big increase in verbosity in order to accomplish the same effect as the
//! original example, but this way is more flexible and gives you access to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! // The WebSocket protocol allows for a utf8 reason for the closing state after the
//! // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//! // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection; sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! setup for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn | connect | identifier_name |
|
lib.rs | or Response, such as by checking cookies or Auth headers.
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`.
//! // If this call fails, it will only result in this connection disconnecting.
//! self.out.send("Hello WebSocket")
//! }
//!
//! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message`
//! // and returns a `Result<()>`.
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Close the connection when we get a response from the server
//! println!("Got message: {}", msg);
//! self.out.close(CloseCode::Normal)
//! }
//! }
//!
//! // Now, instead of a closure, the Factory returns a new instance of our Handler.
//! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap()
//! ```
//!
//! That is a big increase in verbosity in order to accomplish the same effect as the
//! original example, but this way is more flexible and gives you access to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! // The WebSocket protocol allows for a utf8 reason for the closing state after the
//! // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//! // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection; sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! setup for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn connect<U, F, H>(url: U, factory: F) -> Result<()>
where
U: Borrow<str>,
F: FnMut(Sender) -> H,
H: Handler
{
let mut ws = try!(WebSocket::new(factory));
let parsed = try!(
url::Url::parse(url.borrow())
.map_err(|err| Error::new(
ErrorKind::Internal,
format!("Unable to parse {} as url due to {:?}", url.borrow(), err))));
try!(ws.connect(parsed));
try!(ws.run());
Ok(())
}
/// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections.
pub struct WebSocket<F>
where F: Factory
{
event_loop: io::Loop<F>,
handler: io::Handler<F>,
}
impl<F> WebSocket<F> | where F: Factory
{
/// Create a new WebSocket using the given Factory to create handlers.
pub fn new(mut factory: F) -> Result<WebSocket<F>> {
let max = factory.settings().max_connections; | random_line_split |
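`WebSocket::new` above simply derives the mio notify-queue capacity from the factory's `max_connections` setting and then defers to `with_config`. The hedged sketch below shows the equivalent done by hand with an explicit `EventLoopConfig`; it targets the old mio-based API documented in these samples (closure factories and handlers, blocking `listen`), is not part of the dataset, and the capacity value is arbitrary.

// Illustrative sketch: configure the event loop explicitly instead of relying on WebSocket::new.
extern crate ws;
extern crate mio;

use mio::EventLoopConfig;
use ws::WebSocket;

fn main() {
    // Closure factory: every connection gets a closure handler that echoes messages back.
    let factory = |out: ws::Sender| move |msg| out.send(msg);

    // Same idea as WebSocket::new, but with a hand-picked notify capacity (arbitrary here).
    let mut config = EventLoopConfig::new();
    config.notify_capacity(10_000);

    let ws = WebSocket::with_config(factory, config).unwrap();
    // Blocks until the event loop finishes running.
    ws.listen("127.0.0.1:3012").unwrap();
}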
|
index.ts | a revert error.
let revert: RevertError;
try {
revert = decodeBytesAsRevertError(rawCallResult);
} catch (err) {
// Can't decode it as a revert error, so assume it didn't revert.
return;
}
throw revert;
}
protected static _throwIfThrownErrorIsRevertError(error: Error): void {
// Try to decode a thrown error.
let revertError: RevertError;
try {
revertError = decodeThrownErrorAsRevertError(error);
} catch (err) {
// Can't decode it.
return;
}
// Re-cast StringRevertErrors as plain Errors for backwards-compatibility.
if (revertError instanceof StringRevertError) {
throw new Error(revertError.values.message as string);
}
throw revertError;
}
protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void {
// With live nodes, we will receive an empty call result if:
// 1. The function has no return value.
// 2. The contract reverts without data.
// 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`).
if (!rawCallResult || rawCallResult === '0x') {
const returnValueDataItem = methodAbi.getReturnValueDataItem();
if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) {
// Expected no result (which makes it hard to tell if the call reverted).
return;
}
throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`);
}
}
// Throws if the given arguments cannot be safely/correctly encoded based on
// the given inputAbi. An argument may not be considered safely encodeable
// if it overflows the corresponding Solidity type, there is a bug in the
// encoder, or the encoder performs unsafe type coercion.
public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string {
const abiEncoder = AbiEncoder.create(inputAbi);
const params = abiUtils.parseEthersParams(inputAbi);
const rawEncoded = abiEncoder.encode(args);
const rawDecoded = abiEncoder.decodeAsArray(rawEncoded);
for (let i = 0; i < rawDecoded.length; i++) {
const original = args[i];
const decoded = rawDecoded[i];
if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) {
throw new Error(
`Cannot safely encode argument: ${params.names[i]} (${original}) of type ${
params.types[i]
}. (Possible type overflow or other encoding error)`,
);
}
}
return rawEncoded;
}
protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData);
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void {
assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
if (defaultBlock !== undefined) {
assert.isBlockParam('defaultBlock', defaultBlock);
}
}
private static _removeUndefinedProperties<T>(props: any): T {
const clonedProps = { ...props };
Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]);
return clonedProps;
}
protected _promiseWithTransactionHash(
txHashPromise: Promise<string>,
opts: AwaitTransactionSuccessOpts,
): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> {
return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>(
txHashPromise,
(async (): Promise<TransactionReceiptWithDecodedLogs> => {
// When the transaction hash resolves, wait for it to be mined.
return this._web3Wrapper.awaitTransactionSuccessAsync(
await txHashPromise,
opts.pollingIntervalMs,
opts.timeoutMs,
);
})(),
);
}
protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
// Gas amount sourced with the following priorities:
// 1. Optional param passed in to public method call
// 2. Global config passed in at library instantiation
// 3. Gas estimate calculation + safety margin
// tslint:disable-next-line:no-object-literal-type-assertion
const txDataWithDefaults = {
to: this.address,
...this._web3Wrapper.getContractDefaults(),
...BaseContract._removeUndefinedProperties(txData),
} as T;
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected async _evmExecAsync(encodedData: string): Promise<string> {
const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex');
const addressBuf = Buffer.from(this.address.substr(2), 'hex');
// should only run once, the first time it is called
if (this._evmIfExists === undefined) {
const vm = new VM({});
const psm = new PStateManager(vm.stateManager);
// create an account with 1 ETH
const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex');
const accountAddress = util.privateToAddress(accountPk);
const account = new Account({ balance: 1e18 });
await psm.putAccount(accountAddress, account);
// 'deploy' the contract
if (this._deployedBytecodeIfExists === undefined) {
const contractCode = await this._web3Wrapper.getContractCodeAsync(this.address);
this._deployedBytecodeIfExists = Buffer.from(contractCode.substr(2), 'hex');
}
await psm.putContractCode(addressBuf, this._deployedBytecodeIfExists);
// save for later
this._evmIfExists = vm;
this._evmAccountIfExists = accountAddress;
}
let rawCallResult;
try {
const result = await this._evmIfExists.runCall({
to: addressBuf,
caller: this._evmAccountIfExists,
origin: this._evmAccountIfExists,
data: encodedDataBytes,
});
rawCallResult = `0x${result.execResult.returnValue.toString('hex')}`;
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected async _performCallAsync(callData: Partial<CallData>, defaultBlock?: BlockParam): Promise<string> {
const callDataWithDefaults = await this._applyDefaultsToTxDataAsync(callData);
let rawCallResult: string;
try {
rawCallResult = await this._web3Wrapper.callAsync(callDataWithDefaults, defaultBlock);
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected _lookupAbiEncoder(functionSignature: string): AbiEncoder.Method {
const abiEncoder = this._abiEncoderByFunctionSignature[functionSignature];
if (abiEncoder === undefined) {
throw new Error(`Failed to lookup method with function signature '${functionSignature}'`);
}
return abiEncoder;
}
protected _lookupAbi(functionSignature: string): MethodAbi {
const methodAbi = this.abi.find((abiDefinition: AbiDefinition) => {
if (abiDefinition.type !== AbiType.Function) {
return false;
}
// tslint:disable-next-line:no-unnecessary-type-assertion
const abiFunctionSignature = new AbiEncoder.Method(abiDefinition as MethodAbi).getSignature();
if (abiFunctionSignature === functionSignature) {
return true;
}
return false;
}) as MethodAbi;
return methodAbi;
}
protected _strictEncodeArguments(functionSignature: string, functionArguments: any): string {
const abiEncoder = this._lookupAbiEncoder(functionSignature);
const inputAbi = abiEncoder.getDataItem().components;
if (inputAbi === undefined) | {
throw new Error(`Undefined Method Input ABI`);
} | conditional_block |
|
index.ts | v: T) => TResult | Promise<TResult>,
onRejected?: (reason: any) => Promise<never>,
): Promise<TResult> {
return this._promise.then<TResult>(onFulfilled, onRejected);
}
public catch<TResult>(onRejected?: (reason: any) => Promise<TResult>): Promise<TResult | T> {
return this._promise.catch(onRejected);
}
public finally(onFinally?: (() => void) | null): Promise<T> {
return this._promise.finally(onFinally);
}
// tslint:enable:promise-function-async
// tslint:enable:async-suffix
get [Symbol.toStringTag](): 'Promise' {
return this._promise[Symbol.toStringTag];
}
}
export class BaseContract {
protected _abiEncoderByFunctionSignature: AbiEncoderByFunctionSignature;
protected _web3Wrapper: Web3Wrapper;
public abi: ContractAbi;
public address: string;
public contractName: string;
public constructorArgs: any[] = [];
public _deployedBytecodeIfExists?: Buffer;
private _evmIfExists?: VM;
private _evmAccountIfExists?: Buffer;
protected static _formatABIDataItemList(
abis: DataItem[],
values: any[],
formatter: (type: string, value: any) => any,
): any {
return values.map((value: any, i: number) => formatABIDataItem(abis[i], value, formatter));
}
protected static _lowercaseAddress(type: string, value: string): string {
return type === 'address' ? value.toLowerCase() : value;
}
protected static _bigNumberToString(_type: string, value: any): any {
return BigNumber.isBigNumber(value) ? value.toString() : value;
}
protected static _lookupConstructorAbi(abi: ContractAbi): ConstructorAbi |
protected static _throwIfCallResultIsRevertError(rawCallResult: string): void {
// Try to decode the call result as a revert error.
let revert: RevertError;
try {
revert = decodeBytesAsRevertError(rawCallResult);
} catch (err) {
// Can't decode it as a revert error, so assume it didn't revert.
return;
}
throw revert;
}
protected static _throwIfThrownErrorIsRevertError(error: Error): void {
// Try to decode a thrown error.
let revertError: RevertError;
try {
revertError = decodeThrownErrorAsRevertError(error);
} catch (err) {
// Can't decode it.
return;
}
// Re-cast StringRevertErrors as plain Errors for backwards-compatibility.
if (revertError instanceof StringRevertError) {
throw new Error(revertError.values.message as string);
}
throw revertError;
}
protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void {
// With live nodes, we will receive an empty call result if:
// 1. The function has no return value.
// 2. The contract reverts without data.
// 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`).
if (!rawCallResult || rawCallResult === '0x') {
const returnValueDataItem = methodAbi.getReturnValueDataItem();
if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) {
// Expected no result (which makes it hard to tell if the call reverted).
return;
}
throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`);
}
}
// Throws if the given arguments cannot be safely/correctly encoded based on
// the given inputAbi. An argument may not be considered safely encodeable
// if it overflows the corresponding Solidity type, there is a bug in the
// encoder, or the encoder performs unsafe type coercion.
public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string {
const abiEncoder = AbiEncoder.create(inputAbi);
const params = abiUtils.parseEthersParams(inputAbi);
const rawEncoded = abiEncoder.encode(args);
const rawDecoded = abiEncoder.decodeAsArray(rawEncoded);
for (let i = 0; i < rawDecoded.length; i++) {
const original = args[i];
const decoded = rawDecoded[i];
if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) {
throw new Error(
`Cannot safely encode argument: ${params.names[i]} (${original}) of type ${
params.types[i]
}. (Possible type overflow or other encoding error)`,
);
}
}
return rawEncoded;
}
protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData);
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void {
assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
if (defaultBlock !== undefined) {
assert.isBlockParam('defaultBlock', defaultBlock);
}
}
private static _removeUndefinedProperties<T>(props: any): T {
const clonedProps = { ...props };
Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]);
return clonedProps;
}
protected _promiseWithTransactionHash(
txHashPromise: Promise<string>,
opts: AwaitTransactionSuccessOpts,
): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> {
return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>(
txHashPromise,
(async (): Promise<TransactionReceiptWithDecodedLogs> => {
// When the transaction hash resolves, wait for it to be mined.
return this._web3Wrapper.awaitTransactionSuccessAsync(
await txHashPromise,
opts.pollingIntervalMs,
opts.timeoutMs,
);
})(),
);
}
protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
// Gas amount sourced with the following priorities:
// 1. Optional param passed in to public method call
// 2. Global config passed in at library instantiation
// 3. Gas estimate calculation + safety margin
// tslint:disable-next-line:no-object-literal-type-assertion
const txDataWithDefaults = {
to: this.address,
...this._web3Wrapper.getContractDefaults(),
...BaseContract._removeUndefinedProperties(txData),
} as T;
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected async _evmExecAsync(encodedData: string): Promise<string> {
const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex');
const addressBuf = Buffer.from(this.address.substr(2), 'hex');
// should only run once, the first time it is called
if (this._evmIfExists === undefined) {
const vm = new VM({});
const psm = new PStateManager(vm.stateManager);
// create an account with 1 ETH
const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex');
const accountAddress = util.privateToAddress(accountPk);
const account = new Account({ balance: 1e18 });
await psm.putAccount(accountAddress, account);
// 'deploy' the contract
if (this._deployedBytecodeIfExists === undefined | {
const constructorAbiIfExists = abi.find(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Constructor,
// tslint:disable-next-line:no-unnecessary-type-assertion
) as ConstructorAbi | undefined;
if (constructorAbiIfExists !== undefined) {
return constructorAbiIfExists;
} else {
// If the constructor is not explicitly defined, it won't be included in the ABI. It is
// still callable however, so we construct what the ABI would look like were it to exist.
const defaultConstructorAbi: ConstructorAbi = {
type: AbiType.Constructor,
stateMutability: 'nonpayable',
payable: false,
inputs: [],
};
return defaultConstructorAbi;
}
} | identifier_body |
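As a usage note on `strictArgumentEncodingCheck` from the sample above: it round-trips the arguments through the ABI encoder and throws when a value would overflow its Solidity type or be coerced unsafely, otherwise returning the raw encoding. The sketch below is illustrative only and not part of the dataset; the transfer-style ABI fragment, the values, and the import paths are invented for the example.

// Illustrative sketch; ABI fragment, values, and import paths are placeholders.
import { BigNumber } from '@0x/utils'; // assumed source of BigNumber
import { BaseContract } from './index'; // wherever this module is exported from

const inputAbi = [
    { name: 'to', type: 'address' },
    { name: 'amount', type: 'uint256' },
];

// Throws on overflow or unsafe coercion; otherwise returns the raw ABI encoding of the arguments.
const encoded = BaseContract.strictArgumentEncodingCheck(inputAbi, [
    '0x0000000000000000000000000000000000000001',
    new BigNumber('1000000000000000000'), // 1e18, comfortably within uint256
]);
console.log(encoded);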