file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes: identifier_body, identifier_name, conditional_block, random_line_split) | values
---|---|---|---|---|
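Each row is a fill-in-the-middle (FIM) sample: `prefix` and `suffix` give the visible context, `middle` is the masked span to be predicted, and `fim_type` records which kind of span was masked. Below is a minimal Python sketch, assuming the usual FIM conventions, of how a row could be reassembled and turned into a training prompt; the `<fim_*>` sentinel tokens are an assumption and vary by model.

def reassemble_source(row):
    # original file text = prefix + middle + suffix
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    # PSM ordering; the model is trained to emit `middle` after the last sentinel
    return "<fim_prefix>" + row["prefix"] + "<fim_suffix>" + row["suffix"] + "<fim_middle>"

# values adapted from the first modules.go row below; suffix shortened for illustration
example = {
    "file_name": "modules.go",
    "prefix": "func readCfg(cfg *config.StorageMiner, accessor func(*config.StorageMiner)) error ",
    "suffix": "\n",
    "middle": "{\n\taccessor(cfg)\n\treturn nil\n}",
    "fim_type": "identifier_body",
}
assert reassemble_source(example).startswith("func readCfg")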
modules.go | api.FullNode
Messager api.IMessager
MarketClient api2.MarketFullNode
MetadataService *service.MetadataService
LogService *service.LogService
SectorInfoService *service.SectorInfoService
Sealer sectorstorage.SectorManager
SectorIDCounter types2.SectorIDCounter
Verifier ffiwrapper.Verifier
Prover ffiwrapper.Prover
GetSealingConfigFn types2.GetSealingConfigFunc
Journal journal.Journal
AddrSel *storage.AddressSelector
NetworkParams *config.NetParamsConfig
}
func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) {
return func(params StorageMinerParams) (*storage.Miner, error) {
var (
metadataService = params.MetadataService
sectorinfoService = params.SectorInfoService
logService = params.LogService
mctx = params.MetricsCtx
lc = params.Lifecycle
api = params.API
messager = params.Messager
marketClient = params.MarketClient
sealer = params.Sealer
sc = params.SectorIDCounter
verif = params.Verifier
prover = params.Prover
gsd = params.GetSealingConfigFn
j = params.Journal
as = params.AddrSel
np = params.NetworkParams
)
maddr, err := metadataService.GetMinerAddress()
if err != nil {
return nil, err
}
ctx := LifecycleCtx(mctx, lc)
fps, err := storage.NewWindowedPoStScheduler(api, messager, fc, as, sealer, verif, sealer, j, maddr, np)
if err != nil {
return nil, err
}
sm, err := storage.NewMiner(api, messager, marketClient, maddr, metadataService, sectorinfoService, logService, sealer, sc, verif, prover, gsd, fc, j, as, np)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
go fps.Run(ctx)
return sm.Run(ctx)
},
OnStop: sm.Stop,
})
return sm, nil
}
}
func DoPoStWarmup(ctx MetricsCtx, api api.FullNode, metadataService *service.MetadataService, prover storage.WinningPoStProver) error {
maddr, err := metadataService.GetMinerAddress()
if err != nil {
return err
}
deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting deadlines: %w", err)
}
var sector abi.SectorNumber = math.MaxUint64
out:
for dlIdx := range deadlines {
partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
}
for _, partition := range partitions {
b, err := partition.ActiveSectors.First()
if err == bitfield.ErrNoBitsSet {
continue
}
if err != nil {
return err
}
sector = abi.SectorNumber(b)
break out
}
}
if sector == math.MaxUint64 {
log.Info("skipping winning PoSt warmup, no sectors")
return nil
}
log.Infow("starting winning PoSt warmup", "sector", sector)
start := time.Now()
var r abi.PoStRandomness = make([]byte, abi.RandomnessLength)
_, _ = rand.Read(r)
si, err := api.StateSectorGetInfo(ctx, maddr, sector, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
}
_, err = prover.ComputeProof(ctx, []proof2.SectorInfo{
{
SealProof: si.SealProof,
SectorNumber: sector,
SealedCID: si.SealedCID,
},
}, r)
if err != nil {
log.Errorw("failed to compute proof; please check your storage and restart the sealer once it is fixed", "error", err)
return nil
}
log.Infow("winning PoSt warmup successful", "took", time.Since(start))
return nil
}
func NewSetSealConfigFunc(r *config.StorageMiner) (types2.SetSealingConfigFunc, error) {
return func(cfg sealiface.Config) (err error) {
err = mutateCfg(r, func(c *config.StorageMiner) {
c.Sealing = config.SealingConfig{
MaxWaitDealsSectors: cfg.MaxWaitDealsSectors,
MaxSealingSectors: cfg.MaxSealingSectors,
MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals,
WaitDealsDelay: config.Duration(cfg.WaitDealsDelay),
AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy,
FinalizeEarly: cfg.FinalizeEarly,
BatchPreCommits: cfg.BatchPreCommits,
MaxPreCommitBatch: cfg.MaxPreCommitBatch,
PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait),
PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),
AggregateCommits: cfg.AggregateCommits,
MinCommitBatch: cfg.MinCommitBatch,
MaxCommitBatch: cfg.MaxCommitBatch,
CommitBatchWait: config.Duration(cfg.CommitBatchWait),
CommitBatchSlack: config.Duration(cfg.CommitBatchSlack),
AggregateAboveBaseFee: types.FIL(cfg.AggregateAboveBaseFee),
BatchPreCommitAboveBaseFee: types.FIL(cfg.BatchPreCommitAboveBaseFee),
CollateralFromMinerBalance: cfg.CollateralFromMinerBalance,
AvailableBalanceBuffer: types.FIL(cfg.AvailableBalanceBuffer),
DisableCollateralFallback: cfg.DisableCollateralFallback,
TerminateBatchMax: cfg.TerminateBatchMax,
TerminateBatchMin: cfg.TerminateBatchMin,
TerminateBatchWait: config.Duration(cfg.TerminateBatchWait),
}
})
return
}, nil
}
func NewGetSealConfigFunc(r *config.StorageMiner) (types2.GetSealingConfigFunc, error) {
return func() (out sealiface.Config, err error) {
err = readCfg(r, func(cfg *config.StorageMiner) {
// log.Infof("max sealing sectors: %v", cfg.Sealing.MaxSealingSectors)
out = sealiface.Config{
MaxWaitDealsSectors: cfg.Sealing.MaxWaitDealsSectors,
MaxSealingSectors: cfg.Sealing.MaxSealingSectors,
MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals,
WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay),
CommittedCapacitySectorLifetime: time.Duration(cfg.Sealing.CommittedCapacitySectorLifetime),
AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy,
FinalizeEarly: cfg.Sealing.FinalizeEarly,
BatchPreCommits: cfg.Sealing.BatchPreCommits,
MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch,
PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait),
PreCommitBatchSlack: time.Duration(cfg.Sealing.PreCommitBatchSlack),
AggregateCommits: cfg.Sealing.AggregateCommits,
MinCommitBatch: cfg.Sealing.MinCommitBatch,
MaxCommitBatch: cfg.Sealing.MaxCommitBatch,
CommitBatchWait: time.Duration(cfg.Sealing.CommitBatchWait),
CommitBatchSlack: time.Duration(cfg.Sealing.CommitBatchSlack),
AggregateAboveBaseFee: types.BigInt(cfg.Sealing.AggregateAboveBaseFee),
TerminateBatchMax: cfg.Sealing.TerminateBatchMax,
TerminateBatchMin: cfg.Sealing.TerminateBatchMin,
TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait),
BatchPreCommitAboveBaseFee: types.BigInt(cfg.Sealing.BatchPreCommitAboveBaseFee),
CollateralFromMinerBalance: cfg.Sealing.CollateralFromMinerBalance,
AvailableBalanceBuffer: types.BigInt(cfg.Sealing.AvailableBalanceBuffer),
DisableCollateralFallback: cfg.Sealing.DisableCollateralFallback,
StartEpochSealingBuffer: abi.ChainEpoch(cfg.Dealmaking.StartEpochSealingBuffer),
}
})
return
}, nil
}
func readCfg(cfg *config.StorageMiner, accessor func(*config.StorageMiner)) error | {
accessor(cfg)
return nil
} | identifier_body |
|
modules.go | .AuthNew(ctx, []auth.Permission{"admin"})
if err != nil {
return nil, xerrors.Errorf("creating storage auth header: %w", err)
}
headers := http.Header{}
headers.Add("Authorization", "Bearer "+string(token))
return sectorstorage.StorageAuth(headers), nil
}
func MinerID(ma types2.MinerAddress) (types2.MinerID, error) {
id, err := address.IDFromAddress(address.Address(ma))
return types2.MinerID(id), err
}
func MinerAddress(metaDataService *service.MetadataService) (types2.MinerAddress, error) {
ma, err := metaDataService.GetMinerAddress()
return types2.MinerAddress(ma), err
}
func SealProofType(maddr types2.MinerAddress, fnapi api.FullNode) (abi.RegisteredSealProof, error) {
mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK)
if err != nil {
return 0, err
}
networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK)
if err != nil {
return 0, err
}
return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType)
}
var StorageCounterDSPrefix = "/storage/nextid"
// nolint
type sidsc struct {
sc *storedcounter.StoredCounter
}
// nolint
func (s *sidsc) Next() (abi.SectorNumber, error) {
i, err := s.sc.Next()
return abi.SectorNumber(i), err
}
func SectorIDCounter(metaDataService *service.MetadataService) types2.SectorIDCounter {
return metaDataService
}
var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")
func LocalStorage(mctx MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls sectorstorage.URLs) (*stores.Local, error) {
ctx := LifecycleCtx(mctx, lc)
return stores.NewLocal(ctx, ls, si, urls)
}
func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote {
return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{})
}
func SectorStorage(mctx MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, repo repo.Repo) (*sectorstorage.Manager, error) {
ctx := LifecycleCtx(mctx, lc)
wsts := service.NewWorkCallService(repo, "sealer")
smsts := service.NewWorkStateService(repo)
sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStop: sst.Close,
})
return sst, nil
}
func GetParams(mctx MetricsCtx, spt abi.RegisteredSealProof) error {
ssize, err := spt.SectorSize()
if err != nil {
return err
}
ps, err := asset.Asset("fixtures/_assets/proof-params/parameters.json")
if err != nil {
return err
}
srs, err := asset.Asset("fixtures/_assets/proof-params/srs-inner-product.json")
if err != nil {
return err
}
if err := paramfetch.GetParams(mctx, ps, srs, uint64(ssize)); err != nil {
return xerrors.Errorf("get params: %w", err)
}
return nil
}
func StorageNetworkName(ctx MetricsCtx, a api.FullNode) (types2.NetworkName, error) {
/* if !build.Devnet {
return "testnetnet", nil
}*/
return a.StateNetworkName(ctx)
}
func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.AddressSelector, error) {
return func() (*storage.AddressSelector, error) {
as := &storage.AddressSelector{}
if addrConf == nil {
return as, nil
}
log.Infof("miner address config: %v", *addrConf)
for _, s := range addrConf.PreCommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing precommit control address: %w", err)
}
as.PreCommitControl = append(as.PreCommitControl, addr)
}
for _, s := range addrConf.CommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing commit control address: %w", err)
}
as.CommitControl = append(as.CommitControl, addr)
}
as.DisableOwnerFallback = addrConf.DisableOwnerFallback
as.DisableWorkerFallback = addrConf.DisableWorkerFallback
return as, nil
}
}
type StorageMinerParams struct {
fx.In
Lifecycle fx.Lifecycle
MetricsCtx MetricsCtx
API api.FullNode
Messager api.IMessager
MarketClient api2.MarketFullNode
MetadataService *service.MetadataService
LogService *service.LogService
SectorInfoService *service.SectorInfoService
Sealer sectorstorage.SectorManager
SectorIDCounter types2.SectorIDCounter
Verifier ffiwrapper.Verifier
Prover ffiwrapper.Prover
GetSealingConfigFn types2.GetSealingConfigFunc
Journal journal.Journal
AddrSel *storage.AddressSelector
NetworkParams *config.NetParamsConfig
}
func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) {
return func(params StorageMinerParams) (*storage.Miner, error) {
var (
metadataService = params.MetadataService
sectorinfoService = params.SectorInfoService
logService = params.LogService
mctx = params.MetricsCtx
lc = params.Lifecycle
api = params.API
messager = params.Messager
marketClient = params.MarketClient
sealer = params.Sealer
sc = params.SectorIDCounter
verif = params.Verifier
prover = params.Prover
gsd = params.GetSealingConfigFn
j = params.Journal
as = params.AddrSel
np = params.NetworkParams
)
maddr, err := metadataService.GetMinerAddress()
if err != nil {
return nil, err
}
ctx := LifecycleCtx(mctx, lc)
fps, err := storage.NewWindowedPoStScheduler(api, messager, fc, as, sealer, verif, sealer, j, maddr, np)
if err != nil {
return nil, err
}
sm, err := storage.NewMiner(api, messager, marketClient, maddr, metadataService, sectorinfoService, logService, sealer, sc, verif, prover, gsd, fc, j, as, np)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
go fps.Run(ctx)
return sm.Run(ctx)
},
OnStop: sm.Stop,
})
return sm, nil
}
}
func DoPoStWarmup(ctx MetricsCtx, api api.FullNode, metadataService *service.MetadataService, prover storage.WinningPoStProver) error {
maddr, err := metadataService.GetMinerAddress()
if err != nil {
return err
}
deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting deadlines: %w", err)
}
var sector abi.SectorNumber = math.MaxUint64
out:
for dlIdx := range deadlines {
partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
}
for _, partition := range partitions {
b, err := partition.ActiveSectors.First()
if err == bitfield.ErrNoBitsSet {
continue
}
if err != nil {
return err
}
sector = abi.SectorNumber(b)
break out
}
}
if sector == math.MaxUint64 {
log.Info("skipping winning PoSt warmup, no sectors")
return nil
}
log.Infow("starting winning PoSt warmup", "sector", sector)
start := time.Now()
var r abi.PoStRandomness = make([]byte, abi.RandomnessLength)
_, _ = rand.Read(r) |
si, err := api.StateSectorGetInfo(ctx, maddr, sector, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting sector info: %w", err)
} | random_line_split |
|
asset.rs | use uuid::Uuid;
use std::sync::Arc;
use error::{Error, Result};
pub struct Asset {
pub name: String,
pub bundle_offset: u64,
pub objects: HashMap<i64, ObjectInfo>,
pub is_loaded: bool,
pub endianness: Endianness,
pub tree: Option<TypeMetadata>,
pub types: HashMap<i64, Arc<TypeNode>>,
pub asset_refs: Vec<AssetOrRef>,
adds: Vec<(i64, i32)>,
pub typenames: HashMap<i64, String>,
// properties
metadata_size: u32,
file_size: u32,
pub format: u32,
pub data_offset: u32,
pub long_object_ids: bool,
}
impl Asset {
pub fn new(bundle: &mut AssetBundle) -> Result<Asset> {
let is_compressed = bundle.is_compressed();
let ref descriptor = bundle.descriptor;
let decompressed: Vec<u8>;
let mut asset = Asset {
bundle_offset: 0,
name: String::new(),
objects: HashMap::new(),
is_loaded: false,
endianness: Endianness::Big,
tree: None,
types: HashMap::new(),
// when requesting the first element, it should be the asset itself
asset_refs: vec![AssetOrRef::Asset],
adds: Vec::new(),
typenames: HashMap::new(),
metadata_size: 0,
file_size: 0,
format: 0,
data_offset: 0,
long_object_ids: false,
};
{
let buffer = match &mut bundle.signature {
&mut Signature::UnityFS(ref mut buf) => {
asset.bundle_offset = buf.tell();
return Ok(asset);
}
&mut Signature::UnityWeb(ref mut buf) | &mut Signature::UnityRaw(ref mut buf) => {
buf
}
_ => {
return Err(Error::InvalidSignatureError);
}
};
let offset = buffer.tell();
let header_size: u32;
if !is_compressed {
asset.name = buffer.read_string()?;
header_size = buffer.read_u32(&Endianness::Big)?;
buffer.read_u32(&Endianness::Big)?; // size
} else {
header_size = match descriptor {
&FSDescriptor::Raw(ref desc) => desc.asset_header_size,
_ => {
return Err(Error::AssetError("Invalid raw descriptor".to_string()));
}
};
}
let ofs = buffer.tell(); // save current offset so pointer can be later restored
if is_compressed {
let mut compressed_data = Vec::new();
try!(buffer.read_to_end(&mut compressed_data));
decompressed = match lzma::decompress(&mut compressed_data) {
Ok(data) => data,
Err(err) => {
return Err(Error::LZMADecompressionError(Box::new(err)));
}
};
asset.bundle_offset = 0;
try!(buffer.seek(SeekFrom::Start(ofs))); // restore pointer
} else {
asset.bundle_offset = offset + header_size as u64 - 4;
if asset.is_resource() {
asset.bundle_offset -= asset.name.len() as u64;
}
return Ok(asset);
}
}
// replace buffer in signature
bundle.signature = Signature::UnityRawCompressed(decompressed);
Ok(asset)
}
pub fn is_resource(&self) -> bool {
self.name.as_str().ends_with(".resource")
}
pub fn load_objects(&mut self, signature: &mut Signature) -> io::Result<()> {
if !self.is_loaded {
self.load(signature)?;
}
Ok(())
}
fn load(&mut self, signature: &mut Signature) -> Result<()> {
if self.is_resource() {
self.is_loaded = true;
return Ok(());
}
match signature {
&mut Signature::UnityFS(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRaw(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRawCompressed(ref mut buf) => {
self.load_from_buffer(&mut BufReader::new(Cursor::new(buf.as_slice())))?;
}
_ => {
return Err(Error::AssetError(format!(
"Signature not supported for loading objects: {:?}",
signature
)))
}
};
Ok(())
}
fn load_from_buffer<R: Read + Seek + Teller>(&mut self, buffer: &mut R) -> Result<()> {
let _ = buffer.seek(SeekFrom::Start(self.bundle_offset));
self.metadata_size = buffer.read_u32(&self.endianness)?;
self.file_size = buffer.read_u32(&self.endianness)?;
self.format = buffer.read_u32(&self.endianness)?;
self.data_offset = buffer.read_u32(&self.endianness)?;
if self.format >= 9 {
self.endianness = match buffer.read_u32(&self.endianness)? {
0 => Endianness::Little,
_ => Endianness::Big,
};
}
let tree = TypeMetadata::new(buffer, self.format, &self.endianness)?;
self.tree = Some(tree);
if (self.format >= 7) && (self.format <= 13) {
self.long_object_ids = buffer.read_u32(&self.endianness)? != 0
}
let num_objects = buffer.read_u32(&self.endianness)?;
for _ in 0..num_objects {
if self.format >= 14 {
buffer.align();
}
let obj = ObjectInfo::new(self, buffer)?;
self.register_object(obj)?;
}
if self.format >= 11 {
let num_adds = buffer.read_u32(&self.endianness)?;
for _ in 0..num_adds {
if self.format >= 14 {
buffer.align();
}
let id = self.read_id(buffer)?;
let add = buffer.read_i32(&self.endianness)?;
self.adds.push((id, add));
}
}
let num_refs = buffer.read_u32(&self.endianness)?;
if self.format >= 6 {
for _ in 0..num_refs {
let asset_ref = AssetRef::new(buffer, &self.endianness)?;
self.asset_refs.push(AssetOrRef::AssetRef(asset_ref));
}
}
let unk_string = buffer.read_string()?;
if unk_string != "" {
return Err(Error::AssetError(format!(
"Error while loading Asset, ending string is not empty but {:?}",
unk_string
)));
}
// we need to clone the keys to avoid borrow-checker problems
let mut keys: Vec<i64> = Vec::with_capacity(self.objects.keys().len());
{
let hashed_keys = self.objects.keys();
for k in hashed_keys {
keys.push(*k);
}
}
for k in keys {
let mut obj = self.objects.remove(&k).unwrap();
let type_name = obj.get_type(self, buffer);
obj.type_name = type_name;
self.typenames.insert(obj.type_id, obj.type_name.clone());
self.objects.insert(k, obj);
}
self.is_loaded = true;
Ok(())
}
fn register_object(&mut self, obj: ObjectInfo) -> Result<()> {
let ref tree = match &self.tree {
&Some(ref t) => t,
&None => return Ok(()),
};
match tree.type_trees.get(&obj.type_id) {
Some(o_type) => {
self.types.insert(obj.type_id, o_type.clone());
}
None => {
match self.types.get(&obj.type_id) {
Some(_) => {}
None => {
let ref trees = default_type_metadata()?.type_trees;
match trees.get(&(obj.class_id as i64)) {
Some(o) => {
self.types.insert(obj.type_id, o.clone());
}
None => {
// log warning
println!("Warning: {:?} is absent from structs.dat", obj.class_id);
// self.types.insert(obj.type_id, None)
}
};
}
};
}
};
match self.objects.get(&obj.path_id) {
Some(_) => {
return Err(Error::AssetError(format!(
"Duplicate asset object: {} (path_id={})",
obj,
obj.path_id
)))
}
None => {}
}
self.objects.insert(obj.path_id, obj);
Ok(())
}
pub fn read_id<R: Read + Seek + Teller>(&self, buffer: &mut R) -> io::Result<i64> {
if self.format >= 14 {
return buffer.read_i64(&self.endianness);
}
let result = buffer.read_i32(&self.endianness)? as i64;
return Ok(result);
}
pub fn get_file_by_id(&self, id: &i32) -> Result<String> {
match &self.asset_refs[*id as usize] {
&AssetOrRef::Asset => Ok(self.name.clone()),
&AssetOrRef::AssetRef(ref a_ref) => Ok(a_ref.file_path.clone()),
}
}
}
#[allow(dead_code)]
pub struct | AssetRef | identifier_name |
|
asset.rs | Read, Seek, SeekFrom};
use std::io;
use lzma;
use uuid::Uuid;
use std::sync::Arc;
use error::{Error, Result};
pub struct Asset {
pub name: String,
pub bundle_offset: u64,
pub objects: HashMap<i64, ObjectInfo>,
pub is_loaded: bool,
pub endianness: Endianness,
pub tree: Option<TypeMetadata>,
pub types: HashMap<i64, Arc<TypeNode>>,
pub asset_refs: Vec<AssetOrRef>,
adds: Vec<(i64, i32)>,
pub typenames: HashMap<i64, String>,
// properties
metadata_size: u32,
file_size: u32,
pub format: u32,
pub data_offset: u32,
pub long_object_ids: bool,
}
impl Asset {
pub fn new(bundle: &mut AssetBundle) -> Result<Asset> {
let is_compressed = bundle.is_compressed();
let ref descriptor = bundle.descriptor;
let decompressed: Vec<u8>;
let mut asset = Asset {
bundle_offset: 0,
name: String::new(),
objects: HashMap::new(),
is_loaded: false,
endianness: Endianness::Big,
tree: None,
types: HashMap::new(),
// when requesting the first element, it should be the asset itself
asset_refs: vec![AssetOrRef::Asset],
adds: Vec::new(),
typenames: HashMap::new(),
metadata_size: 0,
file_size: 0,
format: 0,
data_offset: 0,
long_object_ids: false,
};
{
let buffer = match &mut bundle.signature {
&mut Signature::UnityFS(ref mut buf) => {
asset.bundle_offset = buf.tell();
return Ok(asset);
}
&mut Signature::UnityWeb(ref mut buf) | &mut Signature::UnityRaw(ref mut buf) => {
buf
}
_ => {
return Err(Error::InvalidSignatureError);
}
};
let offset = buffer.tell();
let header_size: u32;
if !is_compressed {
asset.name = buffer.read_string()?;
header_size = buffer.read_u32(&Endianness::Big)?;
buffer.read_u32(&Endianness::Big)?; // size
} else {
header_size = match descriptor {
&FSDescriptor::Raw(ref desc) => desc.asset_header_size,
_ => {
return Err(Error::AssetError("Invalid raw descriptor".to_string()));
}
};
}
let ofs = buffer.tell(); // save current offset so pointer can be later restored
if is_compressed {
let mut compressed_data = Vec::new();
try!(buffer.read_to_end(&mut compressed_data));
decompressed = match lzma::decompress(&mut compressed_data) {
Ok(data) => data,
Err(err) => {
return Err(Error::LZMADecompressionError(Box::new(err)));
}
};
asset.bundle_offset = 0;
try!(buffer.seek(SeekFrom::Start(ofs))); // restore pointer
} else {
asset.bundle_offset = offset + header_size as u64 - 4;
if asset.is_resource() {
asset.bundle_offset -= asset.name.len() as u64;
}
return Ok(asset);
}
}
// replace buffer in signature
bundle.signature = Signature::UnityRawCompressed(decompressed);
Ok(asset)
}
pub fn is_resource(&self) -> bool {
self.name.as_str().ends_with(".resource")
}
pub fn load_objects(&mut self, signature: &mut Signature) -> io::Result<()> {
if !self.is_loaded {
self.load(signature)?;
}
Ok(())
}
fn load(&mut self, signature: &mut Signature) -> Result<()> {
if self.is_resource() {
self.is_loaded = true;
return Ok(());
}
match signature {
&mut Signature::UnityFS(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRaw(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRawCompressed(ref mut buf) => {
self.load_from_buffer(&mut BufReader::new(Cursor::new(buf.as_slice())))?;
}
_ => {
return Err(Error::AssetError(format!(
"Signature not supported for loading objects: {:?}",
signature
)))
}
};
Ok(())
}
fn load_from_buffer<R: Read + Seek + Teller>(&mut self, buffer: &mut R) -> Result<()> {
let _ = buffer.seek(SeekFrom::Start(self.bundle_offset));
self.metadata_size = buffer.read_u32(&self.endianness)?;
self.file_size = buffer.read_u32(&self.endianness)?;
self.format = buffer.read_u32(&self.endianness)?;
self.data_offset = buffer.read_u32(&self.endianness)?;
if self.format >= 9 {
self.endianness = match buffer.read_u32(&self.endianness)? {
0 => Endianness::Little,
_ => Endianness::Big,
};
}
let tree = TypeMetadata::new(buffer, self.format, &self.endianness)?;
self.tree = Some(tree);
if (self.format >= 7) && (self.format <= 13) {
self.long_object_ids = buffer.read_u32(&self.endianness)? != 0
}
let num_objects = buffer.read_u32(&self.endianness)?;
for _ in 0..num_objects {
if self.format >= 14 {
buffer.align();
}
let obj = ObjectInfo::new(self, buffer)?;
self.register_object(obj)?;
}
if self.format >= 11 {
let num_adds = buffer.read_u32(&self.endianness)?;
for _ in 0..num_adds {
if self.format >= 14 {
buffer.align();
}
let id = self.read_id(buffer)?;
let add = buffer.read_i32(&self.endianness)?;
self.adds.push((id, add));
}
}
let num_refs = buffer.read_u32(&self.endianness)?;
if self.format >= 6 {
for _ in 0..num_refs {
let asset_ref = AssetRef::new(buffer, &self.endianness)?;
self.asset_refs.push(AssetOrRef::AssetRef(asset_ref));
}
}
let unk_string = buffer.read_string()?;
if unk_string != "" {
return Err(Error::AssetError(format!(
"Error while loading Asset, ending string is not empty but {:?}",
unk_string
)));
}
// we need to clone the keys to avoid borrow-checker problems
let mut keys: Vec<i64> = Vec::with_capacity(self.objects.keys().len());
{
let hashed_keys = self.objects.keys();
for k in hashed_keys {
keys.push(*k);
}
}
for k in keys {
let mut obj = self.objects.remove(&k).unwrap();
let type_name = obj.get_type(self, buffer);
obj.type_name = type_name;
self.typenames.insert(obj.type_id, obj.type_name.clone());
self.objects.insert(k, obj);
}
self.is_loaded = true;
Ok(())
}
fn register_object(&mut self, obj: ObjectInfo) -> Result<()> {
let ref tree = match &self.tree {
&Some(ref t) => t,
&None => return Ok(()),
};
match tree.type_trees.get(&obj.type_id) {
Some(o_type) => {
self.types.insert(obj.type_id, o_type.clone());
}
None => {
match self.types.get(&obj.type_id) {
Some(_) => {}
None => {
let ref trees = default_type_metadata()?.type_trees;
match trees.get(&(obj.class_id as i64)) {
Some(o) => {
self.types.insert(obj.type_id, o.clone());
}
None => {
// log warning
println!("Warning: {:?} is absent from structs.dat", obj.class_id);
// self.types.insert(obj.type_id, None)
}
};
}
};
}
};
match self.objects.get(&obj.path_id) {
Some(_) => {
return Err(Error::AssetError(format!(
"Duplicate asset object: {} (path_id={})",
obj,
obj.path_id
)))
}
None => |
}
self.objects.insert(obj.path_id, obj);
Ok(())
}
pub fn read_id<R: Read + Seek + Teller>(&self, buffer: &mut R) -> io::Result<i64> {
if self.format >= 14 {
return buffer.read_i64(&self.endianness);
}
let result = buffer.read_i32(&self.endianness)? as i64;
return Ok(result);
}
pub fn get_file_by_id(&self, id: &i32) -> Result<String> {
match &self.asset_refs[*id as usize] {
&AssetOrRef::Asset => Ok(self.name.clone()),
&AssetOrRef::AssetRef(ref a_ref) => Ok(a_ref.file | {} | conditional_block |
asset.rs | ,
name: String::new(),
objects: HashMap::new(),
is_loaded: false,
endianness: Endianness::Big,
tree: None,
types: HashMap::new(),
// when requesting the first element, it should be the asset itself
asset_refs: vec![AssetOrRef::Asset],
adds: Vec::new(),
typenames: HashMap::new(),
metadata_size: 0,
file_size: 0,
format: 0,
data_offset: 0,
long_object_ids: false,
};
{
let buffer = match &mut bundle.signature {
&mut Signature::UnityFS(ref mut buf) => {
asset.bundle_offset = buf.tell();
return Ok(asset);
}
&mut Signature::UnityWeb(ref mut buf) | &mut Signature::UnityRaw(ref mut buf) => {
buf
}
_ => {
return Err(Error::InvalidSignatureError);
}
};
let offset = buffer.tell();
let header_size: u32;
if !is_compressed {
asset.name = buffer.read_string()?;
header_size = buffer.read_u32(&Endianness::Big)?;
buffer.read_u32(&Endianness::Big)?; // size
} else {
header_size = match descriptor {
&FSDescriptor::Raw(ref desc) => desc.asset_header_size,
_ => {
return Err(Error::AssetError("Invalid raw descriptor".to_string()));
}
};
}
let ofs = buffer.tell(); // save current offset so pointer can be later restored
if is_compressed {
let mut compressed_data = Vec::new();
try!(buffer.read_to_end(&mut compressed_data));
decompressed = match lzma::decompress(&mut compressed_data) {
Ok(data) => data,
Err(err) => {
return Err(Error::LZMADecompressionError(Box::new(err)));
}
};
asset.bundle_offset = 0;
try!(buffer.seek(SeekFrom::Start(ofs))); // restore pointer
} else {
asset.bundle_offset = offset + header_size as u64 - 4;
if asset.is_resource() {
asset.bundle_offset -= asset.name.len() as u64;
}
return Ok(asset);
}
}
// replace buffer in signature
bundle.signature = Signature::UnityRawCompressed(decompressed);
Ok(asset)
}
pub fn is_resource(&self) -> bool {
self.name.as_str().ends_with(".resource")
}
pub fn load_objects(&mut self, signature: &mut Signature) -> io::Result<()> {
if !self.is_loaded {
self.load(signature)?;
}
Ok(())
}
fn load(&mut self, signature: &mut Signature) -> Result<()> {
if self.is_resource() {
self.is_loaded = true;
return Ok(());
}
match signature {
&mut Signature::UnityFS(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRaw(ref mut buf) => {
self.load_from_buffer(buf)?;
}
&mut Signature::UnityRawCompressed(ref mut buf) => {
self.load_from_buffer(&mut BufReader::new(Cursor::new(buf.as_slice())))?;
}
_ => {
return Err(Error::AssetError(format!(
"Signature not supported for loading objects: {:?}",
signature
)))
}
};
Ok(())
}
fn load_from_buffer<R: Read + Seek + Teller>(&mut self, buffer: &mut R) -> Result<()> {
let _ = buffer.seek(SeekFrom::Start(self.bundle_offset));
self.metadata_size = buffer.read_u32(&self.endianness)?;
self.file_size = buffer.read_u32(&self.endianness)?;
self.format = buffer.read_u32(&self.endianness)?;
self.data_offset = buffer.read_u32(&self.endianness)?;
if self.format >= 9 {
self.endianness = match buffer.read_u32(&self.endianness)? {
0 => Endianness::Little,
_ => Endianness::Big,
};
}
let tree = TypeMetadata::new(buffer, self.format, &self.endianness)?;
self.tree = Some(tree);
if (self.format >= 7) && (self.format <= 13) {
self.long_object_ids = buffer.read_u32(&self.endianness)? != 0
}
let num_objects = buffer.read_u32(&self.endianness)?;
for _ in 0..num_objects {
if self.format >= 14 {
buffer.align();
}
let obj = ObjectInfo::new(self, buffer)?;
self.register_object(obj)?;
}
if self.format >= 11 {
let num_adds = buffer.read_u32(&self.endianness)?;
for _ in 0..num_adds {
if self.format >= 14 {
buffer.align();
}
let id = self.read_id(buffer)?;
let add = buffer.read_i32(&self.endianness)?;
self.adds.push((id, add));
}
}
let num_refs = buffer.read_u32(&self.endianness)?;
if self.format >= 6 {
for _ in 0..num_refs {
let asset_ref = AssetRef::new(buffer, &self.endianness)?;
self.asset_refs.push(AssetOrRef::AssetRef(asset_ref));
}
}
let unk_string = buffer.read_string()?;
if unk_string != "" {
return Err(Error::AssetError(format!(
"Error while loading Asset, ending string is not empty but {:?}",
unk_string
)));
}
// we need to clone the keys to avoid borrow-checker problems
let mut keys: Vec<i64> = Vec::with_capacity(self.objects.keys().len());
{
let hashed_keys = self.objects.keys();
for k in hashed_keys {
keys.push(*k);
}
}
for k in keys {
let mut obj = self.objects.remove(&k).unwrap();
let type_name = obj.get_type(self, buffer);
obj.type_name = type_name;
self.typenames.insert(obj.type_id, obj.type_name.clone());
self.objects.insert(k, obj);
}
self.is_loaded = true;
Ok(())
}
fn register_object(&mut self, obj: ObjectInfo) -> Result<()> {
let ref tree = match &self.tree {
&Some(ref t) => t,
&None => return Ok(()),
};
match tree.type_trees.get(&obj.type_id) {
Some(o_type) => {
self.types.insert(obj.type_id, o_type.clone());
}
None => {
match self.types.get(&obj.type_id) {
Some(_) => {}
None => {
let ref trees = default_type_metadata()?.type_trees;
match trees.get(&(obj.class_id as i64)) {
Some(o) => {
self.types.insert(obj.type_id, o.clone());
}
None => {
// log warning
println!("Warning: {:?} is absent from structs.dat", obj.class_id);
// self.types.insert(obj.type_id, None)
}
};
}
};
}
};
match self.objects.get(&obj.path_id) {
Some(_) => {
return Err(Error::AssetError(format!(
"Duplicate asset object: {} (path_id={})",
obj,
obj.path_id
)))
}
None => {}
}
self.objects.insert(obj.path_id, obj);
Ok(())
}
pub fn read_id<R: Read + Seek + Teller>(&self, buffer: &mut R) -> io::Result<i64> {
if self.format >= 14 {
return buffer.read_i64(&self.endianness);
}
let result = buffer.read_i32(&self.endianness)? as i64;
return Ok(result);
}
pub fn get_file_by_id(&self, id: &i32) -> Result<String> {
match &self.asset_refs[*id as usize] {
&AssetOrRef::Asset => Ok(self.name.clone()),
&AssetOrRef::AssetRef(ref a_ref) => Ok(a_ref.file_path.clone()),
}
}
}
#[allow(dead_code)]
pub struct AssetRef {
asset_path: String,
guid: Uuid,
asset_type: i32,
pub file_path: String,
// probably want to add a reference to the calling Asset itself
}
impl AssetRef {
pub fn new<R: Read + Seek + Teller>(
buffer: &mut R,
endianness: &Endianness,
) -> Result<AssetRef> {
let asset_path = buffer.read_string()?;
let mut uuid_buffer = [0; 16];
buffer.read_exact(&mut uuid_buffer)?;
let guid = match Uuid::from_bytes(&uuid_buffer) {
Ok(uuid) => uuid,
Err(err) => return Err(Error::UuidError(format!("{}", err))),
};
let asset_type = buffer.read_i32(endianness)?;
let file_path = buffer.read_string()?;
Ok(AssetRef {
asset_path: asset_path,
guid: guid,
asset_type: asset_type,
file_path: file_path,
})
}
}
| pub enum AssetOrRef {
Asset,
AssetRef(AssetRef), | random_line_split |
|
TripAdvisor.py | .get(URL)
labelEng = ''
try:
labelEng = driver.find_element_by_css_selector("[for='taplc_prodp13n_hr_sur_review_filter_controls_0_filterLang_en']")
print('no exception occurred')
except:
continue
strNum = labelEng.find_element_by_css_selector('span')
# strip the parentheses and any thousands separator, e.g. '(1,234)' -> 1234
numOfEng = int(strNum.text[1:-1].replace(',', ''))
printTheNameOfRestaurant(driver, numOfEng)
if numOfEng == 0:
continue
numOfPages = calculateNumOfPages(numOfEng)
collectReviews(driver, URL)
for i in range(1, numOfPages):
infixPage = 10*i
infix = '-or' + str(infixPage)
URL = preUrl+postfix[0]+infix+postfix[1]
driver.get(URL)
collectReviews(driver, URL)
print('----------completeURL finished----------')
driver.quit()
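# Illustrative sketch of the pagination scheme used by completeURL: each page
# shifts the review offset by 10 via an '-orN' infix. The URL pieces below are
# hypothetical examples, not values taken from the postfix file.
def build_page_urls(pre_url, postfix, num_of_pages):
    # page 0 has no infix; pages 1..n-1 use '-or10', '-or20', ...
    urls = [pre_url + postfix[0] + postfix[1]]
    for i in range(1, num_of_pages):
        urls.append(pre_url + postfix[0] + '-or' + str(10 * i) + postfix[1])
    return urls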
# Print the name of the restaurant on the 'TripAdvisor.com'
# Argument: web driver especially phantomjs, the number of English reviews, WEBDRIVER, INTEGER
# Return type: none
def printTheNameOfRestaurant(driver, numOfEng):
h1s = driver.find_elements_by_tag_name('h1')
for h1 in h1s:
if h1.get_attribute('id') == 'HEADING':
nameOfRestaurant = h1.text
print('RESTAURANT NAME : ' + nameOfRestaurant + ' (' + str(numOfEng) + ')')
writeNameToFile(fileName, nameOfRestaurant)
break
# Calculate the number of pages of reviews by using total number of reviews
# Argument: the number of reviews, INTEGER
# Return type: number of pages, INTEGER
def calculateNumOfPages(numOfReviews):
print('----------calculateNumOfPages is called----------')
if int(numOfReviews/10) > 0:
if numOfReviews%10 == 0:
numOfPages = int(numOfReviews/10)
else:
numOfPages = int(numOfReviews/10) + 1
elif int(numOfReviews/10) == 0:
if numOfReviews%10 == 0:
numOfPages = 0
else:
numOfPages = 1
print('----------calculateNumOfPages finished----------')
return numOfPages
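# Equivalent sketch of the branching above: the page count is just a ceiling
# division by the page size (10 reviews per page).
import math

def calculate_num_of_pages_ceil(num_of_reviews):
    # matches calculateNumOfPages for every non-negative input
    return math.ceil(num_of_reviews / 10)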
# Collect the reviews from 'TripAdvisor.com'
# Argument: URL for collecting reviews and ratings, STRING
# Return type: none
def collectReviews(driver, URL):
print('----------collectReviews is called----------')
rating = []
dates = []
locations = []
try:
parentOfMore = driver.find_element_by_class_name('partnerRvw')
more = parentOfMore.find_element_by_class_name('taLnk')
more.click()
time.sleep(1)
except:
print('there are no MORE reviews')
finally:
# get the reviews
reviews = driver.find_elements_by_class_name('entry')
reviews = getRidOfEmpty(reviews)
# get the ratings
basicReview = driver.find_elements_by_class_name('basic_review')
print("---------------start------------ : " + str(len(basicReview)))
print("URL : " + URL)
rating = getRating(driver)
ratings = analyzeRating(rating)
print(ratings)
locations = getMemberLocationInfo(driver)
dates = getDateOfReviews(driver)
# put the data into file
print('**************************************** START PRINT ****************************************')
print('size: ', str(len(reviews)), str(len(ratings)), str(len(locations)), str(len(dates)))
writeReviewsToFile(fileName, reviews, ratings, locations, dates)
print('**************************************** END PRINT ****************************************')
print('----------collectReviews finished----------')
def getRating(driver):
result = []
basicReviews = driver.find_elements_by_class_name('basic_review')
for inner in basicReviews:
ratingClass = inner.find_element_by_class_name('ui_bubble_rating')
result.append(ratingClass.get_attribute('alt'))
return result
# Get the date when the reviews were posted
# Argument:
# Return type: result, LIST
def getDateOfReviews(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
col = inner.find_element_by_class_name('col2of2')
date = col.find_element_by_class_name('ratingDate')
realDate = ''
if 'ago' in date.text or 'yesterday' in date.text or 'today' in date.text:
realDate = date.get_attribute('title')
result.append(convertIntoOtherForm(realDate))
else:
realDate = date.text[9:]
result.append(convertIntoOtherForm(realDate))
return result
def convertIntoOtherForm(realDate):
result = ''
splitedDate = realDate.split(' ')
if splitedDate[0] == 'January': splitedDate[0] = '01'
elif splitedDate[0] == 'February': splitedDate[0] = '02'
elif splitedDate[0] == 'March': splitedDate[0] = '03'
elif splitedDate[0] == 'April': splitedDate[0] = '04'
elif splitedDate[0] == 'May': splitedDate[0] = '05'
elif splitedDate[0] == 'June': splitedDate[0] = '06'
elif splitedDate[0] == 'July': splitedDate[0] = '07'
elif splitedDate[0] == 'August': splitedDate[0] = '08'
elif splitedDate[0] == 'September': splitedDate[0] = '09'
elif splitedDate[0] == 'October': splitedDate[0] = '10'
elif splitedDate[0] == 'November': splitedDate[0] = '11'
elif splitedDate[0] == 'December': splitedDate[0] = '12'
for s in splitedDate[1]:
if s == ',': break
result = result+s
splitedDate[1] = result
if len(splitedDate[1]) == 1:
splitedDate[1] = '0'+splitedDate[1]
#print('DATE is '+splitedDate[2]+splitedDate[0]+splitedDate[1])
return splitedDate[2]+splitedDate[0]+splitedDate[1]
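# Equivalent sketch using the standard library: strptime parses the month name
# and strftime zero-pads, e.g. convert_date('July 4, 2016') == '20160704'.
from datetime import datetime

def convert_date(real_date):
    # assumes the 'Month D, YYYY' form produced by TripAdvisor's ratingDate
    return datetime.strptime(real_date, '%B %d, %Y').strftime('%Y%m%d')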
def organizeReviews(date, country):
res = []
if country == 'korea':
filename = 'korea_'+prefixOfFileName+'.csv'
flag = 20161107
elif country == 'result_shanghai':
filename = 'shanghai_'+prefixOfFileName+'.csv'
flag = 20160921
elif country == 'result_singapore':
filename = 'singapore_'+prefixOfFileName+'.csv'
flag = 20160721
if int(date) >= flag:
res.append('after')
writeReviewsToFile(filename, res)
else:
res.append('before')
writeReviewsToFile(filename, res)
return res
# Get the country information from each review
# Argument: driver
# Return type: result, LIST
def getMemberLocationInfo(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
#col = inner.find_element_by_class_name('col1of2')
#memberInfo = inner.find_element_by_class_name('member_info')
try:
locationClass = inner.find_element_by_class_name('location')
location = locationClass.text
result.append(location)
except:
result.append('')
return result
# Get rid of the empty elements in the list of reviews
def getRidOfEmpty(review):
result = []
for e in review:
if e.text != '':
result.append(e.text)
return result
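# The helper above is a plain filter-and-map; an equivalent comprehension:
def get_rid_of_empty(review):
    # keep only the text of non-empty review elements
    return [e.text for e in review if e.text != '']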
# Convert the crawled rating into useful data
# Argument: rating, LIST
# Return: rating, LIST
def analyzeRating(rating):
print('----------analyzeRating is called----------')
result = []
#i = 0
for e in rating:
| if e == '1.0 of 5 bubbles':
result.append('1')
elif e == '2.0 of 5 bubbles':
result.append('2')
elif e == '3.0 of 5 bubbles':
result.append('3')
elif e == '4.0 of 5 bubbles':
result.append('4')
elif e == '5.0 of 5 bubbles':
result.append('5')
elif e == '1.5 of 5 bubbles':
result.append('1.5')
elif e == '2.5 of 5 bubbles':
result.append('2.5')
elif e == '3.5 of 5 bubbles':
result.append('3.5')
elif e == '4.5 of 5 bubbles':
result.append('4.5') | conditional_block |
|
TripAdvisor.py | URL is called----------')
for postfix in information:
URL = preUrl+postfix[0]+postfix[1]
driver.get(URL)
labelEng = ''
try:
labelEng = driver.find_element_by_css_selector("[for='taplc_prodp13n_hr_sur_review_filter_controls_0_filterLang_en']")
print('no exception occurred')
except:
continue
strNum = labelEng.find_element_by_css_selector('span')
# strip the parentheses and any thousands separator, e.g. '(1,234)' -> 1234
numOfEng = int(strNum.text[1:-1].replace(',', ''))
printTheNameOfRestaurant(driver, numOfEng)
if numOfEng == 0:
continue
numOfPages = calculateNumOfPages(numOfEng)
collectReviews(driver, URL)
for i in range(1, numOfPages):
infixPage = 10*i
infix = '-or' + str(infixPage)
URL = preUrl+postfix[0]+infix+postfix[1]
driver.get(URL)
collectReviews(driver, URL)
print('----------completeURL finished----------')
driver.quit()
# Print the name of the restaurant on the 'TripAdvisor.com'
# Argument: web driver especially phantomjs, the number of English reviews, WEBDRIVER, INTEGER
# Return type: none
def printTheNameOfRestaurant(driver, numOfEng):
h1s = driver.find_elements_by_tag_name('h1')
for h1 in h1s:
if h1.get_attribute('id') == 'HEADING':
nameOfRestaurant = h1.text
print('RESTAURANT NAME : ' + nameOfRestaurant + ' (' + str(numOfEng) + ')')
writeNameToFile(fileName, nameOfRestaurant)
break
# Calculate the number of pages of reviews by using total number of reviews
# Argument: the number of reviews, INTEGER
# Return type: number of pages, INTEGER
def calculateNumOfPages(numOfReviews):
print('----------calculateNumOfPages is called----------')
if int(numOfReviews/10) > 0:
if numOfReviews%10 == 0:
numOfPages = int(numOfReviews/10)
else:
numOfPages = int(numOfReviews/10) + 1
elif int(numOfReviews/10) == 0:
if numOfReviews%10 == 0:
numOfPages = 0
else:
numOfPages = 1
print('----------calculateNumOfPages finished----------')
return numOfPages
# Collect the reviews from 'TripAdvisor.com'
# Argument: URL for collecting reviews and ratings, STRING
# Return type: none
def collectReviews(driver, URL):
print('----------collectReviews is called----------')
rating = []
dates = []
locations = []
try:
parentOfMore = driver.find_element_by_class_name('partnerRvw')
more = parentOfMore.find_element_by_class_name('taLnk')
more.click()
time.sleep(1)
except:
print('there are no MORE reviews')
finally:
# get the reviews
reviews = driver.find_elements_by_class_name('entry')
reviews = getRidOfEmpty(reviews)
# get the ratings
basicReview = driver.find_elements_by_class_name('basic_review')
print("---------------start------------ : " + str(len(basicReview)))
print("URL : " + URL)
rating = getRating(driver)
ratings = analyzeRating(rating)
print(ratings)
locations = getMemberLocationInfo(driver)
dates = getDateOfReviews(driver)
# put the data into file
print('**************************************** START PRINT ****************************************')
print('size: ', str(len(reviews)), str(len(ratings)), str(len(locations)), str(len(dates)))
writeReviewsToFile(fileName, reviews, ratings, locations, dates)
print('**************************************** END PRINT ****************************************')
print('----------collectReviews finished----------')
def getRating(driver):
result = []
basicReviews = driver.find_elements_by_class_name('basic_review')
for inner in basicReviews:
ratingClass = inner.find_element_by_class_name('ui_bubble_rating')
result.append(ratingClass.get_attribute('alt'))
return result
# Get the date when the reviews were posted
# Argument:
# Return type: result, LIST
def getDateOfReviews(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
col = inner.find_element_by_class_name('col2of2')
date = col.find_element_by_class_name('ratingDate')
realDate = ''
if 'ago' in date.text or 'yesterday' in date.text or 'today' in date.text:
realDate = date.get_attribute('title')
result.append(convertIntoOtherForm(realDate))
else:
realDate = date.text[9:]
result.append(convertIntoOtherForm(realDate))
return result
def convertIntoOtherForm(realDate):
result = ''
splitedDate = realDate.split(' ')
if splitedDate[0] == 'January': splitedDate[0] = '01'
elif splitedDate[0] == 'February': splitedDate[0] = '02'
elif splitedDate[0] == 'March': splitedDate[0] = '03'
elif splitedDate[0] == 'April': splitedDate[0] = '04'
elif splitedDate[0] == 'May': splitedDate[0] = '05'
elif splitedDate[0] == 'June': splitedDate[0] = '06'
elif splitedDate[0] == 'July': splitedDate[0] = '07'
elif splitedDate[0] == 'August': splitedDate[0] = '08'
elif splitedDate[0] == 'September': splitedDate[0] = '09'
elif splitedDate[0] == 'October': splitedDate[0] = '10'
elif splitedDate[0] == 'November': splitedDate[0] = '11'
elif splitedDate[0] == 'December': splitedDate[0] = '12'
for s in splitedDate[1]:
if s == ',': break
result = result+s
splitedDate[1] = result
if len(splitedDate[1]) == 1:
splitedDate[1] = '0'+splitedDate[1]
#print('DATE is '+splitedDate[2]+splitedDate[0]+splitedDate[1])
return splitedDate[2]+splitedDate[0]+splitedDate[1]
def organizeReviews(date, country):
res = []
if country == 'korea':
filename = 'korea_'+prefixOfFileName+'.csv'
flag = 20161107
elif country == 'result_shanghai':
filename = 'shanghai_'+prefixOfFileName+'.csv'
flag = 20160921
elif country == 'result_singapore':
filename = 'singapore_'+prefixOfFileName+'.csv'
flag = 20160721
if int(date) >= flag:
res.append('after')
writeReviewsToFile(filename, res)
else:
res.append('before')
writeReviewsToFile(filename, res)
return res
# Get the country information from each review
# Argument: driver
# Return type: result, LIST
def | (driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
#col = inner.find_element_by_class_name('col1of2')
#memberInfo = inner.find_element_by_class_name('member_info')
try:
locationClass = inner.find_element_by_class_name('location')
location = locationClass.text
result.append(location)
except:
result.append('')
return result
# Get rid of the empty elements in the list of reviews
def getRidOfEmpty(review):
result = []
for e in review:
if e.text != '':
result.append(e.text)
return result
# Convert the crawled rating into useful data
# Argument: rating , LIST
# Return: rating, LIST
def analyzeRating(rating):
print('----------analyzeRating is called----------')
result = []
#i = 0
for e in rating:
if e == '1.0 of 5 bubbles':
result.append('1')
elif e == '2.0 of 5 bubbles':
result.append('2')
elif e == '3.0 of 5 bubbles':
result.append('3')
elif e == '4.0 of 5 bubbles':
result.append('4')
elif e == '5.0 of 5 bubbles':
result.append('5')
elif e == '1.5 of 5 bubbles':
result.append('1.5')
elif e == '2.5 of 5 bubbles':
result.append('2.5')
elif e == '3.5 of 5 bubbles | getMemberLocationInfo | identifier_name |
TripAdvisor.py | URL is called----------')
for postfix in information:
URL = preUrl+postfix[0]+postfix[1]
driver.get(URL)
labelEng = ''
try:
labelEng = driver.find_element_by_css_selector("[for='taplc_prodp13n_hr_sur_review_filter_controls_0_filterLang_en']")
print('no exception occurred')
except:
continue
strNum = labelEng.find_element_by_css_selector('span')
# strip the parentheses and any thousands separator, e.g. '(1,234)' -> 1234
numOfEng = int(strNum.text[1:-1].replace(',', ''))
printTheNameOfRestaurant(driver, numOfEng)
if numOfEng == 0:
continue
numOfPages = calculateNumOfPages(numOfEng)
collectReviews(driver, URL)
for i in range(1, numOfPages):
infixPage = 10*i
infix = '-or' + str(infixPage)
URL = preUrl+postfix[0]+infix+postfix[1]
driver.get(URL)
collectReviews(driver, URL)
print('----------completeURL finished----------')
driver.quit()
# Print the name of the restaurant on the 'TripAdvisor.com'
# Argument: web driver especially phantomjs, the number of English reviews, WEBDRIVER, INTEGER
# Return type: none
def printTheNameOfRestaurant(driver, numOfEng):
h1s = driver.find_elements_by_tag_name('h1')
for h1 in h1s:
if h1.get_attribute('id') == 'HEADING':
nameOfRestaurant = h1.text
print('RESTAURANT NAME : ' + nameOfRestaurant + ' (' + str(numOfEng) + ')')
writeNameToFile(fileName, nameOfRestaurant)
break
# Calculate the number of pages of reviews by using total number of reviews
# Argument: the number of reviews, INTEGER
# Return type: number of pages, INTEGER
def calculateNumOfPages(numOfReviews):
|
# Collect the reviews from 'TripAdvisor.com'
# Argument: URL for collecting reviews and ratings, STRING
# Return type: none
def collectReviews(driver, URL):
print('----------collectReviews is called----------')
rating = []
dates = []
locations = []
try:
parentOfMore = driver.find_element_by_class_name('partnerRvw')
more = parentOfMore.find_element_by_class_name('taLnk')
more.click()
time.sleep(1)
except:
print('there are no MORE reviews')
finally:
# get the reviews
reviews = driver.find_elements_by_class_name('entry')
reviews = getRidOfEmpty(reviews)
# get the ratings
basicReview = driver.find_elements_by_class_name('basic_review')
print("---------------start------------ : " + str(len(basicReview)))
print("URL : " + URL)
rating = getRating(driver)
ratings = analyzeRating(rating)
print(ratings)
locations = getMemberLocationInfo(driver)
dates = getDateOfReviews(driver)
# put the data into file
print('**************************************** START PRINT ****************************************')
print('size: ', str(len(reviews)), str(len(ratings)), str(len(locations)), str(len(dates)))
writeReviewsToFile(fileName, reviews, ratings, locations, dates)
print('**************************************** END PRINT ****************************************')
print('----------collectReviews finished----------')
def getRating(driver):
result = []
basicReviews = driver.find_elements_by_class_name('basic_review')
for inner in basicReviews:
ratingClass = inner.find_element_by_class_name('ui_bubble_rating')
result.append(ratingClass.get_attribute('alt'))
return result
# Get the date when the reviews were posted
# Argument:
# Return type: result, LIST
def getDateOfReviews(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
col = inner.find_element_by_class_name('col2of2')
date = col.find_element_by_class_name('ratingDate')
realDate = ''
if 'ago' in date.text or 'yesterday' in date.text or 'today' in date.text:
realDate = date.get_attribute('title')
result.append(convertIntoOtherForm(realDate))
else:
realDate = date.text[9:]
result.append(convertIntoOtherForm(realDate))
return result
def convertIntoOtherForm(realDate):
result = ''
splitedDate = realDate.split(' ')
if splitedDate[0] == 'January': splitedDate[0] = '01'
elif splitedDate[0] == 'February': splitedDate[0] = '02'
elif splitedDate[0] == 'March': splitedDate[0] = '03'
elif splitedDate[0] == 'April': splitedDate[0] = '04'
elif splitedDate[0] == 'May': splitedDate[0] = '05'
elif splitedDate[0] == 'June': splitedDate[0] = '06'
elif splitedDate[0] == 'July': splitedDate[0] = '07'
elif splitedDate[0] == 'August': splitedDate[0] = '08'
elif splitedDate[0] == 'September': splitedDate[0] = '09'
elif splitedDate[0] == 'October': splitedDate[0] = '10'
elif splitedDate[0] == 'November': splitedDate[0] = '11'
elif splitedDate[0] == 'December': splitedDate[0] = '12'
for s in splitedDate[1]:
if s == ',': break
result = result+s
splitedDate[1] = result
if len(splitedDate[1]) == 1:
splitedDate[1] = '0'+splitedDate[1]
#print('DATE is '+splitedDate[2]+splitedDate[0]+splitedDate[1])
return splitedDate[2]+splitedDate[0]+splitedDate[1]
def organizeReviews(date, country):
res = []
if country == 'korea':
filename = 'korea_'+prefixOfFileName+'.csv'
flag = 20161107
elif country == 'result_shanghai':
filename = 'shanghai_'+prefixOfFileName+'.csv'
flag = 20160921
elif country == 'result_singapore':
filename = 'singapore_'+prefixOfFileName+'.csv'
flag = 20160721
if int(date) >= flag:
res.append('after')
writeReviewsToFile(filename, res)
else:
res.append('before')
writeReviewsToFile(filename, res)
return res
# Get the country information from each review
# Argument: driver
# Return type: result, LIST
def getMemberLocationInfo(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
#col = inner.find_element_by_class_name('col1of2')
#memberInfo = inner.find_element_by_class_name('member_info')
try:
locationClass = inner.find_element_by_class_name('location')
location = locationClass.text
result.append(location)
except:
result.append('')
return result
# Get rid of the empty elements in the list of reviews
def getRidOfEmpty(review):
result = []
for e in review:
if e.text != '':
result.append(e.text)
return result
# Convert the crawled rating into useful data
# Argument: rating, LIST
# Return: rating, LIST
def analyzeRating(rating):
print('----------analyzeRating is called----------')
result = []
#i = 0
for e in rating:
if e == '1.0 of 5 bubbles':
result.append('1')
elif e == '2.0 of 5 bubbles':
result.append('2')
elif e == '3.0 of 5 bubbles':
result.append('3')
elif e == '4.0 of 5 bubbles':
result.append('4')
elif e == '5.0 of 5 bubbles':
result.append('5')
elif e == '1.5 of 5 bubbles':
result.append('1.5')
elif e == '2.5 of 5 bubbles':
result.append('2.5')
elif e == '3.5 of 5 bubbles':
| print('----------calculateNumOfPages is called----------')
if int(numOfReviews/10) > 0:
if numOfReviews%10 == 0:
numOfPages = int(numOfReviews/10)
else:
numOfPages = int(numOfReviews/10) + 1
elif int(numOfReviews/10) == 0:
if numOfReviews%10 == 0:
numOfPages = 0
else:
numOfPages = 1
print('----------calculateNumOfPages finished----------')
return numOfPages | identifier_body |
TripAdvisor.py | )
for i in range(1, numOfPages):
infixPage = 10*i
infix = '-or' + str(infixPage)
URL = preUrl+postfix[0]+infix+postfix[1]
driver.get(URL)
collectReviews(driver, URL)
print('----------completeURL finished----------')
driver.quit()
# Print the name of the restaurant on the 'TripAdvisor.com'
# Argument: web driver especially phantomjs, the number of English reviews, WEBDRIVER, INTEGER
# Return type: none
def printTheNameOfRestaurant(driver, numOfEng):
h1s = driver.find_elements_by_tag_name('h1')
for h1 in h1s:
if h1.get_attribute('id') == 'HEADING':
nameOfRestaurant = h1.text
print('RESTAURANT NAME : ' + nameOfRestaurant + ' (' + str(numOfEng) + ')')
writeNameToFile(fileName, nameOfRestaurant)
break
# Calculate the number of pages of reviews by using total number of reviews
# Argument: the number of reviews, INTEGER
# Return type: number of pages, INTEGER
def calculateNumOfPages(numOfReviews):
print('----------calculateNumOfPages is called----------')
if int(numOfReviews/10) > 0:
if numOfReviews%10 == 0:
numOfPages = int(numOfReviews/10)
else:
numOfPages = int(numOfReviews/10) + 1
elif int(numOfReviews/10) == 0:
if numOfReviews%10 == 0:
numOfPages = 0
else:
numOfPages = 1
print('----------calculateNumOfPages finished----------')
return numOfPages
# Collect the reviews from 'TripAdvisor.com'
# Argument: URL for collecting reviews and ratings, STRING
# Return type: none
def collectReviews(driver, URL):
print('----------collectReviews is called----------')
rating = []
dates = []
locations = []
try:
parentOfMore = driver.find_element_by_class_name('partnerRvw')
more = parentOfMore.find_element_by_class_name('taLnk')
more.click()
time.sleep(1)
except:
print('there are no MORE reviews')
finally:
# get the reviews
reviews = driver.find_elements_by_class_name('entry')
reviews = getRidOfEmpty(reviews)
# get the ratings
basicReview = driver.find_elements_by_class_name('basic_review')
print("---------------start------------ : " + str(len(basicReview)))
print("URL : " + URL)
rating = getRating(driver)
ratings = analyzeRating(rating)
print(ratings)
locations = getMemberLocationInfo(driver)
dates = getDateOfReviews(driver)
# put the data into file
print('**************************************** START PRINT ****************************************')
print('size: ', str(len(reviews)), str(len(ratings)), str(len(locations)), str(len(dates)))
writeReviewsToFile(fileName, reviews, ratings, locations, dates)
print('**************************************** END PRINT ****************************************')
print('----------collectReviews finished----------')
def getRating(driver):
result = []
basicReviews = driver.find_elements_by_class_name('basic_review')
for inner in basicReviews:
ratingClass = inner.find_element_by_class_name('ui_bubble_rating')
result.append(ratingClass.get_attribute('alt'))
return result
# Get the date when the reviews were posted
# Argument: web driver, WEBDRIVER
# Return type: result, LIST
def getDateOfReviews(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
col = inner.find_element_by_class_name('col2of2')
date = col.find_element_by_class_name('ratingDate')
realDate = ''
if 'ago' in date.text or 'yesterday' in date.text or 'today' in date.text:
realDate = date.get_attribute('title')
result.append(convertIntoOtherForm(realDate))
else:
realDate = date.text[9:]
result.append(convertIntoOtherForm(realDate))
return result
def convertIntoOtherForm(realDate):
result = ''
splitedDate = realDate.split(' ')
if splitedDate[0] == 'January': splitedDate[0] = '01'
elif splitedDate[0] == 'February': splitedDate[0] = '02'
elif splitedDate[0] == 'March': splitedDate[0] = '03'
elif splitedDate[0] == 'April': splitedDate[0] = '04'
elif splitedDate[0] == 'May': splitedDate[0] = '05'
elif splitedDate[0] == 'June': splitedDate[0] = '06'
elif splitedDate[0] == 'July': splitedDate[0] = '07'
elif splitedDate[0] == 'August': splitedDate[0] = '08'
elif splitedDate[0] == 'September': splitedDate[0] = '09'
elif splitedDate[0] == 'October': splitedDate[0] = '10'
elif splitedDate[0] == 'November': splitedDate[0] = '11'
elif splitedDate[0] == 'December': splitedDate[0] = '12'
for s in splitedDate[1]:
if s == ',': break
result = result+s
splitedDate[1] = result
if len(splitedDate[1]) == 1:
splitedDate[1] = '0'+splitedDate[1]
#print('DATE is '+splitedDate[2]+splitedDate[0]+splitedDate[1])
return splitedDate[2]+splitedDate[0]+splitedDate[1]
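# A sketch of the same conversion using a lookup table instead of the elif chain
# (convert_date is a hypothetical name; assumes the 'Month D, YYYY' format parsed above):
MONTH_NUMBERS = {'January': '01', 'February': '02', 'March': '03', 'April': '04',
                 'May': '05', 'June': '06', 'July': '07', 'August': '08',
                 'September': '09', 'October': '10', 'November': '11', 'December': '12'}
def convert_date(real_date):
    month, day, year = real_date.replace(',', '').split(' ')
    return year + MONTH_NUMBERS[month] + day.zfill(2)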
def organizeReviews(date, country):
res = []
if country == 'korea':
filename = 'korea_'+prefixOfFileName+'.csv'
flag = 20161107
elif country == 'result_shanghai':
filename = 'shanghai_'+prefixOfFileName+'.csv'
flag = 20160921
elif country == 'result_singapore':
filename = 'singapore_'+prefixOfFileName+'.csv'
flag = 20160721
if int(date) >= flag:
res.append('after')
writeReviewsToFile(filename, res)
else:
res.append('before')
writeReviewsToFile(filename, res)
return res
# Get the country information from each reviews
# Argument: driver
# Return type: result, LIST
def getMemberLocationInfo(driver):
result = []
basics = driver.find_elements_by_class_name('inlineReviewUpdate')
for inner in basics:
if inner.text == '': continue
#col = inner.find_element_by_class_name('col1of2')
#memberInfo = inner.find_element_by_class_name('member_info')
try:
locationClass = inner.find_element_by_class_name('location')
location = locationClass.text
result.append(location)
except:
result.append('')
return result
# Get rid of the empty elements in the list of reviews
def getRidOfEmpty(review):
result = []
for e in review:
if e.text != '':
result.append(e.text)
return result
# Convert the crawled rating into useful data
# Argument: rating, LIST
# Return type: result, LIST
def analyzeRating(rating):
print('----------analyzeRating is called----------')
result = []
for e in rating:
if e == '1.0 of 5 bubbles':
result.append('1')
elif e == '2.0 of 5 bubbles':
result.append('2')
elif e == '3.0 of 5 bubbles':
result.append('3')
elif e == '4.0 of 5 bubbles':
result.append('4')
elif e == '5.0 of 5 bubbles':
result.append('5')
elif e == '1.5 of 5 bubbles':
result.append('1.5')
elif e == '2.5 of 5 bubbles':
result.append('2.5')
elif e == '3.5 of 5 bubbles':
result.append('3.5')
elif e == '4.5 of 5 bubbles':
result.append('4.5')
print('----------analyzeRating finished----------')
return result
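# The chain above is a fixed mapping; a dict-based sketch of the same conversion
# (analyze_rating_alt is a hypothetical name; unrecognised labels are skipped, as above):
BUBBLE_VALUES = {'1.0': '1', '2.0': '2', '3.0': '3', '4.0': '4', '5.0': '5',
                 '1.5': '1.5', '2.5': '2.5', '3.5': '3.5', '4.5': '4.5'}
def analyze_rating_alt(rating):
    return [BUBBLE_VALUES[e.split(' ')[0]] for e in rating
            if e.endswith(' of 5 bubbles') and e.split(' ')[0] in BUBBLE_VALUES]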
# Read the file of URL postfixes from the desktop
# Argument: country file name, one of (South Korea, Singapore, Shanghai), STRING
# Return type: information of restaurant, 2-D LIST [[numOfStar, postfixUrl, nameOfRestaurant], [numOfStar, ...], ...]
def readPostfixFromFile(country):
    print('----------readPostfixFromFile called----------')
result = []
with open(country, 'r') as csvFile:
information = csv.reader(csvFile)
for row in information:
inner = []
inner.append(row[2])
inner.append(row[3])
result.append(inner) | print('----------readPostifxFormFile finished----------') | random_line_split |
    print('----------readPostfixFromFile finished----------')
    return result
# hooks.py
def GetApiMessage(message_name):
  # signature and first line reconstructed to match the GetApiClient pattern below
  messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)
return getattr(messages, message_name)
def GetApiClient():
return apis.GetClientInstance(_BQ_API, _BQ_API_VERSION)
# Argument Processors
def JobListProjectionProcessor(show_config):
projection_enum = (
GetApiMessage('BigqueryJobsListRequest').ProjectionValueValuesEnum)
if show_config:
return projection_enum.full
return projection_enum.minimal
def JobIdProcessor(job_id_arg):
if job_id_arg:
return job_id_arg
job_id = '{}-{}'.format(_BQ_JOB_ID_PREFIX, uuid.uuid4().hex)
return job_id
def PermissionsFileProcessor(input_file):
"""Builds a bigquery AccessValueListEntry array from input file.
Expects YAML or JSON formatted file.
Args:
input_file: input file contents from argparse namespace.
Raises:
PermissionsFileError: if the file contents are not a valid JSON or YAML
file.
Returns:
[AccessValueListEntry]: Array of AccessValueListEntry messages specifying
access permissions defined input file.
"""
access_value_msg = GetApiMessage('Dataset').AccessValueListEntry
try:
permissions_array = []
permissions_from_file = yaml.load(input_file[0])
permissions_from_file = permissions_from_file.get('access', None)
if not permissions_from_file or not isinstance(permissions_from_file, list):
raise PermissionsFileError(
'Error parsing permissions file: no access list defined in file')
for access_yaml in permissions_from_file:
permission = encoding.PyValueToMessage(access_value_msg, access_yaml)
if _ValidatePermission(permission):
permissions_array.append(permission)
else:
raise PermissionsFileError(('Error parsing permissions file:'
' invalid permission definition'
' [{}]'.format(permission)))
return sorted(permissions_array, key=lambda x: x.role)
except yaml.YAMLParseError as ype:
raise PermissionsFileError('Error parsing permissions file [{}]'.format(
ype))
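# A minimal example of the permissions file parsed above (assumed layout; each
# entry needs a role plus one principal field such as userByEmail, groupByEmail,
# domain, specialGroup or view):
#
#   access:
#   - role: READER
#     userByEmail: alice@example.com
#   - role: WRITER
#     groupByEmail: team@example.com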
def _ValidatePermission(permission_obj):
is_valid = (permission_obj.domain or
permission_obj.userByEmail or
permission_obj.specialGroup or
permission_obj.view or
permission_obj.groupByEmail) and permission_obj.role
return is_valid
def ProcessTableExpiration(expire_duration):
"""Convert commandline duration into epoch timeoffset (in ms)."""
t = times.GetDateTimePlusDuration(datetime.datetime.now(), expire_duration)
return int(time.mktime(t.timetuple())) * 1000
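# For example (hypothetical flag value): '--expiration 2h' arrives here as a
# two-hour duration, so the return value is "now + 2h" converted to milliseconds
# since the epoch, the unit BigQuery uses for table expiration times.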
def BqTableSchemaFileProcessor(file_arg):
"""Convert Input JSON file into TableSchema message."""
table_schema_type = GetApiMessage('TableSchema')
schema_field_type = GetApiMessage('TableFieldSchema')
try:
schema_json = yaml.load(file_arg)
schema_json = schema_json.get('schema', None)
if not schema_json or not isinstance(schema_json, list):
raise SchemaFileError(
'Error parsing schema file: no schema field list defined in file')
all_fields = []
for field in schema_json:
new_field = schema_field_type(name=field['name'],
type=field['type'],
mode=field.get('mode', 'NULLABLE'))
all_fields.append(new_field)
return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))
except yaml.YAMLParseError as ype:
raise SchemaFileError('Error parsing schema file [{}]'.format(ype))
except (AttributeError, KeyError) as e:
raise SchemaFileError(
'Error parsing schema file, invalid field definition [{}]'.format(e))
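# A minimal example of the schema file parsed above (assumed layout, matching the
# keys read by this processor):
#
#   schema:
#   - name: id
#     type: INTEGER
#     mode: REQUIRED
#   - name: name
#     type: STRING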
def BqTableDataFileProcessor(file_arg):
"""Convert Input JSON file into TableSchema message."""
data_insert_request_type = GetApiMessage('TableDataInsertAllRequest')
insert_row_type = data_insert_request_type.RowsValueListEntry
data_row_type = GetApiMessage('JsonObject')
try:
data_json = yaml.load(file_arg)
if not data_json or not isinstance(data_json, list):
raise TableDataFileError(
'Error parsing data file: no data records defined in file')
rows = []
for row in data_json:
rows.append(insert_row_type(json=encoding.DictToMessage(
row, data_row_type)))
return rows
except yaml.YAMLParseError as ype:
raise TableDataFileError('Error parsing data file [{}]'.format(ype))
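# A minimal example of the data file parsed above (assumed layout): a YAML/JSON
# list of row objects, one JSON object per row to insert, e.g.
#
#   - {"id": 1, "name": "alpha"}
#   - {"id": 2, "name": "beta"}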
# Request modifiers
def SetProjectId(ref, args, request):
"""Set projectId value for a BigQueryXXXRequests."""
del ref
project = args.project or properties.VALUES.core.project.Get(required=True)
project_ref = resources.REGISTRY.Parse(project,
collection='bigquery.projects')
request.projectId = project_ref.Name()
return request
def SetViewParameters(ref, args, request):
  """Ensure that view parameters are set properly on tables create requests."""
  del ref  # unused
  if not args.view:
    request.table.view = None
  return request
def ProcessDatasetOverwrite(ref, args, request):
"""Process the if-exists flag on datasets create."""
del ref
dataset_id = request.dataset.datasetReference.datasetId
project_id = request.projectId
if args.overwrite:
if _DatasetExists(dataset_id, project_id):
_TryDeleteDataset(dataset_id, project_id)
return request
def ProcessTableOverwrite(ref, args, request):
"""Process the overwrite flag on tables create."""
dataset_id = ref.datasetId
table_id = ref.Name()
project_id = ref.projectId
if args.overwrite:
if _TableExists(dataset_id, table_id, project_id):
_TryDeleteTable(dataset_id, table_id, project_id)
return request
def ProcessTableCopyOverwrite(ref, args, request):
"""Process the overwrite flag on tables copy."""
del ref # Unused
if args.overwrite:
request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'
return request
def ProcessTableCopyConfiguration(ref, args, request):
"""Build JobConfigurationTableCopy from request resource args."""
del ref # Unused
source_ref = args.CONCEPTS.source.Parse()
destination_ref = args.CONCEPTS.destination.Parse()
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.datasetId',
destination_ref.Parent().Name())
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.projectId',
destination_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.destinationTable.tableId',
destination_ref.Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.datasetId',
source_ref.Parent().Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.projectId',
source_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.tableId',
source_ref.Name())
return request
def ProcessSchemaUpdate(ref, args, request):
"""Process schema Updates (additions/mode changes) for the request.
Retrieves the current table schema for ref and attempts to merge in the schema
  provided in the request. This is necessary since the API backend does not
  handle PATCH semantics for schema updates (e.g. process the deltas) so we must
  always send the fully updated schema in the request.
Args:
ref: resource reference for table.
args: argparse namespace for requests
request: BigqueryTablesPatchRequest object
Returns:
    request: the updated request
  Raises:
    SchemaUpdateError: table not found or invalid schema change.
"""
table = request.table
relaxed_columns = args.relax_columns
if not table.schema and not relaxed_columns: # if not updating schema,
return request # then just return.
original_schema = _TryGetCurrentSchema(ref.Parent().Name(),
ref.Name(),
ref.projectId)
new_schema_columns = table.schema
updated_fields = _GetUpdatedSchema(original_schema,
new_schema_columns,
relaxed_columns)
table_schema_type = GetApiMessage('TableSchema')
request.table.schema = table_schema_type(fields=updated_fields)
return request
def _TryGetCurrentSchema(dataset_id, table_id, project_id):
"""Try to retrieve the current BigQuery TableSchema for a table_ref.
Tries to fetch the schema of an existing table. Raises SchemaUpdateError if
table is not found or if table is not of type 'TABLE'.
Args:
dataset_id: the dataset id containing the table.
table_id: the table id for the table.
project_id: the project id containing the dataset and table.
Returns:
schema: the table schema object
Raises:
SchemaUpdateError: table not found or invalid table type.
"""
client = GetApiClient()
service = client.tables
get_request_type = GetApiMessage('BigqueryTablesGetRequest')
get_request = get_request_type(datasetId=dataset_id,
tableId=table_id,
projectId=project_id)
try:
table = service.Get(get_request)
if not table or table.type != 'TABLE':
raise SchemaUpdateError('Schema modifications only supported '
'on TABLE objects received [{}]'.format(
table))
except apitools_exceptions.HttpNotFoundError:
raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
project_id, dataset_id, table_id))
return table.schema
def _GetUpdatedSchema(
original_schema,
new_columns=None,
    relaxed_columns=None):
"""Update original_schema by adding and/or relaxing mode on columns."""
orig_field_map = (
{f.name: f for f in original_schema.fields} if original_schema else {})
if relaxed_columns:
orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)
if new_columns:
orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)
return sorted(orig_field_map.values(), key=lambda x: x.name)
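# Worked example (hypothetical): original fields {a: REQUIRED, b: NULLABLE} with
# relaxed_columns=['a'] and new_columns=[c] merge into [a: NULLABLE, b: NULLABLE, c],
# sorted by field name as returned above.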
def _GetRelaxedCols(relaxed_columns, orig_schema_map):
"""Change mode to `NULLABLE` for columns in existing schema.
  Tries to set mode on existing columns in orig_schema_map to `NULLABLE`. Raises
SchemaUpdateError if column is not found in orig_schema_map.
Args:
    relaxed_columns: [string] the list of columns to relax required mode for.
orig_schema_map: {string: TableSchemaField} map of field name to
TableSchemaField objects representing the original schema.
Returns:
updated_schema_map: {string: TableSchemaField} map of field name to
TableSchemaField objects representing the updated schema.
Raises:
SchemaUpdateError: if any of the fields to be relaxed are not in the
original schema.
"""
updated_schema_map = orig_schema_map.copy()
for col in relaxed_columns:
if col in orig_schema_map:
updated_schema_map[col].mode = 'NULLABLE'
else:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
return updated_schema_map
def _AddNewColsToSchema(new_fields, orig_schema_map):
"""Add new columns to an existing schema.
  Tries to add new fields to an existing schema. Raises SchemaUpdateError
if column already exists in the orig_schema_map.
Args:
    new_fields: [TableSchemaField] the list of columns to add to the schema.
orig_schema_map: {string: TableSchemaField} map of field name to
TableSchemaField objects representing the original schema.
Returns:
updated_schema_map: {string: TableSchemaField} map of field name to
TableSchemaField objects representing the updated schema.
Raises:
    SchemaUpdateError: if any of the new fields already exist in the
      original schema.
"""
updated_schema_map = orig_schema_map.copy()
for new_field in new_fields:
if new_field.name in orig_schema_map:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
updated_schema_map[new_field.name] = new_field
return updated_schema_map
def _DatasetExists(dataset_id, project_id):
"""Validate a resource of the given type with specified ID already exists."""
client = GetApiClient()
service = client.datasets
get_request_type = GetApiMessage('BigqueryDatasetsGetRequest')
get_request = get_request_type(datasetId=dataset_id, projectId=project_id)
try:
service.Get(get_request)
return True
except apitools_exceptions.HttpNotFoundError:
log.info('Dataset with id [{}:{}] not found.'.format(
project_id, dataset_id))
return False
def _TableExists(dataset_id, table_id, project_id):
"""Validate a resource of the given type with specified ID already exists."""
client = GetApiClient()
service = client.tables
get_request_type = GetApiMessage('BigqueryTablesGetRequest')
get_request = get_request_type(datasetId=dataset_id, tableId=table_id,
projectId=project_id)
try:
service.Get(get_request)
return True
except apitools_exceptions.HttpNotFoundError:
log.info('Table with id [{}:{}:{}] not found.'.format(
project_id, dataset_id, table_id))
return False
def _TryDeleteDataset(dataset_id, project_id):
"""Try to delete a dataset, propagating error on failure."""
client = GetApiClient()
service = client.datasets
delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
delete_request = delete_request_type(datasetId=dataset_id,
projectId=project_id,
deleteContents=True)
service.Delete(delete_request)
log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id))
def _TryDeleteTable(dataset_id, table_id, project_id):
"""Try to delete a dataset, propagating error on failure."""
client = GetApiClient()
  service = client.tables
// mod.rs
use nym_config::{
    must_get_home, read_config_from_toml_file, save_formatted_config_to_file, NymConfigTemplate,
    DEFAULT_CONFIG_DIR, DEFAULT_CONFIG_FILENAME, DEFAULT_DATA_DIR, NYM_DIR,
};
use serde::{Deserialize, Serialize};
use std::io;
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::Duration;
use url::Url;
pub(crate) mod old_config_v1_1_21;
pub mod persistence;
mod template;
const DEFAULT_MIXNODES_DIR: &str = "mixnodes";
// 'RTT MEASUREMENT'
const DEFAULT_PACKETS_PER_NODE: usize = 100;
const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_millis(5000);
const DEFAULT_PACKET_TIMEOUT: Duration = Duration::from_millis(1500);
const DEFAULT_DELAY_BETWEEN_PACKETS: Duration = Duration::from_millis(50);
const DEFAULT_BATCH_SIZE: usize = 50;
const DEFAULT_TESTING_INTERVAL: Duration = Duration::from_secs(60 * 60 * 12);
const DEFAULT_RETRY_TIMEOUT: Duration = Duration::from_secs(60 * 30);
// 'DEBUG'
const DEFAULT_NODE_STATS_LOGGING_DELAY: Duration = Duration::from_millis(60_000);
const DEFAULT_NODE_STATS_UPDATING_DELAY: Duration = Duration::from_millis(30_000);
const DEFAULT_PACKET_FORWARDING_INITIAL_BACKOFF: Duration = Duration::from_millis(10_000);
const DEFAULT_PACKET_FORWARDING_MAXIMUM_BACKOFF: Duration = Duration::from_millis(300_000);
const DEFAULT_INITIAL_CONNECTION_TIMEOUT: Duration = Duration::from_millis(1_500);
const DEFAULT_MAXIMUM_CONNECTION_BUFFER_SIZE: usize = 2000;
/// Derive default path to the mixnode's config directory.
/// It should get resolved to `$HOME/.nym/mixnodes/<id>/config`
pub fn default_config_directory<P: AsRef<Path>>(id: P) -> PathBuf {
must_get_home()
.join(NYM_DIR)
.join(DEFAULT_MIXNODES_DIR)
.join(id)
.join(DEFAULT_CONFIG_DIR)
}
/// Derive default path to the mixnode's config file.
/// It should get resolved to `$HOME/.nym/mixnodes/<id>/config/config.toml`
pub fn default_config_filepath<P: AsRef<Path>>(id: P) -> PathBuf {
default_config_directory(id).join(DEFAULT_CONFIG_FILENAME)
}
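// e.g. `default_config_filepath("my-node")` resolves to
// `$HOME/.nym/mixnodes/my-node/config/config.toml` ("my-node" is a hypothetical id).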
/// Derive default path to the mixnode's data directory where files, such as keys, are stored.
/// It should get resolved to `$HOME/.nym/mixnodes/<id>/data`
pub fn default_data_directory<P: AsRef<Path>>(id: P) -> PathBuf {
must_get_home()
.join(NYM_DIR)
.join(DEFAULT_MIXNODES_DIR)
.join(id)
.join(DEFAULT_DATA_DIR)
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Config {
pub mixnode: MixNode,
pub storage_paths: MixNodePaths,
#[serde(default)]
pub verloc: Verloc,
#[serde(default)]
pub logging: LoggingSettings,
#[serde(default)]
pub debug: Debug,
}
impl NymConfigTemplate for Config {
fn template() -> &'static str {
CONFIG_TEMPLATE
}
}
impl Config {
pub fn new<S: AsRef<str>>(id: S) -> Self {
Config {
mixnode: MixNode::new_default(id.as_ref()),
storage_paths: MixNodePaths::new_default(id.as_ref()),
verloc: Default::default(),
logging: Default::default(),
debug: Default::default(),
}
}
pub fn read_from_toml_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
read_config_from_toml_file(path)
}
pub fn read_from_default_path<P: AsRef<Path>>(id: P) -> io::Result<Self> {
Self::read_from_toml_file(default_config_filepath(id))
}
pub fn default_location(&self) -> PathBuf {
default_config_filepath(&self.mixnode.id)
}
pub fn save_to_default_location(&self) -> io::Result<()> {
let config_save_location: PathBuf = self.default_location();
save_formatted_config_to_file(self, config_save_location)
}
// builder methods
pub fn with_custom_nym_apis(mut self, nym_api_urls: Vec<Url>) -> Self {
self.mixnode.nym_api_urls = nym_api_urls;
self
}
pub fn with_listening_address(mut self, listening_address: IpAddr) -> Self {
self.mixnode.listening_address = listening_address;
self
}
pub fn with_mix_port(mut self, port: u16) -> Self {
self.mixnode.mix_port = port;
self
}
pub fn with_verloc_port(mut self, port: u16) -> Self {
self.mixnode.verloc_port = port;
self
}
pub fn with_http_api_port(mut self, port: u16) -> Self {
self.mixnode.http_api_port = port;
self
}
pub fn get_nym_api_endpoints(&self) -> Vec<Url> {
self.mixnode.nym_api_urls.clone()
}
}
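// A sketch of typical builder usage (hypothetical id and ports):
//
// let config = Config::new("my-node")
//     .with_mix_port(1789)
//     .with_verloc_port(1790)
//     .with_http_api_port(8000);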
#[derive(Debug, Deserialize, PartialEq, Serialize)]
pub struct MixNode {
/// Version of the mixnode for which this configuration was created.
pub version: String,
/// ID specifies the human readable ID of this particular mixnode.
pub id: String,
/// Address to which this mixnode will bind to and will be listening for packets.
pub listening_address: IpAddr,
/// Port used for listening for all mixnet traffic.
/// (default: 1789)
pub mix_port: u16,
/// Port used for listening for verloc traffic.
/// (default: 1790)
pub verloc_port: u16,
/// Port used for listening for http requests.
/// (default: 8000)
pub http_api_port: u16,
/// Addresses to nym APIs from which the node gets the view of the network.
pub nym_api_urls: Vec<Url>,
}
impl MixNode {
pub fn new_default<S: Into<String>>(id: S) -> Self {
MixNode {
version: env!("CARGO_PKG_VERSION").to_string(),
id: id.into(),
listening_address: inaddr_any(),
mix_port: DEFAULT_MIX_LISTENING_PORT,
verloc_port: DEFAULT_VERLOC_LISTENING_PORT,
http_api_port: DEFAULT_HTTP_API_LISTENING_PORT,
nym_api_urls: vec![Url::from_str(mainnet::NYM_API).expect("Invalid default API URL")],
}
}
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Verloc {
/// Specifies number of echo packets sent to each node during a measurement run.
pub packets_per_node: usize,
/// Specifies maximum amount of time to wait for the connection to get established.
#[serde(with = "humantime_serde")]
pub connection_timeout: Duration,
/// Specifies maximum amount of time to wait for the reply packet to arrive before abandoning the test.
#[serde(with = "humantime_serde")]
pub packet_timeout: Duration,
/// Specifies delay between subsequent test packets being sent (after receiving a reply).
#[serde(with = "humantime_serde")]
pub delay_between_packets: Duration,
/// Specifies number of nodes being tested at once.
pub tested_nodes_batch_size: usize,
/// Specifies delay between subsequent test runs.
#[serde(with = "humantime_serde")]
pub testing_interval: Duration,
/// Specifies delay between attempting to run the measurement again if the previous run failed
/// due to being unable to get the list of nodes.
#[serde(with = "humantime_serde")]
pub retry_timeout: Duration,
}
impl Default for Verloc {
fn default() -> Self {
Verloc {
packets_per_node: DEFAULT_PACKETS_PER_NODE,
connection_timeout: DEFAULT_CONNECTION_TIMEOUT,
packet_timeout: DEFAULT_PACKET_TIMEOUT,
delay_between_packets: DEFAULT_DELAY_BETWEEN_PACKETS,
tested_nodes_batch_size: DEFAULT_BATCH_SIZE,
testing_interval: DEFAULT_TESTING_INTERVAL,
retry_timeout: DEFAULT_RETRY_TIMEOUT,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(default)]
pub struct Debug {
/// Delay between each subsequent node statistics being logged to the console
#[serde(with = "humantime_serde")]
pub node_stats_logging_delay: Duration,
/// Delay between each subsequent node statistics being updated
#[serde(with = "humantime_serde")]
pub node_stats_updating_delay: Duration,
/// Initial value of an exponential backoff to reconnect to dropped TCP connection when
/// forwarding sphinx packets.
#[serde(with = "humantime_serde")]
pub packet_forwarding_initial_backoff: Duration,
/// Maximum value of an exponential backoff to reconnect to dropped TCP connection when
/// forwarding sphinx packets.
#[serde(with = "humantime_serde")]
pub packet_forwarding_maximum_backoff: Duration,
/// Timeout for establishing initial connection when trying to forward a sphinx packet.
#[serde(with = "humantime_serde")]
pub initial_connection_timeout: Duration,
/// Maximum number of packets that can be stored waiting to get sent to a particular connection.
pub maximum_connection_buffer_size: usize,
/// Specifies whether the mixnode should be using the legacy framing for the sphinx packets.
// it's set to true by default. The reason for that decision is to preserve compatibility with the
// existing nodes whilst everyone else is upgrading and getting the code for handling the new field.
// It shall be disabled in the subsequent releases.
pub use_legacy_framed_packet_version: bool,
}
impl Default for Debug {
fn default() -> Self {
Debug {
node_stats_logging_delay: DEFAULT_NODE_STATS_LOGGING_DELAY,
node_stats_updating_delay: DEFAULT_NODE_STATS_UPDATING_DELAY,
packet_forwarding_initial_backoff: DEFAULT_PACKET_FORWARDING_INITIAL_BACKOFF,
packet_forwarding_maximum_backoff: DEFAULT_PACKET_FORWARDING_MAXIMUM_BACKOFF,
initial_connection_timeout: DEFAULT_INITIAL_CONNECTION_TIMEOUT,
maximum_connection_buffer_size: DEFAULT_MAXIMUM_CONNECTION_BUFFER_SIZE,
// TODO: remember to change it in one of future releases!!
use_legacy_framed_packet_version: true,
        }
    }
}
# maximin.py
own_tokens[token].append(pos)
score = num_defeated * 1.5
# assign values to own tokens as [# beatable]/[# identical * closest enemy dist]
for hand, positions in own_tokens.items():
if len(positions) > 0: # check that there are instances of the hand first
beats = beats_dict[hand]
targets = opp_tokens[beats]
init_value = len(targets)/len(positions)
for position in positions:
                min_target_dist = inf
                for target in targets:
                    target_dist = dist(position, target)
                    if target_dist < min_target_dist:
                        min_target_dist = target_dist
                if min_target_dist == 0:  # this shouldn't happen as the opponent should have died
                    print("position: {}, target position: {}".format(position, target))
                score += init_value/min_target_dist
# subtract number of opponent tokens on the board
for hand, positions in opp_tokens.items():
for position in positions:
score -= 1
return score
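# Worked example (hypothetical board): one defeated lower token (num_defeated=1),
# one own 'R' at hex distance 2 from the single 's' it can beat, and two surviving
# lower tokens give 1*1.5 + (1/1)/2 - 2 = 0.0.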
class Player:
def __init__(self, player):
"""
Called once at the beginning of a game to initialise this player.
Set up an internal representation of the game state.
The parameter player is the string "upper" (if the instance will
play as Upper), or the string "lower" (if the instance will play
as Lower).
"""
self.colour = player
self.game_in_head = Board()
def action(self):
"""
Called at the beginning of each turn. Based on the current state
of the game, select an action to play this turn.
"""
# assume the smart opponent can always choose the best step
# Depth First Search
steps = 2
stack = [(self.game_in_head, (), 0)]
maxmin = None
good_paths = []
while len(stack) > 0:
parent_node, path, score = stack.pop(-1)
if len(path) >= steps*2:
# leaf node in the search tree
if maxmin is None:
maxmin = score
good_paths.append(path)
elif maxmin == score:
good_paths.append(path)
elif maxmin < score:
maxmin = score
good_paths.clear()
good_paths.append(path)
else:
# root node, find its leaves
children_nodes = self.one_step_infe(parent_node, path, score)
stack += children_nodes
path_dec = random.choice(good_paths)
if self.colour == 'upper':
return path_dec[0]
elif self.colour == 'lower':
return path_dec[1]
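    # The loop above is a depth-limited maximin: each leaf carries the score the
    # opponent can force (the min), good_paths keeps every root action attaining
    # the best such score, and ties are broken uniformly at random.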
def one_step_infe(self, parent_node, path, score):
children_nodes = []
if self.colour == 'upper':
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
min_onestep = None
opponent_acts = []
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
                # the smart opponent always makes the decision that minimizes the score
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "upper", lower_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((lower_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((lower_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((lower_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(upper_act, oppo_dec), min_onestep))
else:
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
min_onestep = None
opponent_acts = []
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "lower", upper_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((upper_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((upper_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((upper_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(oppo_dec, lower_act), min_onestep))
return children_nodes
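# Note (not in the original): random.choices samples WITH replacement, so the
# same action can be expanded more than once per node; random.sample(acts,
# k=min(10, len(acts))) would draw 10 distinct candidate actions instead.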
def update(self, opponent_action, player_action):
"""
Called at the end of each turn to inform this player of both
players' chosen actions. Update your internal representation
of the game state.
The parameter opponent_action is the opponent's chosen action,
and player_action is this instance's latest chosen action.
"""
if self.colour == 'upper':
self.game_in_head.update(player_action, opponent_action)
else:
self.game_in_head.update(opponent_action, player_action)
# modified from referee.game.py
class Board:
def __init__(self):
self.throws = {"upper": 0, "lower": 0}
self.nturns = 0
# all hexes
_HEX_RANGE = range(-4, +4 + 1)
_ORD_HEXES = [
(r, q) for r in _HEX_RANGE for q in _HEX_RANGE if -r - q in _HEX_RANGE
]
self._SET_HEXES = frozenset(_ORD_HEXES)
# nearby hexes
self._HEX_STEPS = [(1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0), (0, -1)]
self._BEATS_WHAT = {"r": "s", "p": "r", "s": "p"}
self._WHAT_BEATS = {"r": "p", "p": "s", "s": "r"}
self.board = {x: [] for x in _ORD_HEXES}
def _ADJACENT(self, x):
rx, qx = x
return self._SET_HEXES & {(rx + ry, qx + qy) for ry, qy in self._HEX_STEPS}
def _BATTLE(self, symbols):
types = {s.lower() for s in symbols}
upper_cnt = sum([s.isupper() for s in symbols])
lower_cnt = sum([s.islower() for s in symbols])
if len(types) == 1:
# no fights
return symbols, 0, 0
if len(types) == 3:
# everyone dies
return [], -upper_cnt, -lower_cnt
# else there are two, only some die:
for t in types:
# those who are not defeated stay
symbols = [s for s in symbols if s.lower() != self._BEATS_WHAT[t]]
return symbols, sum([s.isupper() for s in symbols])-upper_cnt, sum([s.islower() for s in symbols])-lower_cnt
def available_actions(self, colour):
"""
A generator of currently-available actions for a particular player
(assists validation).
"""
throws = self.throws[colour]
isplayer = str.islower if colour == "lower" else str.isupper
if throws < 9:
sign = -1 if colour == "lower" else 1
throw_zone = (
(r, q) for r, q in self._SET_HEXES if sign * r >= 4 - throws
)
for x in throw_zone:
for s in "rps":
yield "THROW", s, x
occupied = {x for x, s in self.board.items() if any(map(isplayer, s))}
for x in occupied:
adjacent_x = self._ADJACENT(x)
for y in adjacent_x:
yield "SLIDE", x, y
if y in occupied:
opposite_y = self._ADJACENT(y) - adjacent_x - {x}
for z in opposite_y:
yield "SWING", x, z
def update(self, upper_action, lower_action):
"""
Submit an action to the game for validation and application.
If the action is not allowed, raise an InvalidActionException with
a message describing allowed actions.
Otherwise, apply the action to | min_target_dist = inf
for target in targets:
target_dist = dist(position, target)
if target_dist < min_target_dist:
min_target_dist = target_dist
if min_target_dist == 0: # this shouldn't happen as the opponent should have died
print("position: {}, target position: {}".format(position, target))
score += init_value/min_target_dist | conditional_block |
maximin.py | lower
own_tokens[token].append(pos)
score = num_defeated * 1.5
# assign values to own tokens as [# beatable]/[# identical * closest enemy dist]
for hand, positions in own_tokens.items():
if len(positions) > 0: # check that there are instances of the hand first
beats = beats_dict[hand]
targets = opp_tokens[beats]
init_value = len(targets)/len(positions)
for position in positions:
min_target_dist = inf
for target in targets:
target_dist = dist(position, target)
if target_dist < min_target_dist:
min_target_dist = target_dist
if min_target_dist == 0: # this shouldn't happen as the opponent should have died
print("position: {}, target position: {}".format(position, target))
score += init_value/min_target_dist
# subtract the number of opponent tokens left on the board
for positions in opp_tokens.values():
    score -= len(positions)
return score
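# dist() is called above but not defined in this excerpt. A plausible sketch,
# assuming the axial (r, q) hex coordinates used by Board below:
def dist(a, b):
    """Hex-grid distance between two axial coordinates (assumed helper)."""
    ar, aq = a
    br, bq = b
    return (abs(ar - br) + abs(aq - bq) + abs(ar + aq - br - bq)) // 2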
class Player:
def __init__(self, player):
"""
Called once at the beginning of a game to initialise this player.
Set up an internal representation of the game state.
The parameter player is the string "upper" (if the instance will
play as Upper), or the string "lower" (if the instance will play
as Lower).
"""
self.colour = player
self.game_in_head = Board()
def action(self):
"""
Called at the beginning of each turn. Based on the current state
of the game, select an action to play this turn.
"""
# assume the smart opponent always chooses its best reply, then pick the
# path maximizing our worst-case score (depth-first search, `steps` turns deep)
steps = 2
stack = [(self.game_in_head, (), 0)]
maxmin = None
good_paths = []
while len(stack) > 0:
parent_node, path, score = stack.pop(-1)
if len(path) >= steps*2:
# leaf node in the search tree
if maxmin is None:
maxmin = score
good_paths.append(path)
elif maxmin == score:
good_paths.append(path)
elif maxmin < score:
maxmin = score
good_paths.clear()
good_paths.append(path)
else:
# internal node: expand its children and keep searching
children_nodes = self.one_step_infe(parent_node, path, score)
stack += children_nodes
path_dec = random.choice(good_paths)
if self.colour == 'upper':
return path_dec[0]
elif self.colour == 'lower':
return path_dec[1]
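# Illustrative shape of a stored path after two turns (hypothetical actions):
# (("SLIDE", (0, 0), (0, 1)), ("THROW", "r", (-4, 2)),    # turn 1: upper, lower
#  ("SWING", (0, 1), (2, 0)), ("SLIDE", (-4, 2), (-3, 2)))  # turn 2: upper, lower
# so path_dec[0] is this player's move when upper, path_dec[1] when lower.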
def one_step_infe(self, parent_node, path, score):
children_nodes = []
if self.colour == 'upper':
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
min_onestep = None
opponent_acts = []
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
# the smart opponent always makes the move that minimizes our score
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "upper", lower_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((lower_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((lower_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((lower_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(upper_act, oppo_dec), min_onestep))
else:
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
min_onestep = None
opponent_acts = []
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "lower", upper_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((upper_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((upper_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((upper_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(oppo_dec, lower_act), min_onestep))
return children_nodes
def update(self, opponent_action, player_action):
"""
Called at the end of each turn to inform this player of both
players' chosen actions. Update your internal representation
of the game state.
The parameter opponent_action is the opponent's chosen action,
and player_action is this instance's latest chosen action.
"""
if self.colour == 'upper':
self.game_in_head.update(player_action, opponent_action)
else:
self.game_in_head.update(opponent_action, player_action)
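# Minimal driver sketch (an assumption; the real referee module is not shown here):
def _demo_game(turns=360):
    """Play both sides of a game using the action/update protocol above."""
    upper, lower = Player("upper"), Player("lower")
    for _ in range(turns):
        u_act, l_act = upper.action(), lower.action()
        upper.update(opponent_action=l_act, player_action=u_act)
        lower.update(opponent_action=u_act, player_action=l_act)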
# modified from referee.game.py
class Board:
def __init__(self):
self.throws = {"upper": 0, "lower": 0}
self.nturns = 0
# all hexes
_HEX_RANGE = range(-4, +4 + 1)
_ORD_HEXES = [
(r, q) for r in _HEX_RANGE for q in _HEX_RANGE if -r - q in _HEX_RANGE
]
self._SET_HEXES = frozenset(_ORD_HEXES)
# nearby hexes
self._HEX_STEPS = [(1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0), (0, -1)]
self._BEATS_WHAT = {"r": "s", "p": "r", "s": "p"}
self._WHAT_BEATS = {"r": "p", "p": "s", "s": "r"}
self.board = {x: [] for x in _ORD_HEXES}
def _ADJACENT(self, x):
rx, qx = x
return self._SET_HEXES & {(rx + ry, qx + qy) for ry, qy in self._HEX_STEPS}
def _BATTLE(self, symbols):
types = {s.lower() for s in symbols}
upper_cnt = sum([s.isupper() for s in symbols])
lower_cnt = sum([s.islower() for s in symbols])
if len(types) == 1:
# no fights
return symbols, 0, 0
if len(types) == 3:
# everyone dies
return [], -upper_cnt, -lower_cnt
# else there are two, only some die:
for t in types:
# those who are not defeated stay
symbols = [s for s in symbols if s.lower() != self._BEATS_WHAT[t]]
return symbols, sum([s.isupper() for s in symbols])-upper_cnt, sum([s.islower() for s in symbols])-lower_cnt
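# Illustrative outcomes of _BATTLE (upper-case = Upper's tokens):
#   _BATTLE(["R", "R"])      -> (["R", "R"], 0, 0)  # one type only: no fight
#   _BATTLE(["R", "s", "s"]) -> (["R"], 0, -2)      # rock defeats both scissors
#   _BATTLE(["R", "p", "s"]) -> ([], -1, -2)        # all three types: everyone dies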
def available_actions(self, colour):
"""
A generator of currently-available actions for a particular player
(assists validation).
"""
throws = self.throws[colour]
isplayer = str.islower if colour == "lower" else str.isupper
if throws < 9:
sign = -1 if colour == "lower" else 1
throw_zone = ( | for s in "rps":
yield "THROW", s, x
occupied = {x for x, s in self.board.items() if any(map(isplayer, s))}
for x in occupied:
adjacent_x = self._ADJACENT(x)
for y in adjacent_x:
yield "SLIDE", x, y
if y in occupied:
opposite_y = self._ADJACENT(y) - adjacent_x - {x}
for z in opposite_y:
yield "SWING", x, z
def update(self, upper_action, lower_action):
"""
Submit an action to the game for validation and application.
If the action is not allowed, raise an InvalidActionException with
a message describing allowed actions.
Otherwise, apply the action to | (r, q) for r, q in self._SET_HEXES if sign * r >= 4 - throws
)
for x in throw_zone: | random_line_split |
maximin.py | {}, target position: {}".format(position, target))
score += init_value/min_target_dist
# subtract the number of opponent tokens left on the board
for positions in opp_tokens.values():
    score -= len(positions)
return score
class Player:
def __init__(self, player):
"""
Called once at the beginning of a game to initialise this player.
Set up an internal representation of the game state.
The parameter player is the string "upper" (if the instance will
play as Upper), or the string "lower" (if the instance will play
as Lower).
"""
self.colour = player
self.game_in_head = Board()
def action(self):
"""
Called at the beginning of each turn. Based on the current state
of the game, select an action to play this turn.
"""
# assume the smart opponent always chooses its best reply, then pick the
# path maximizing our worst-case score (depth-first search, `steps` turns deep)
steps = 2
stack = [(self.game_in_head, (), 0)]
maxmin = None
good_paths = []
while len(stack) > 0:
parent_node, path, score = stack.pop(-1)
if len(path) >= steps*2:
# leaf node in the search tree
if maxmin is None:
maxmin = score
good_paths.append(path)
elif maxmin == score:
good_paths.append(path)
elif maxmin < score:
maxmin = score
good_paths.clear()
good_paths.append(path)
else:
# internal node: expand its children and keep searching
children_nodes = self.one_step_infe(parent_node, path, score)
stack += children_nodes
path_dec = random.choice(good_paths)
if self.colour == 'upper':
return path_dec[0]
elif self.colour == 'lower':
return path_dec[1]
def one_step_infe(self, parent_node, path, score):
children_nodes = []
if self.colour == 'upper':
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
min_onestep = None
opponent_acts = []
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
# the smart opponent always makes the move that minimizes our score
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "upper", lower_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((lower_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((lower_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((lower_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(upper_act, oppo_dec), min_onestep))
else:
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
min_onestep = None
opponent_acts = []
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "lower", upper_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((upper_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((upper_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((upper_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(oppo_dec, lower_act), min_onestep))
return children_nodes
def update(self, opponent_action, player_action):
"""
Called at the end of each turn to inform this player of both
players' chosen actions. Update your internal representation
of the game state.
The parameter opponent_action is the opponent's chosen action,
and player_action is this instance's latest chosen action.
"""
if self.colour == 'upper':
self.game_in_head.update(player_action, opponent_action)
else:
self.game_in_head.update(opponent_action, player_action)
# modified from referee.game.py
class Board:
def __init__(self):
self.throws = {"upper": 0, "lower": 0}
self.nturns = 0
# all hexes
_HEX_RANGE = range(-4, +4 + 1)
_ORD_HEXES = [
(r, q) for r in _HEX_RANGE for q in _HEX_RANGE if -r - q in _HEX_RANGE
]
self._SET_HEXES = frozenset(_ORD_HEXES)
# nearby hexes
self._HEX_STEPS = [(1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0), (0, -1)]
self._BEATS_WHAT = {"r": "s", "p": "r", "s": "p"}
self._WHAT_BEATS = {"r": "p", "p": "s", "s": "r"}
self.board = {x: [] for x in _ORD_HEXES}
def _ADJACENT(self, x):
rx, qx = x
return self._SET_HEXES & {(rx + ry, qx + qy) for ry, qy in self._HEX_STEPS}
def _BATTLE(self, symbols):
types = {s.lower() for s in symbols}
upper_cnt = sum([s.isupper() for s in symbols])
lower_cnt = sum([s.islower() for s in symbols])
if len(types) == 1:
# no fights
return symbols, 0, 0
if len(types) == 3:
# everyone dies
return [], -upper_cnt, -lower_cnt
# else there are two, only some die:
for t in types:
# those who are not defeated stay
symbols = [s for s in symbols if s.lower() != self._BEATS_WHAT[t]]
return symbols, sum([s.isupper() for s in symbols])-upper_cnt, sum([s.islower() for s in symbols])-lower_cnt
def available_actions(self, colour):
"""
A generator of currently-available actions for a particular player
(assists validation).
"""
throws = self.throws[colour]
isplayer = str.islower if colour == "lower" else str.isupper
if throws < 9:
sign = -1 if colour == "lower" else 1
throw_zone = (
(r, q) for r, q in self._SET_HEXES if sign * r >= 4 - throws
)
for x in throw_zone:
for s in "rps":
yield "THROW", s, x
occupied = {x for x, s in self.board.items() if any(map(isplayer, s))}
for x in occupied:
adjacent_x = self._ADJACENT(x)
for y in adjacent_x:
yield "SLIDE", x, y
if y in occupied:
opposite_y = self._ADJACENT(y) - adjacent_x - {x}
for z in opposite_y:
yield "SWING", x, z
def update(self, upper_action, lower_action):
| """
Submit an action to the game for validation and application.
If the action is not allowed, raise an InvalidActionException with
a message describing allowed actions.
Otherwise, apply the action to the game state.
"""
# validate the actions:
for action, c in [(upper_action, "upper"), (lower_action, "lower")]:
actions = list(self.available_actions(c))
if action not in actions:
self.logger.info(f"error: {c}: illegal action {action!r}")
available_actions_list_str = "\n* ".join(
[f"{a!r} - {_FORMAT_ACTION(a)}" for a in actions]
)
# NOTE: The game instance _could_ potentially be recovered
# but pursue a simpler implementation that just exits now
raise Exception(
f"{c} player's action, {action!r}, is not well-"
"formed or not available. See specification and "
"game rules for details, or consider currently " | identifier_body |
|
maximin.py |
own_tokens[token].append(pos)
score = num_defeated * 1.5
# assign values to own tokens as [# beatable]/[# identical * closest enemy dist]
for hand, positions in own_tokens.items():
if len(positions) > 0: # check that there are instances of the hand first
beats = beats_dict[hand]
targets = opp_tokens[beats]
init_value = len(targets)/len(positions)
for position in positions:
min_target_dist = inf
for target in targets:
target_dist = dist(position, target)
if target_dist < min_target_dist:
min_target_dist = target_dist
if min_target_dist == 0: # this shouldn't happen as the opponent should have died
print("position: {}, target position: {}".format(position, target))
score += init_value/min_target_dist
# subtract the number of opponent tokens left on the board
for positions in opp_tokens.values():
    score -= len(positions)
return score
class | :
def __init__(self, player):
"""
Called once at the beginning of a game to initialise this player.
Set up an internal representation of the game state.
The parameter player is the string "upper" (if the instance will
play as Upper), or the string "lower" (if the instance will play
as Lower).
"""
self.colour = player
self.game_in_head = Board()
def action(self):
"""
Called at the beginning of each turn. Based on the current state
of the game, select an action to play this turn.
"""
# assume the smart opponent always chooses its best reply, then pick the
# path maximizing our worst-case score (depth-first search, `steps` turns deep)
steps = 2
stack = [(self.game_in_head, (), 0)]
maxmin = None
good_paths = []
while len(stack) > 0:
parent_node, path, score = stack.pop(-1)
if len(path) >= steps*2:
# leaf node in the search tree
if maxmin is None:
maxmin = score
good_paths.append(path)
elif maxmin == score:
good_paths.append(path)
elif maxmin < score:
maxmin = score
good_paths.clear()
good_paths.append(path)
else:
# internal node: expand its children and keep searching
children_nodes = self.one_step_infe(parent_node, path, score)
stack += children_nodes
path_dec = random.choice(good_paths)
if self.colour == 'upper':
return path_dec[0]
elif self.colour == 'lower':
return path_dec[1]
def one_step_infe(self, parent_node, path, score):
children_nodes = []
if self.colour == 'upper':
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
min_onestep = None
opponent_acts = []
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
# the smart opponent always makes the move that minimizes our score
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "upper", lower_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((lower_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((lower_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((lower_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(upper_act, oppo_dec), min_onestep))
else:
all_lower_act = list(parent_node.available_actions('lower'))
for lower_act in random.choices(all_lower_act, k=min(10, len(all_lower_act))):
min_onestep = None
opponent_acts = []
all_upper_act = list(parent_node.available_actions('upper'))
for upper_act in random.choices(all_upper_act, k=min(10, len(all_upper_act))):
child_node = deepcopy(parent_node)
upper_defeated, lower_defeated = child_node.update(upper_act, lower_act)
child_score = eval_board(child_node.board, "lower", upper_defeated)
if min_onestep is None:
min_onestep = child_score
opponent_acts.append((upper_act, child_node))
elif min_onestep == child_score:
#opponent_acts.append((upper_act, child_node))
pass
elif min_onestep > child_score:
min_onestep = child_score
opponent_acts.clear()
opponent_acts.append((upper_act, child_node))
if min_onestep is not None:
oppo_dec, child_node = random.choice(opponent_acts)
children_nodes.append((child_node, path+(oppo_dec, lower_act), min_onestep))
return children_nodes
def update(self, opponent_action, player_action):
"""
Called at the end of each turn to inform this player of both
players' chosen actions. Update your internal representation
of the game state.
The parameter opponent_action is the opponent's chosen action,
and player_action is this instance's latest chosen action.
"""
if self.colour == 'upper':
self.game_in_head.update(player_action, opponent_action)
else:
self.game_in_head.update(opponent_action, player_action)
# modified from referee.game.py
class Board:
def __init__(self):
self.throws = {"upper": 0, "lower": 0}
self.nturns = 0
# all hexes
_HEX_RANGE = range(-4, +4 + 1)
_ORD_HEXES = [
(r, q) for r in _HEX_RANGE for q in _HEX_RANGE if -r - q in _HEX_RANGE
]
self._SET_HEXES = frozenset(_ORD_HEXES)
# nearby hexes
self._HEX_STEPS = [(1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0), (0, -1)]
self._BEATS_WHAT = {"r": "s", "p": "r", "s": "p"}
self._WHAT_BEATS = {"r": "p", "p": "s", "s": "r"}
self.board = {x: [] for x in _ORD_HEXES}
def _ADJACENT(self, x):
rx, qx = x
return self._SET_HEXES & {(rx + ry, qx + qy) for ry, qy in self._HEX_STEPS}
def _BATTLE(self, symbols):
types = {s.lower() for s in symbols}
upper_cnt = sum([s.isupper() for s in symbols])
lower_cnt = sum([s.islower() for s in symbols])
if len(types) == 1:
# no fights
return symbols, 0, 0
if len(types) == 3:
# everyone dies
return [], -upper_cnt, -lower_cnt
# else there are two, only some die:
for t in types:
# those who are not defeated stay
symbols = [s for s in symbols if s.lower() != self._BEATS_WHAT[t]]
return symbols, sum([s.isupper() for s in symbols])-upper_cnt, sum([s.islower() for s in symbols])-lower_cnt
def available_actions(self, colour):
"""
A generator of currently-available actions for a particular player
(assists validation).
"""
throws = self.throws[colour]
isplayer = str.islower if colour == "lower" else str.isupper
if throws < 9:
sign = -1 if colour == "lower" else 1
throw_zone = (
(r, q) for r, q in self._SET_HEXES if sign * r >= 4 - throws
)
for x in throw_zone:
for s in "rps":
yield "THROW", s, x
occupied = {x for x, s in self.board.items() if any(map(isplayer, s))}
for x in occupied:
adjacent_x = self._ADJACENT(x)
for y in adjacent_x:
yield "SLIDE", x, y
if y in occupied:
opposite_y = self._ADJACENT(y) - adjacent_x - {x}
for z in opposite_y:
yield "SWING", x, z
def update(self, upper_action, lower_action):
"""
Submit an action to the game for validation and application.
If the action is not allowed, raise an InvalidActionException with
a message describing allowed actions.
Otherwise, apply the action to | Player | identifier_name |
HAN_Evaluator.py | .val_max_sent_length
}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.alphas_sent], feed_dict=feed_dict)
#pred, a1, A = sess.run([predictions, model.alphas1, model.alphas2, model.alphas3, model.alphas4],
#feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.val_max_sent_length, 1])
# filter on correct predictions
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred['probabilities'], a_word, a_sent))
# print(zipped[0:2])
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1]==x[2] and x[1] == 1]
# print(zipped_correct[0:2])
def get_predicted_prob(x):
return (x[3])[(x[2])]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse=True)
print(sorted_correct[0:2])
#selection = sorted_correct[1]
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5]))
#selection_zipped_tuple = list(zip(selection[0], selection[4]))
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
#data = np.array(data)
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
#max_weight = attention_weights.max()
#attention_weights = attention_weights/max_weight # increase weights to make visualization clearer
#max_weight1 = np.array(attention_weights1.max(axis=-1))
#attention_weights1 = attention_weights1 / max_weight1[:, None] # increase weights to make visualization clearer
sentence = np.array([x[0] for x in data])
#labels = np.array(["label-{}, pred-{}, prob-{}".format(x[1], x[2], max(x[3])) for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
# .index() raises ValueError when a row has no PAD token; fall back to full length
try:
    idx = list(s).index("PAD")
except ValueError:
    idx = len(s)
attention_weights_word[i, idx:] = 0
# attention_weights3[i, idx:] = 0
# attention_weights4[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
# attention_weights3 = attention_weights3[empty_rows:, 0:max_idx]
# attention_weights4 = attention_weights4[empty_rows:, 0:max_idx]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1  # stretch weights so the max is 1, making the visualization clearer
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2  # stretch weights so the max is 1, making the visualization clearer
# max_weight3 = attention_weights3.max()
# attention_weights3 = attention_weights3 / max_weight3 # increase weights to make visualization clearer
# max_weight4 = attention_weights4.max()
# attention_weights4 = attention_weights4 / max_weight4 # increase weights to make visualization clearer
#print(np.shape(attention_weights1))
print(np.shape(sentence))
#print(np.shape(labels))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent, sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(
attention_weights.shape[-1] * (max_word_len * font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title("Attention")
plt.xlabel("words")
plt.ylabel("batch")
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k', linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4, cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != "PAD":
word = sentence[i, j]
else:
word = ""
fontsize = _font_size(len(word))
ax.text(x, y, word, ha="center", va="center", color=color, size=fontsize)
idx = [i + 0.5 for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention("attention_real_han.png", attention_weights_word, np.array([[x] for x in attention_weights_sent]), sentence)
# plot_attention("attention_real3.png", attention_weights3, sentence)
# plot_attention("attention_real4.png", attention_weights4, sentence)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
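# Hedged usage sketch (assumes scikit-learn; the class names are illustrative):
# from sklearn.metrics import confusion_matrix
# cm = confusion_matrix(pred['labels'], pred['predictions'])
# plt.figure()
# plot_confusion_matrix(cm, classes=["negative", "positive"], normalize=True)
# plt.savefig("confusion.png")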
def get_confusion(data, embds):
tf.reset_default_graph()
now = "banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop"
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, 0)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.test_max_seq_length,
model.max_sent_length: data.test_max_sent_length
}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
| return 0 | conditional_block |
|
HAN_Evaluator.py | =0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print("averages-------")
print(results)
print("------------")
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = "han_100d_163b_50cx_0.0001_0.5d"
with tf.Session() as sess:
model = HierarchicalAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
# saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_dir))
# Training model
# training_op, global_step = model.optimize()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
# print("Evaluation:")
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length
}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.alphas_sent], feed_dict=feed_dict)
#pred, a1, A = sess.run([predictions, model.alphas1, model.alphas2, model.alphas3, model.alphas4],
#feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.val_max_sent_length, 1])
# filter on correct predictions
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred['probabilities'], a_word, a_sent))
# print(zipped[0:2])
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1]==x[2] and x[1] == 1]
# print(zipped_correct[0:2])
def get_predicted_prob(x):
return (x[3])[(x[2])]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse=True)
print(sorted_correct[0:2])
#selection = sorted_correct[1]
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5]))
#selection_zipped_tuple = list(zip(selection[0], selection[4]))
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
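# Each row of selection_zipped is [token_string, word_alpha, sentence_alpha]
# for the single validation document picked above (index 133).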
def visualize_attention(data):
#data = np.array(data)
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
#max_weight = attention_weights.max()
#attention_weights = attention_weights/max_weight # increase weights to make visualization clearer
#max_weight1 = np.array(attention_weights1.max(axis=-1))
#attention_weights1 = attention_weights1 / max_weight1[:, None] # increase weights to make visualization clearer
sentence = np.array([x[0] for x in data])
#labels = np.array(["label-{}, pred-{}, prob-{}".format(x[1], x[2], max(x[3])) for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
# .index() raises ValueError when a row has no PAD token; fall back to full length
try:
    idx = list(s).index("PAD")
except ValueError:
    idx = len(s)
attention_weights_word[i, idx:] = 0
# attention_weights3[i, idx:] = 0
# attention_weights4[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
# attention_weights3 = attention_weights3[empty_rows:, 0:max_idx]
# attention_weights4 = attention_weights4[empty_rows:, 0:max_idx]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1  # stretch weights so the max is 1, making the visualization clearer
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2  # stretch weights so the max is 1, making the visualization clearer
# max_weight3 = attention_weights3.max()
# attention_weights3 = attention_weights3 / max_weight3 # increase weights to make visualization clearer
# max_weight4 = attention_weights4.max()
# attention_weights4 = attention_weights4 / max_weight4 # increase weights to make visualization clearer
#print(np.shape(attention_weights1))
print(np.shape(sentence))
#print(np.shape(labels))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent, sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(
attention_weights.shape[-1] * (max_word_len * font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title("Attention")
plt.xlabel("words")
plt.ylabel("batch")
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k', linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4, cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()): | color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != "PAD":
word = sentence[i, j]
else:
word = ""
fontsize = _font_size(len(word))
ax.text(x, y, word, ha="center", va="center", color=color, size=fontsize)
idx = [i + 0.5 for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention("attention_real_han.png", attention_weights_word, np.array([[x] for x in attention_weights_sent]), sentence)
# plot_attention("attention_real3.png", attention_weights3, sentence)
# plot_attention("attention_real4.png", attention_weights4, sentence)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
| x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5): | random_line_split |
HAN_Evaluator.py | 0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print("averages-------")
print(results)
print("------------")
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = "han_100d_163b_50cx_0.0001_0.5d"
with tf.Session() as sess:
model = HierarchicalAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
# saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_dir))
# Training model
# training_op, global_step = model.optimize()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
# print("Evaluation:")
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length
}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.alphas_sent], feed_dict=feed_dict)
#pred, a1, A = sess.run([predictions, model.alphas1, model.alphas2, model.alphas3, model.alphas4],
#feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.val_max_sent_length, 1])
# filter on correct predictions
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred['probabilities'], a_word, a_sent))
# print(zipped[0:2])
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1]==x[2] and x[1] == 1]
# print(zipped_correct[0:2])
def get_predicted_prob(x):
return (x[3])[(x[2])]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse=True)
print(sorted_correct[0:2])
#selection = sorted_correct[1]
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5]))
#selection_zipped_tuple = list(zip(selection[0], selection[4]))
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def | (data):
#data = np.array(data)
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
#max_weight = attention_weights.max()
#attention_weights = attention_weights/max_weight # increase weights to make visualization clearer
#max_weight1 = np.array(attention_weights1.max(axis=-1))
#attention_weights1 = attention_weights1 / max_weight1[:, None] # increase weights to make visualization clearer
sentence = np.array([x[0] for x in data])
#labels = np.array(["label-{}, pred-{}, prob-{}".format(x[1], x[2], max(x[3])) for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
# .index() raises ValueError when a row has no PAD token; fall back to full length
try:
    idx = list(s).index("PAD")
except ValueError:
    idx = len(s)
attention_weights_word[i, idx:] = 0
# attention_weights3[i, idx:] = 0
# attention_weights4[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
# attention_weights3 = attention_weights3[empty_rows:, 0:max_idx]
# attention_weights4 = attention_weights4[empty_rows:, 0:max_idx]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1  # stretch weights so the max is 1, making the visualization clearer
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2  # stretch weights so the max is 1, making the visualization clearer
# max_weight3 = attention_weights3.max()
# attention_weights3 = attention_weights3 / max_weight3 # increase weights to make visualization clearer
# max_weight4 = attention_weights4.max()
# attention_weights4 = attention_weights4 / max_weight4 # increase weights to make visualization clearer
#print(np.shape(attention_weights1))
print(np.shape(sentence))
#print(np.shape(labels))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent, sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(
attention_weights.shape[-1] * (max_word_len * font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title("Attention")
plt.xlabel("words")
plt.ylabel("batch")
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k', linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4, cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != "PAD":
word = sentence[i, j]
else:
word = ""
fontsize = _font_size(len(word))
ax.text(x, y, word, ha="center", va="center", color=color, size=fontsize)
idx = [i + 0.5 for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention("attention_real_han.png", attention_weights_word, np.array([[x] for x in attention_weights_sent]), sentence)
# plot_attention("attention_real3.png", attention_weights3, sentence)
# plot_attention("attention_real4.png", attention_weights4, sentence)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
| visualize_attention | identifier_name |
HAN_Evaluator.py | =0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print("averages-------")
print(results)
print("------------")
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = "han_100d_163b_50cx_0.0001_0.5d"
with tf.Session() as sess:
model = HierarchicalAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
# saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_dir))
# Training model
# training_op, global_step = model.optimize()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
# print("Evaluation:")
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length
}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.alphas_sent], feed_dict=feed_dict)
#pred, a1, A = sess.run([predictions, model.alphas1, model.alphas2, model.alphas3, model.alphas4],
#feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.val_max_sent_length, 1])
# filter on correct predictions
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred['probabilities'], a_word, a_sent))
# print(zipped[0:2])
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1]==x[2] and x[1] == 1]
# print(zipped_correct[0:2])
def get_predicted_prob(x):
|
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse=True)
print(sorted_correct[0:2])
#selection = sorted_correct[1]
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5]))
#selection_zipped_tuple = list(zip(selection[0], selection[4]))
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
#data = np.array(data)
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
#max_weight = attention_weights.max()
#attention_weights = attention_weights/max_weight # increase weights to make visualization clearer
#max_weight1 = np.array(attention_weights1.max(axis=-1))
#attention_weights1 = attention_weights1 / max_weight1[:, None] # increase weights to make visualization clearer
sentence = np.array([x[0] for x in data])
#labels = np.array(["label-{}, pred-{}, prob-{}".format(x[1], x[2], max(x[3])) for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
# .index() raises ValueError when a row has no PAD token; fall back to full length
try:
    idx = list(s).index("PAD")
except ValueError:
    idx = len(s)
attention_weights_word[i, idx:] = 0
# attention_weights3[i, idx:] = 0
# attention_weights4[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
# attention_weights3 = attention_weights3[empty_rows:, 0:max_idx]
# attention_weights4 = attention_weights4[empty_rows:, 0:max_idx]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1  # stretch weights so the max is 1, making the visualization clearer
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2  # stretch weights so the max is 1, making the visualization clearer
# max_weight3 = attention_weights3.max()
# attention_weights3 = attention_weights3 / max_weight3 # increase weights to make visualization clearer
# max_weight4 = attention_weights4.max()
# attention_weights4 = attention_weights4 / max_weight4 # increase weights to make visualization clearer
#print(np.shape(attention_weights1))
print(np.shape(sentence))
#print(np.shape(labels))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent, sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(
attention_weights.shape[-1] * (max_word_len * font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title("Attention")
plt.xlabel("words")
plt.ylabel("batch")
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k', linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4, cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != "PAD":
word = sentence[i, j]
else:
word = ""
fontsize = _font_size(len(word))
ax.text(x, y, word, ha="center", va="center", color=color, size=fontsize)
idx = [i + 0.5 for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention("attention_real_han.png", attention_weights_word, np.array([[x] for x in attention_weights_sent]), sentence)
# plot_attention("attention_real3.png", attention_weights3, sentence)
# plot_attention("attention_real4.png", attention_weights4, sentence)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
| return (x[3])[(x[2])] | identifier_body |
inb4404.py | = os.path.join(script_path, "downloads")
# Whether to use the original filenames or UNIX timestamps
# True -> original filenames
# False -> UNIX timestamps
self.USE_NAMES = False
# Path to an archive file (holds MD5 hashes of downloaded files)
self.ARCHIVE = None
# How many connections to use with aiohttp's ClientSession
self.CONNECTIONS = 10
# How often to retry a thread (!) if errors occur
# N>0 -> retry N times
# N=0 -> disable
# N<0 -> retry indefinitely (not recommended)
self.RETRIES = 5
class CustomArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser's automatic help text."""
def format_help(self):
"""Return custom help text."""
help_text = dedent(f"""\
A4A is a Python script to download all files from 4chan(nel) threads.
Usage: {self.prog} [OPTIONS] THREAD [THREAD]...
{self.prog} [OPTIONS] -l LIST [-l LIST]...
Thread:
4chan(nel) thread URL
Options:
-h, --help show help
-l, --list LIST read thread links from file
-q, --quiet suppress non-error output
-p, --path PATH set output directory (def: {self.get_default("base_dir")})
-f, --filenames use original filenames instead of UNIX timestamps
-a, --archive FILE keep track of downloaded files by logging MD5 hashes
--connections N number of connections to use (def: {self.get_default("connections")})
--retries N how often to retry a thread if errors occur (def: {self.get_default("retries")})
N<0 to retry indefinitely (not recommended)
""")
return help_text
class DownloadableThread:
"""Store thread-related information and handle its processing."""
def __init__(self, position, link):
"""Initialize thread object."""
self.count = 0
self.files = []
self.pos = position
self.link = link.split("#")[0]
info = link.partition(".org/")[2]
# info has the form <board>/thread/<thread> or <board>/thread/<thread>/<dir name>
if len(info.split("/")) > 3:
self.board, _, self.id, self.dir = info.split("/")
else:
self.board, _, self.id = info.split("/")
self.dir = self.id
resp_json = self.get_json()
if not resp_json:
return
self.files = [
{
'link': f"https://i.4cdn.org/{self.board}/{p['tim']}{p['ext']}",
'name': f"{p['filename'] if opts.names else p['tim']}{p['ext']}",
'md5': b64decode(p['md5']).hex(),
} for p in resp_json['posts'] if 'tim' in p
]
def resolve_path(self):
"""Assemble final output path and change the working directory."""
# Fixed output layout: <base_dir>/<board>/<thread directory>
out_dir = os.path.join(opts.base_dir, self.board, self.dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
def get_json(self):
"""Contact 4chan's API to get the names of all files in a thread."""
api_call = f"https://a.4cdn.org/{self.board}/thread/{self.id}.json"
# Custom header value is necessary to avoid 403 errors on 4chan.org
# 4channel works just fine without
req = Request(api_call, headers={'User-Agent': '4chan Archiver'})
resp_json = None
for _ in range(2):
try:
with urlopen(req) as resp:
resp_json = resp.read()
resp_json = json.loads(resp_json)
break
except urllib.error.HTTPError:
time.sleep(5)
continue
except urllib.error.URLError:
if self.pos == 1:
err("Couldn't establish connection!")
else:
err("Lost connection!")
sys.exit(1)
return resp_json
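# This mirrors 4chan's read-only JSON API: a thread's posts live at
# https://a.4cdn.org/<board>/thread/<id>.json, and every post with a file
# carries 'tim' (server-side name), 'ext', 'filename' and a base64 'md5',
# exactly the fields consumed in __init__ above.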
def fetch_progress(self):
"""Return thread-wise and file-wise progress."""
threads = len(opts.thread)
files = len(self.files)
t_width = len(str(threads))
f_width = len(str(files))
t_progress = f"[{self.pos: >{t_width}}/{threads}]"
f_progress = f"[{self.count: >{f_width}}/{files}]"
if self.count:
progress = f"{t_progress} {f_progress}"
else:
progress = t_progress
return progress
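# e.g. with 12 threads and 250 files, mid-thread this returns
# "[ 3/12] [ 41/250]" (thread-wise, then file-wise progress)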
async def get_file(self, link, name, md5, session):
"""Download a single file."""
if os.path.exists(name) or md5 in opts.archived_md5:
self.count += 1
return
async with session.get(link) as media:
# Open file initially with .part suffix
with open(f"{name}.part", "wb") as f:
while True:
chunk = await media.content.read(1024)
if not chunk:
break
f.write(chunk)
# Remove .part suffix once complete
# After this point file won't get removed if script gets interrupted
os.rename(f"{name}.part", name)
if opts.archive:
log_hash(md5)
self.count += 1
msg(f"{self.fetch_progress()} {self.board}/{self.dir}/{name}")
async def download(self):
"""Download a thread."""
if not self.files:
# In this case the progress line gets printed to stderr
err(f"{self.fetch_progress()} {self.link}")
err(f"Thread 404'd!")
return
msg(f"{self.fetch_progress()} {self.link}")
# Retries imply attempts after the first try failed
# So the max. number of attempts is opts.retries+1
attempt = 0
while attempt <= opts.retries or opts.retries < 0:
if attempt > 0:
err(f"Retrying... ({attempt} out of "
f"{opts.retries if opts.retries > 0 else 'Inf'} attempts)")
time.sleep(5)
try:
tout = aiohttp.ClientTimeout(total=None)
conn = aiohttp.TCPConnector(limit=opts.connections)
async with aiohttp.ClientSession(timeout=tout, connector=conn) as session:
tasks = [self.get_file(f['link'], f['name'], f['md5'], session)
for f in self.files]
await asyncio.gather(*tasks)
# Leave attempt loop early if all files were downloaded successfully
break
except aiohttp.ClientConnectionError:
err("Lost connection!")
self.count = 0
attempt += 1
except aiohttp.ClientPayloadError:
err("Malformed or missing chunk!")
self.count = 0
attempt += 1
finally:
clean()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
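# clean() and log_hash() are called above but defined outside this excerpt.
# Minimal sketches of their plausible behavior (assumptions, not the originals):
def clean():
    """Remove leftover .part files from an interrupted download (assumed)."""
    for f in os.listdir("."):
        if f.endswith(".part"):
            os.remove(f)
def log_hash(md5):
    """Record a downloaded file's MD5 in the archive file (assumed)."""
    opts.archived_md5.add(md5)  # assumes archived_md5 is a set
    with open(opts.archive, "a") as f:
        f.write(f"{md5}\n")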
def err(*args, level=0, **kwargs):
"""Print to stderr."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, file=sys.stderr, **kwargs)
def msg(*args, level=1, **kwargs):
"""Print to stdout."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, **kwargs)
def positive_int(string):
"""Convert string provided by argparse to a positive int."""
try:
value = int(string)
if value <= 0:
raise ValueError
except ValueError:
error = f"invalid positive int value: {string}"
raise argparse.ArgumentTypeError(error)
return value
def valid_list(string):
"""Convert string provided by argparse to list path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
raise argparse.ArgumentTypeError(f"{path} does not exist!")
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid text file!")
return path
def valid_archive(string):
|
def parse_cli():
"""Parse the command line arguments with argparse."""
defaults = DefaultOptions()
parser = CustomArgumentParser(usage="%(prog)s [OPTIONS] THREAD [THREAD]...")
parser.add_argument("thread", nargs="*", help="thread URL")
parser.add_argument(
"-l", "--list", action="append", type=valid_list, default=defaults.LIST
)
parser.add_argument(
"-q", "--quiet", dest="verbosity", action="store_const",
const=0, default=defaults.VERBOSITY
)
parser.add_argument("-p", "--path", dest="base_dir", default=defaults.PATH)
| """Convert string provided by argparse to an archive path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
pass
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid archive!")
return path | identifier_body |
inb4404.py | = os.path.join(script_path, "downloads")
# Whether to use the original filenames or UNIX timestamps
# True -> original filenames
# False -> UNIX timestamps
self.USE_NAMES = False
# Path to an archive file (holds MD5 hashes of downloaded files)
self.ARCHIVE = None
# How many connections to use with aiohttp's ClientSession
self.CONNECTIONS = 10
# How often to retry a thread (!) if errors occur
# N>0 -> retry N times
# N=0 -> disable
# N<0 -> retry indefinitely (not recommended)
self.RETRIES = 5
class CustomArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser's automatic help text."""
def format_help(self):
"""Return custom help text."""
help_text = dedent(f"""\
A4A is a Python script to download all files from 4chan(nel) threads.
Usage: {self.prog} [OPTIONS] THREAD [THREAD]...
{self.prog} [OPTIONS] -l LIST [-l LIST]...
Thread:
4chan(nel) thread URL
Options:
-h, --help show help
-l, --list LIST read thread links from file
-q, --quiet suppress non-error output
-p, --path PATH set output directory (def: {self.get_default("base_dir")})
-f, --filenames use original filenames instead of UNIX timestamps
-a, --archive FILE keep track of downloaded files by logging MD5 hashes
--connections N number of connections to use (def: {self.get_default("connections")})
--retries N how often to retry a thread if errors occur (def: {self.get_default("retries")})
N<0 to retry indefinitely (not recommended)
""")
return help_text
class DownloadableThread:
"""Store thread-related information and handle its processing."""
def | (self, position, link):
"""Initialize thread object."""
self.count = 0
self.files = []
self.pos = position
self.link = link.split("#")[0]
info = link.partition(".org/")[2]
# info has the form <board>/thread/<thread> or <board>/thread/<thread>/<dir name>
if len(info.split("/")) > 3:
self.board, _, self.id, self.dir = info.split("/")
else:
self.board, _, self.id = info.split("/")
self.dir = self.id
resp_json = self.get_json()
if not resp_json:
return
self.files = [
{
'link': f"https://i.4cdn.org/{self.board}/{p['tim']}{p['ext']}",
'name': f"{p['filename'] if opts.names else p['tim']}{p['ext']}",
'md5': b64decode(p['md5']).hex(),
} for p in resp_json['posts'] if 'tim' in p
]
def resolve_path(self):
"""Assemble final output path and change the working directory."""
# Output layout is fixed: <base_dir>/<board>/<thread id or custom dir name>
out_dir = os.path.join(opts.base_dir, self.board, self.dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
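# The exists-check-then-create pair above has a (mostly theoretical) race if
# several processes share a parent directory; os.makedirs can absorb both
# steps in one call:
#
# os.makedirs(out_dir, exist_ok=True)  # no error if out_dir already exists
# os.chdir(out_dir)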
def get_json(self):
"""Contact 4chan's API to get the names of all files in a thread."""
api_call = f"https://a.4cdn.org/{self.board}/thread/{self.id}.json"
# Custom header value is necessary to avoid 403 errors on 4chan.org
# 4channel works just fine without it
req = Request(api_call, headers={'User-Agent': '4chan Archiver'})
resp_json = None
for _ in range(2):
try:
with urlopen(req) as resp:
resp_json = resp.read()
resp_json = json.loads(resp_json)
break
except urllib.error.HTTPError:
time.sleep(5)
continue
except urllib.error.URLError:
if self.pos == 1:
err("Couldn't establish connection!")
else:
err("Lost connection!")
sys.exit(1)
return resp_json
def fetch_progress(self):
"""Return thread-wise and file-wise progress."""
threads = len(opts.thread)
files = len(self.files)
t_width = len(str(threads))
f_width = len(str(files))
t_progress = f"[{self.pos: >{t_width}}/{threads}]"
f_progress = f"[{self.count: >{f_width}}/{files}]"
if self.count:
progress = f"{t_progress} {f_progress}"
else:
progress = t_progress
return progress
async def get_file(self, link, name, md5, session):
"""Download a single file."""
if os.path.exists(name) or md5 in opts.archived_md5:
self.count += 1
return
async with session.get(link) as media:
# Open file initially with .part suffix
with open(f"{name}.part", "wb") as f:
while True:
chunk = await media.content.read(1024)
if not chunk:
break
f.write(chunk)
# Remove .part suffix once complete
# After this point file won't get removed if script gets interrupted
os.rename(f"{name}.part", name)
if opts.archive:
log_hash(md5)
self.count += 1
msg(f"{self.fetch_progress()} {self.board}/{self.dir}/{name}")
async def download(self):
"""Download a thread."""
if not self.files:
# In this case the progress line gets printed to stderr
err(f"{self.fetch_progress()} {self.link}")
err(f"Thread 404'd!")
return
msg(f"{self.fetch_progress()} {self.link}")
# Retries imply attempts after the first try failed
# So the max. number of attempts is opts.retries+1
attempt = 0
while attempt <= opts.retries or opts.retries < 0:
if attempt > 0:
err(f"Retrying... ({attempt} out of "
f"{opts.retries if opts.retries > 0 else 'Inf'} attempts)")
time.sleep(5)
try:
tout = aiohttp.ClientTimeout(total=None)
conn = aiohttp.TCPConnector(limit=opts.connections)
async with aiohttp.ClientSession(timeout=tout, connector=conn) as session:
tasks = [self.get_file(f['link'], f['name'], f['md5'], session)
for f in self.files]
await asyncio.gather(*tasks)
# Leave attempt loop early if all files were downloaded successfully
break
except aiohttp.ClientConnectionError:
err("Lost connection!")
self.count = 0
attempt += 1
except aiohttp.ClientPayloadError:
err("Malformed or missing chunk!")
self.count = 0
attempt += 1
finally:
clean()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def err(*args, level=0, **kwargs):
"""Print to stderr."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, file=sys.stderr, **kwargs)
def msg(*args, level=1, **kwargs):
"""Print to stdout."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, **kwargs)
def positive_int(string):
"""Convert string provided by argparse to a positive int."""
try:
value = int(string)
if value <= 0:
raise ValueError
except ValueError:
error = f"invalid positive int value: {string}"
raise argparse.ArgumentTypeError(error)
return value
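# For reference, this is how such a converter plugs into argparse: the type
# callable receives the raw string, and an ArgumentTypeError surfaces as a
# normal usage error. Hypothetical standalone example:
#
# parser = argparse.ArgumentParser()
# parser.add_argument("--connections", type=positive_int, default=10)
# parser.parse_args(["--connections", "0"])  # exits with "invalid positive int value: 0"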
def valid_list(string):
"""Convert string provided by argparse to list path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
raise argparse.ArgumentTypeError(f"{path} does not exist!")
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid text file!")
return path
def valid_archive(string):
"""Convert string provided by argparse to an archive path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
pass
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid archive!")
return path
def parse_cli():
"""Parse the command line arguments with argparse."""
defaults = DefaultOptions()
parser = CustomArgumentParser(usage="%(prog)s [OPTIONS] THREAD [THREAD]...")
parser.add_argument("thread", nargs="*", help="thread URL")
parser.add_argument(
"-l", "--list", action="append", type=valid_list, default=defaults.LIST
)
parser.add_argument(
"-q", "--quiet", dest="verbosity", action="store_const",
const=0, default=defaults.VERBOSITY
)
parser.add_argument("-p", "--path", dest="base_dir", default=defaults.PATH | __init__ | identifier_name |
inb4404.py | = os.path.join(script_path, "downloads")
# Whether to use the original filenames or UNIX timestamps
# True -> original filenames
# False -> UNIX timestamps
self.USE_NAMES = False
# Path to an archive file (holds MD5 hashes of downloaded files)
self.ARCHIVE = None
# How many connections to use with aiohttp's ClientSession
self.CONNECTIONS = 10
# How often to retry a thread (!) if errors occur
# N>0 -> retry N times
# N=0 -> disable
# N<0 -> retry indefinitely (not recommended)
self.RETRIES = 5
class CustomArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser's automatic help text."""
def format_help(self):
"""Return custom help text."""
help_text = dedent(f"""\
A4A is a Python script to download all files from 4chan(nel) threads.
Usage: {self.prog} [OPTIONS] THREAD [THREAD]...
{self.prog} [OPTIONS] -l LIST [-l LIST]...
Thread:
4chan(nel) thread URL
Options:
-h, --help show help
-l, --list LIST read thread links from file
-q, --quiet suppress non-error output
-p, --path PATH set output directory (def: {self.get_default("base_dir")})
-f, --filenames use original filenames instead of UNIX timestamps
-a, --archive FILE keep track of downloaded files by logging MD5 hashes
--connections N number of connections to use (def: {self.get_default("connections")})
--retries N how often to retry a thread if errors occur (def: {self.get_default("retries")})
N<0 to retry indefinitely (not recommended)
""")
return help_text
class DownloadableThread:
"""Store thread-related information and handle its processing."""
def __init__(self, position, link):
"""Initialize thread object."""
self.count = 0
self.files = []
self.pos = position
self.link = link.split("#")[0]
info = link.partition(".org/")[2]
# info has the form <board>/thread/<thread> or <board>/thread/<thread>/<dir name>
if len(info.split("/")) > 3:
self.board, _, self.id, self.dir = info.split("/")
else:
self.board, _, self.id = info.split("/")
self.dir = self.id
resp_json = self.get_json()
if not resp_json:
return
self.files = [
{
'link': f"https://i.4cdn.org/{self.board}/{p['tim']}{p['ext']}",
'name': f"{p['filename'] if opts.names else p['tim']}{p['ext']}",
'md5': b64decode(p['md5']).hex(),
} for p in resp_json['posts'] if 'tim' in p
]
def resolve_path(self):
"""Assemble final output path and change the working directory."""
# Output layout is fixed: <base_dir>/<board>/<thread id or custom dir name>
out_dir = os.path.join(opts.base_dir, self.board, self.dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
def get_json(self):
"""Contact 4chan's API to get the names of all files in a thread."""
api_call = f"https://a.4cdn.org/{self.board}/thread/{self.id}.json"
# Custom header value is necessary to avoid 403 errors on 4chan.org
# 4channel works just fine without it
req = Request(api_call, headers={'User-Agent': '4chan Archiver'})
resp_json = None
for _ in range(2):
try:
with urlopen(req) as resp:
resp_json = resp.read()
resp_json = json.loads(resp_json)
break
except urllib.error.HTTPError:
time.sleep(5)
continue
except urllib.error.URLError:
if self.pos == 1:
err("Couldn't establish connection!")
else:
err("Lost connection!")
sys.exit(1)
return resp_json
def fetch_progress(self):
"""Return thread-wise and file-wise progress."""
threads = len(opts.thread)
files = len(self.files)
t_width = len(str(threads))
f_width = len(str(files))
t_progress = f"[{self.pos: >{t_width}}/{threads}]"
f_progress = f"[{self.count: >{f_width}}/{files}]"
if self.count:
progress = f"{t_progress} {f_progress}"
else:
progress = t_progress
return progress
async def get_file(self, link, name, md5, session):
"""Download a single file."""
if os.path.exists(name) or md5 in opts.archived_md5:
self.count += 1
return
async with session.get(link) as media:
# Open file initially with .part suffix
with open(f"{name}.part", "wb") as f:
while True:
chunk = await media.content.read(1024)
if not chunk:
break
f.write(chunk)
# Remove .part suffix once complete
# After this point file won't get removed if script gets interrupted
os.rename(f"{name}.part", name)
if opts.archive:
log_hash(md5)
self.count += 1
msg(f"{self.fetch_progress()} {self.board}/{self.dir}/{name}")
async def download(self):
"""Download a thread."""
if not self.files:
# In this case the progress line gets printed to stderr
err(f"{self.fetch_progress()} {self.link}") | # Retries imply attempts after the first try failed
# So the max. number of attempts is opts.retries+1
attempt = 0
while attempt <= opts.retries or opts.retries < 0:
if attempt > 0:
err(f"Retrying... ({attempt} out of "
f"{opts.retries if opts.retries > 0 else 'Inf'} attempts)")
time.sleep(5)
try:
tout = aiohttp.ClientTimeout(total=None)
conn = aiohttp.TCPConnector(limit=opts.connections)
async with aiohttp.ClientSession(timeout=tout, connector=conn) as session:
tasks = [self.get_file(f['link'], f['name'], f['md5'], session)
for f in self.files]
await asyncio.gather(*tasks)
# Leave attempt loop early if all files were downloaded successfully
break
except aiohttp.ClientConnectionError:
err("Lost connection!")
self.count = 0
attempt += 1
except aiohttp.ClientPayloadError:
err("Malformed or missing chunk!")
self.count = 0
attempt += 1
finally:
clean()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def err(*args, level=0, **kwargs):
"""Print to stderr."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, file=sys.stderr, **kwargs)
def msg(*args, level=1, **kwargs):
"""Print to stdout."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, **kwargs)
def positive_int(string):
"""Convert string provided by argparse to a positive int."""
try:
value = int(string)
if value <= 0:
raise ValueError
except ValueError:
error = f"invalid positive int value: {string}"
raise argparse.ArgumentTypeError(error)
return value
def valid_list(string):
"""Convert string provided by argparse to list path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
raise argparse.ArgumentTypeError(f"{path} does not exist!")
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid text file!")
return path
def valid_archive(string):
"""Convert string provided by argparse to an archive path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
pass
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid archive!")
return path
def parse_cli():
"""Parse the command line arguments with argparse."""
defaults = DefaultOptions()
parser = CustomArgumentParser(usage="%(prog)s [OPTIONS] THREAD [THREAD]...")
parser.add_argument("thread", nargs="*", help="thread URL")
parser.add_argument(
"-l", "--list", action="append", type=valid_list, default=defaults.LIST
)
parser.add_argument(
"-q", "--quiet", dest="verbosity", action="store_const",
const=0, default=defaults.VERBOSITY
)
parser.add_argument("-p", "--path", dest="base_dir", default=defaults.PATH)
| err(f"Thread 404'd!")
return
msg(f"{self.fetch_progress()} {self.link}")
| random_line_split |
inb4404.py | = os.path.join(script_path, "downloads")
# Whether to use the original filenames or UNIX timestamps
# True -> original filenames
# False -> UNIX timestamps
self.USE_NAMES = False
# Path to an archive file (holds MD5 hashes of downloaded files)
self.ARCHIVE = None
# How many connections to use with aiohttp's ClientSession
self.CONNECTIONS = 10
# How often to retry a thread (!) if errors occur
# N>0 -> retry N times
# N=0 -> disable
# N<0 -> retry indefinitely (not recommended)
self.RETRIES = 5
class CustomArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser's automatic help text."""
def format_help(self):
"""Return custom help text."""
help_text = dedent(f"""\
A4A is a Python script to download all files from 4chan(nel) threads.
Usage: {self.prog} [OPTIONS] THREAD [THREAD]...
{self.prog} [OPTIONS] -l LIST [-l LIST]...
Thread:
4chan(nel) thread URL
Options:
-h, --help show help
-l, --list LIST read thread links from file
-q, --quiet suppress non-error output
-p, --path PATH set output directory (def: {self.get_default("base_dir")})
-f, --filenames use original filenames instead of UNIX timestamps
-a, --archive FILE keep track of downloaded files by logging MD5 hashes
--connections N number of connections to use (def: {self.get_default("connections")})
--retries N how often to retry a thread if errors occur (def: {self.get_default("retries")})
N<0 to retry indefinitely (not recommended)
""")
return help_text
class DownloadableThread:
"""Store thread-related information and handle its processing."""
def __init__(self, position, link):
"""Initialize thread object."""
self.count = 0
self.files = []
self.pos = position
self.link = link.split("#")[0]
info = link.partition(".org/")[2]
# info has the form <board>/thread/<thread> or <board>/thread/<thread>/<dir name>
if len(info.split("/")) > 3:
self.board, _, self.id, self.dir = info.split("/")
else:
self.board, _, self.id = info.split("/")
self.dir = self.id
resp_json = self.get_json()
if not resp_json:
return
self.files = [
{
'link': f"https://i.4cdn.org/{self.board}/{p['tim']}{p['ext']}",
'name': f"{p['filename'] if opts.names else p['tim']}{p['ext']}",
'md5': b64decode(p['md5']).hex(),
} for p in resp_json['posts'] if 'tim' in p
]
def resolve_path(self):
"""Assemble final output path and change the working directory."""
# Output layout is fixed: <base_dir>/<board>/<thread id or custom dir name>
out_dir = os.path.join(opts.base_dir, self.board, self.dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
def get_json(self):
"""Contact 4chan's API to get the names of all files in a thread."""
api_call = f"https://a.4cdn.org/{self.board}/thread/{self.id}.json"
# Custom header value is necessary to avoid 403 errors on 4chan.org
# 4channel works just fine without it
req = Request(api_call, headers={'User-Agent': '4chan Archiver'})
resp_json = None
for _ in range(2):
try:
with urlopen(req) as resp:
resp_json = resp.read()
resp_json = json.loads(resp_json)
break
except urllib.error.HTTPError:
time.sleep(5)
continue
except urllib.error.URLError:
if self.pos == 1:
err("Couldn't establish connection!")
else:
err("Lost connection!")
sys.exit(1)
return resp_json
def fetch_progress(self):
"""Return thread-wise and file-wise progress."""
threads = len(opts.thread)
files = len(self.files)
t_width = len(str(threads))
f_width = len(str(files))
t_progress = f"[{self.pos: >{t_width}}/{threads}]"
f_progress = f"[{self.count: >{f_width}}/{files}]"
if self.count:
|
else:
progress = t_progress
return progress
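# The format specs above right-align each counter to the width of its
# maximum value, so the progress column keeps a stable width. For example:
#
# pos, threads = 3, 12
# f"[{pos: >{len(str(threads))}}/{threads}]"  # -> '[ 3/12]'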
async def get_file(self, link, name, md5, session):
"""Download a single file."""
if os.path.exists(name) or md5 in opts.archived_md5:
self.count += 1
return
async with session.get(link) as media:
# Open file initially with .part suffix
with open(f"{name}.part", "wb") as f:
while True:
chunk = await media.content.read(1024)
if not chunk:
break
f.write(chunk)
# Remove .part suffix once complete
# After this point file won't get removed if script gets interrupted
os.rename(f"{name}.part", name)
if opts.archive:
log_hash(md5)
self.count += 1
msg(f"{self.fetch_progress()} {self.board}/{self.dir}/{name}")
async def download(self):
"""Download a thread."""
if not self.files:
# In this case the progress line gets printed to stderr
err(f"{self.fetch_progress()} {self.link}")
err(f"Thread 404'd!")
return
msg(f"{self.fetch_progress()} {self.link}")
# Retries imply attempts after the first try failed
# So the max. number of attempts is opts.retries+1
attempt = 0
while attempt <= opts.retries or opts.retries < 0:
if attempt > 0:
err(f"Retrying... ({attempt} out of "
f"{opts.retries if opts.retries > 0 else 'Inf'} attempts)")
time.sleep(5)
try:
tout = aiohttp.ClientTimeout(total=None)
conn = aiohttp.TCPConnector(limit=opts.connections)
async with aiohttp.ClientSession(timeout=tout, connector=conn) as session:
tasks = [self.get_file(f['link'], f['name'], f['md5'], session)
for f in self.files]
await asyncio.gather(*tasks)
# Leave attempt loop early if all files were downloaded successfully
break
except aiohttp.ClientConnectionError:
err("Lost connection!")
self.count = 0
attempt += 1
except aiohttp.ClientPayloadError:
err("Malformed or missing chunk!")
self.count = 0
attempt += 1
finally:
clean()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def err(*args, level=0, **kwargs):
"""Print to stderr."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, file=sys.stderr, **kwargs)
def msg(*args, level=1, **kwargs):
"""Print to stdout."""
if level <= opts.verbosity:
print(f"[{time.strftime('%X')}]", *args, **kwargs)
def positive_int(string):
"""Convert string provided by argparse to a positive int."""
try:
value = int(string)
if value <= 0:
raise ValueError
except ValueError:
error = f"invalid positive int value: {string}"
raise argparse.ArgumentTypeError(error)
return value
def valid_list(string):
"""Convert string provided by argparse to list path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
raise argparse.ArgumentTypeError(f"{path} does not exist!")
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid text file!")
return path
def valid_archive(string):
"""Convert string provided by argparse to an archive path."""
path = os.path.abspath(string)
try:
with open(path, "r") as f:
_ = f.read(1)
except FileNotFoundError:
pass
except (OSError, UnicodeError):
raise argparse.ArgumentTypeError(f"{path} is not a valid archive!")
return path
def parse_cli():
"""Parse the command line arguments with argparse."""
defaults = DefaultOptions()
parser = CustomArgumentParser(usage="%(prog)s [OPTIONS] THREAD [THREAD]...")
parser.add_argument("thread", nargs="*", help="thread URL")
parser.add_argument(
"-l", "--list", action="append", type=valid_list, default=defaults.LIST
)
parser.add_argument(
"-q", "--quiet", dest="verbosity", action="store_const",
const=0, default=defaults.VERBOSITY
)
parser.add_argument("-p", "--path", dest="base_dir", default=defaults.PATH | progress = f"{t_progress} {f_progress}" | conditional_block |
barista-examples.ts | -runner';
const ARGS_TO_OMIT = 3;
const DEPLOY_URL_ARG = 'deploy-url';
const projectRoot = join(__dirname, '../../..');
const environmentsDir = join(
projectRoot,
'src',
'barista-examples',
'environments',
);
const args = process.argv.splice(ARGS_TO_OMIT);
const getDeployUrl = () => {
const deployUrlArg = args.find(arg => arg.startsWith(`--${DEPLOY_URL_ARG}=`));
if (!deployUrlArg) {
return undefined;
}
// tslint:disable-next-line no-magic-numbers
const [, deployUrl] = deployUrlArg.split('=', 2);
return deployUrl;
};
const { examplesDir, libDir } = buildConfig;
interface ExampleMetadata {
component: string;
sourcePath: string;
fileContent?: string;
}
interface InvalidExampleReferences {
name: string;
sourcePath: string;
}
interface AppComponentRouteSetup {
name: string;
examples: Array<{ name: string; route: string; className: string, import: string }>;
}
/** Parses the examples and collects all data */
function getExampleMetadata(sourceFiles: string[]): ExampleMetadata[] {
const parsedData: Set<ExampleMetadata> = new Set();
sourceFiles.forEach((sourceFilePath) => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const fileName = basename(sourceFilePath);
const components = retrieveExampleClassNames(fileName, content);
components.forEach((component) => {
const metadata: ExampleMetadata = {
component,
sourcePath: relative(examplesDir, sourceFilePath),
fileContent: content,
};
parsedData.add(metadata);
});
});
return [...parsedData];
}
/** Parses the AST of the given source file and collects the Angular component class names. */
export function retrieveExampleClassNames(fileName: string, content: string): string[] | }
/** Parses the AST of the given source file and collects the component's inline template content. */
export function retrieveExampleTemplateContent(fileName: string, content: string): string | undefined {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentTemplates: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.PropertyAssignment) {
if (node.name && node.name.escapedText === 'template' && node.initializer && node.initializer.text) {
componentTemplates.push(sanitizeTemplateText(node.initializer.text));
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentTemplates.length > 0 ? componentTemplates[0] : undefined;
}
function sanitizeTemplateText(template: string) {
if (template.startsWith('\n ')) {
template = template.replace(/$\n /gm, '\n');
}
if (template.startsWith('\n')) {
template = template.replace('\n', ''); // remove leading newline
}
return template;
}
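// Side note on the visitor above: the compiler API also ships typed node
// guards, which would avoid the `any` parameter and the raw kind comparison.
// Sketch of the same walk (behavior unchanged):
//
// const visitNode = (node: ts.Node): void => {
//   if (ts.isClassDeclaration(node) && node.decorators) {
//     // ...inspect node.decorators exactly as above...
//   }
//   ts.forEachChild(node, visitNode);
// };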
/** Build ES module import statements for the given example metadata. */
function buildImportsTemplate(data: ExampleMetadata): string {
const relativeSrcPath = data.sourcePath.replace(/\\/g, '/').replace('.ts', '');
return `import { ${data.component} } from './${relativeSrcPath}';`;
}
/**
* Generates the app module from the given source files and writes it to a specified output
* file.
*/
function generateAppModule(
parsedData: ExampleMetadata[],
outputFile: string,
baseDir: string,
): void {
const generatedModuleFile = populateAppModuleTemplate([...parsedData]);
const generatedFilePath = join(baseDir, outputFile);
if (!fs.existsSync(dirname(generatedFilePath))) {
fs.mkdirSync(dirname(generatedFilePath));
}
fs.writeFileSync(generatedFilePath, generatedModuleFile);
}
/** Inlines the app module template with the specified parsed data. */
function populateAppModuleTemplate(parsedData: ExampleMetadata[]): string {
const exampleImports = parsedData
.map(m => buildImportsTemplate(m))
.join('\n');
const exampleList = parsedData.map(m => m.component);
return fs
.readFileSync(join(examplesDir, './app.module.template'), 'utf8')
.replace('${imports}', exampleImports)
.replace('${examples}', `[\n ${exampleList.join(',\n ')},\n]`);
}
/** Generates the imports for each example file */
function generateImportsForExamples(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let imports = '';
routeMetadata.forEach(metadata => {
metadata.examples.forEach(example => {
imports = `${imports}\n${example.import}`;
});
});
return content.replace('${imports}', imports);
}
/** Generates the nav items list used to render the sidebar */
function generateNavItems(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
const navItems = routeMetadata.map(metadata => {
const exampleData = metadata.examples.map(example => ({
name: example.name,
route: example.route,
}));
return { name: metadata.name, examples: exampleData };
});
return content.replace('${navItems}', JSON.stringify(navItems, null, '\t'));
}
/** Generates the route definitions */
function generateRoutes(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let routeString = '';
routeMetadata.forEach((metadata: AppComponentRouteSetup) => {
metadata.examples.forEach(example => {
routeString = `${routeString}
{ path: '${example.route}', component: ${example.className}},`;
});
});
if (routeString.endsWith(',')) {
routeString = routeString.slice(0, -1);
}
return content.replace('${routes}', `[${routeString}]`);
}
/** Generates the app component */
function generateAppComponent(parsedData: ExampleMetadata[]): void {
const routeMetadata = generateRouteMetadata(parsedData);
let content = fs.readFileSync(join(examplesDir, 'app.component.template'), {
encoding: 'utf8',
});
content = generateImportsForExamples(content, routeMetadata);
content = generateNavItems(content, routeMetadata);
content = generateRoutes(content, routeMetadata);
fs.writeFileSync(join(examplesDir, 'app.component.ts'), content, {
encoding: 'utf8',
});
}
/** Checks the content of given source files for invalid example names and returns them. */
function getInvalidExampleReferences(
sourceFiles: string[],
exampleNames: string[],
): InvalidExampleReferences[] {
const invalidRefs: InvalidExampleReferences[] = [];
sourceFiles.forEach(sourceFilePath => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const regex = /<docs-source-example example=\"(.+?)\"(.*?)><\/docs-source-example>/g;
let matches;
// tslint:disable-next-line no-conditional-assignment
while ((matches = regex.exec(content)) !== null) {
if (!exampleNames.includes(matches[1])) {
const exampleRef = {
name: matches[1],
sourcePath: sourceFilePath,
};
invalidRefs.push(exampleRef);
}
}
});
return invalidRefs;
}
/** Validates Barista example names used in lib readme files. */
task('barista-examples:validate', done => {
const exampleNames = getExampleMetadata(
glob(join(examplesDir, '*/*.ts')),
).map(metadata => metadata.component);
const invalidExampleRefs = getInvalidExampleReferences(
glob(join(libDir, '**/README.md')),
exampleNames,
);
if (invalidExampleRefs.length > 0) {
const errors = invalidExampleRefs.map(
ref => `Invalid example name "${ref.name}" found in ${ref.sourcePath}.`,
);
const errorMsg = errors.join('\n');
done(errorMsg);
return;
}
done();
});
/** Creates the examples module */
task('barista-example:generate', done => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
generateAppModule(metadata, 'app.module.ts', examplesDir);
generateAppComponent(metadata);
const routeData = flatten(generateRouteMetadata(metadata).map((route) =>
route.examples.map(route => ({name: route.name, route: route.route}))
));
fs.writeFileSync('src/barista-examples/routes.json', JSON.stringify({routes: routeData}, undefined,2));
done();
});
task('ide-completions', (done) => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
const transformedMetaData = metadata
.map(metaData => {
const templateContent = retrieveExampleTemplateContent(
metaData.sourcePath,
metaData.fileContent as string,
);
return {
name: 'dt-' + basename(metaData.sourcePath).replace('-example.ts', '').replace('.ts', ''),
template: templateContent,
};
});
| {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentClassNames: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.ClassDeclaration) {
if (node.decorators && node.decorators.length) {
for (const decorator of node.decorators) {
if (decorator.expression.expression.text === 'Component') {
componentClassNames.push(node.name.text);
}
}
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentClassNames; | identifier_body |
barista-examples.ts | -runner';
const ARGS_TO_OMIT = 3;
const DEPLOY_URL_ARG = 'deploy-url';
const projectRoot = join(__dirname, '../../..');
const environmentsDir = join(
projectRoot,
'src',
'barista-examples',
'environments',
);
const args = process.argv.splice(ARGS_TO_OMIT);
const getDeployUrl = () => {
const deployUrlArg = args.find(arg => arg.startsWith(`--${DEPLOY_URL_ARG}=`));
if (!deployUrlArg) {
return undefined;
}
// tslint:disable-next-line no-magic-numbers
const [, deployUrl] = deployUrlArg.split('=', 2);
return deployUrl;
};
const { examplesDir, libDir } = buildConfig;
interface ExampleMetadata {
component: string;
sourcePath: string;
fileContent?: string;
}
interface InvalidExampleReferences {
name: string;
sourcePath: string;
}
interface AppComponentRouteSetup {
name: string;
examples: Array<{ name: string; route: string; className: string, import: string }>;
}
/** Parses the examples and collects all data */
function getExampleMetadata(sourceFiles: string[]): ExampleMetadata[] {
const parsedData: Set<ExampleMetadata> = new Set();
sourceFiles.forEach((sourceFilePath) => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const fileName = basename(sourceFilePath);
const components = retrieveExampleClassNames(fileName, content);
components.forEach((component) => {
const metadata: ExampleMetadata = {
component,
sourcePath: relative(examplesDir, sourceFilePath),
fileContent: content,
};
parsedData.add(metadata);
});
});
return [...parsedData];
}
/** Parses the AST of the given source file and collects the Angular component class names. */
export function retrieveExampleClassNames(fileName: string, content: string): string[] {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentClassNames: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.ClassDeclaration) {
if (node.decorators && node.decorators.length) {
for (const decorator of node.decorators) {
if (decorator.expression.expression.text === 'Component') {
componentClassNames.push(node.name.text);
}
}
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentClassNames;
}
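// For illustration, a quick usage sketch of the extractor above
// (hypothetical input, not a real repo file):
//
// const names = retrieveExampleClassNames(
//   'demo.ts',
//   `@Component({template: '<p></p>'}) export class DemoExample {}`,
// );
// // names => ['DemoExample']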
/** Parses the AST of the given source file and collects the component's inline template content. */
export function retrieveExampleTemplateContent(fileName: string, content: string): string | undefined {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentTemplates: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.PropertyAssignment) {
if (node.name && node.name.escapedText === 'template' && node.initializer && node.initializer.text) {
componentTemplates.push(sanitizeTemplateText(node.initializer.text));
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentTemplates.length > 0 ? componentTemplates[0] : undefined;
}
function sanitizeTemplateText(template: string) {
if (template.startsWith('\n ')) {
template = template.replace(/$\n /gm, '\n');
}
if (template.startsWith('\n')) {
template = template.replace('\n', ''); // remove leading newline
}
return template;
}
/** Build ES module import statements for the given example metadata. */
function buildImportsTemplate(data: ExampleMetadata): string {
const relativeSrcPath = data.sourcePath.replace(/\\/g, '/').replace('.ts', '');
return `import { ${data.component} } from './${relativeSrcPath}';`;
}
/**
* Generates the app module from the given source files and writes it to a specified output
* file.
*/
function generateAppModule(
parsedData: ExampleMetadata[],
outputFile: string,
baseDir: string,
): void {
const generatedModuleFile = populateAppModuleTemplate([...parsedData]);
const generatedFilePath = join(baseDir, outputFile);
if (!fs.existsSync(dirname(generatedFilePath))) {
fs.mkdirSync(dirname(generatedFilePath));
}
fs.writeFileSync(generatedFilePath, generatedModuleFile);
} | .join('\n');
const exampleList = parsedData.map(m => m.component);
return fs
.readFileSync(join(examplesDir, './app.module.template'), 'utf8')
.replace('${imports}', exampleImports)
.replace('${examples}', `[\n ${exampleList.join(',\n ')},\n]`);
}
/** Generates the imports for each example file */
function generateImportsForExamples(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let imports = '';
routeMetadata.forEach(metadata => {
metadata.examples.forEach(example => {
imports = `${imports}\n${example.import}`;
});
});
return content.replace('${imports}', imports);
}
/** Generates the nav items list used to render the sidebar */
function generateNavItems(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
const navItems = routeMetadata.map(metadata => {
const exampleData = metadata.examples.map(example => ({
name: example.name,
route: example.route,
}));
return { name: metadata.name, examples: exampleData };
});
return content.replace('${navItems}', JSON.stringify(navItems, null, '\t'));
}
/** Generates the route definitions */
function generateRoutes(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let routeString = '';
routeMetadata.forEach((metadata: AppComponentRouteSetup) => {
metadata.examples.forEach(example => {
routeString = `${routeString}
{ path: '${example.route}', component: ${example.className}},`;
});
});
if (routeString.endsWith(',')) {
routeString = routeString.slice(0, -1);
}
return content.replace('${routes}', `[${routeString}]`);
}
/** Generates the app component */
function generateAppComponent(parsedData: ExampleMetadata[]): void {
const routeMetadata = generateRouteMetadata(parsedData);
let content = fs.readFileSync(join(examplesDir, 'app.component.template'), {
encoding: 'utf8',
});
content = generateImportsForExamples(content, routeMetadata);
content = generateNavItems(content, routeMetadata);
content = generateRoutes(content, routeMetadata);
fs.writeFileSync(join(examplesDir, 'app.component.ts'), content, {
encoding: 'utf8',
});
}
/** Checks the content of given source files for invalid example names and returns them. */
function getInvalidExampleReferences(
sourceFiles: string[],
exampleNames: string[],
): InvalidExampleReferences[] {
const invalidRefs: InvalidExampleReferences[] = [];
sourceFiles.forEach(sourceFilePath => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const regex = /<docs-source-example example=\"(.+?)\"(.*?)><\/docs-source-example>/g;
let matches;
// tslint:disable-next-line no-conditional-assignment
while ((matches = regex.exec(content)) !== null) {
if (!exampleNames.includes(matches[1])) {
const exampleRef = {
name: matches[1],
sourcePath: sourceFilePath,
};
invalidRefs.push(exampleRef);
}
}
});
return invalidRefs;
}
/** Validates Barista example names used in lib readme files. */
task('barista-examples:validate', done => {
const exampleNames = getExampleMetadata(
glob(join(examplesDir, '*/*.ts')),
).map(metadata => metadata.component);
const invalidExampleRefs = getInvalidExampleReferences(
glob(join(libDir, '**/README.md')),
exampleNames,
);
if (invalidExampleRefs.length > 0) {
const errors = invalidExampleRefs.map(
ref => `Invalid example name "${ref.name}" found in ${ref.sourcePath}.`,
);
const errorMsg = errors.join('\n');
done(errorMsg);
return;
}
done();
});
/** Creates the examples module */
task('barista-example:generate', done => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
generateAppModule(metadata, 'app.module.ts', examplesDir);
generateAppComponent(metadata);
const routeData = flatten(generateRouteMetadata(metadata).map((route) =>
route.examples.map(route => ({name: route.name, route: route.route}))
));
fs.writeFileSync('src/barista-examples/routes.json', JSON.stringify({routes: routeData}, undefined,2));
done();
});
task('ide-completions', (done) => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
const transformedMetaData = metadata
.map(metaData => {
const templateContent = retrieveExampleTemplateContent(
metaData.sourcePath,
metaData.fileContent as string,
);
return {
name: 'dt-' + basename(metaData.sourcePath).replace('-example.ts', '').replace('.ts', ''),
template: templateContent,
};
});
|
/** Inlines the app module template with the specified parsed data. */
function populateAppModuleTemplate(parsedData: ExampleMetadata[]): string {
const exampleImports = parsedData
.map(m => buildImportsTemplate(m)) | random_line_split |
barista-examples.ts | -runner';
const ARGS_TO_OMIT = 3;
const DEPLOY_URL_ARG = 'deploy-url';
const projectRoot = join(__dirname, '../../..');
const environmentsDir = join(
projectRoot,
'src',
'barista-examples',
'environments',
);
const args = process.argv.splice(ARGS_TO_OMIT);
const getDeployUrl = () => {
const deployUrlArg = args.find(arg => arg.startsWith(`--${DEPLOY_URL_ARG}=`));
if (!deployUrlArg) {
return undefined;
}
// tslint:disable-next-line no-magic-numbers
const [, deployUrl] = deployUrlArg.split('=', 2);
return deployUrl;
};
const { examplesDir, libDir } = buildConfig;
interface ExampleMetadata {
component: string;
sourcePath: string;
fileContent?: string;
}
interface InvalidExampleReferences {
name: string;
sourcePath: string;
}
interface AppComponentRouteSetup {
name: string;
examples: Array<{ name: string; route: string; className: string, import: string }>;
}
/** Parses the examples and collects all data */
function getExampleMetadata(sourceFiles: string[]): ExampleMetadata[] {
const parsedData: Set<ExampleMetadata> = new Set();
sourceFiles.forEach((sourceFilePath) => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const fileName = basename(sourceFilePath);
const components = retrieveExampleClassNames(fileName, content);
components.forEach((component) => {
const metadata: ExampleMetadata = {
component,
sourcePath: relative(examplesDir, sourceFilePath),
fileContent: content,
};
parsedData.add(metadata);
});
});
return [...parsedData];
}
/** Parses the AST of the given source file and collects the Angular component class names. */
export function retrieveExampleClassNames(fileName: string, content: string): string[] {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentClassNames: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.ClassDeclaration) {
if (node.decorators && node.decorators.length) {
for (const decorator of node.decorators) {
if (decorator.expression.expression.text === 'Component') {
componentClassNames.push(node.name.text);
}
}
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentClassNames;
}
/** Parses the AST of the given source file and collects the component's inline template content. */
export function retrieveExampleTemplateContent(fileName: string, content: string): string | undefined {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentTemplates: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.PropertyAssignment) {
if (node.name && node.name.escapedText === 'template' && node.initializer && node.initializer.text) {
componentTemplates.push(sanitizeTemplateText(node.initializer.text));
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentTemplates.length > 0 ? componentTemplates[0] : undefined;
}
function sanitizeTemplateText(template: string) {
if (template.startsWith('\n ')) {
template = template.replace(/$\n /gm, '\n');
}
if (template.startsWith('\n')) {
template = template.replace('\n', ''); // remove leading newline
}
return template;
}
/** Build ES module import statements for the given example metadata. */
function buildImportsTemplate(data: ExampleMetadata): string {
const relativeSrcPath = data.sourcePath.replace(/\\/g, '/').replace('.ts', '');
return `import { ${data.component} } from './${relativeSrcPath}';`;
}
/**
* Generates the app module from the given source files and writes it to a specified output
* file.
*/
function generateAppModule(
parsedData: ExampleMetadata[],
outputFile: string,
baseDir: string,
): void {
const generatedModuleFile = populateAppModuleTemplate([...parsedData]);
const generatedFilePath = join(baseDir, outputFile);
if (!fs.existsSync(dirname(generatedFilePath))) {
fs.mkdirSync(dirname(generatedFilePath));
}
fs.writeFileSync(generatedFilePath, generatedModuleFile);
}
/** Inlines the app module template with the specified parsed data. */
function populateAppModuleTemplate(parsedData: ExampleMetadata[]): string {
const exampleImports = parsedData
.map(m => buildImportsTemplate(m))
.join('\n');
const exampleList = parsedData.map(m => m.component);
return fs
.readFileSync(join(examplesDir, './app.module.template'), 'utf8')
.replace('${imports}', exampleImports)
.replace('${examples}', `[\n ${exampleList.join(',\n ')},\n]`);
}
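// populateAppModuleTemplate assumes app.module.template contains the two
// placeholders it replaces (each exactly once, since String.replace with a
// string pattern swaps only the first match). A minimal shape could be
// (illustrative only; the real file lives in examplesDir):
//
// ${imports}
//
// @NgModule({
//   declarations: ${examples},
// })
// export class AppModule {}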
/** Generates the imports for each example file */
function generateImportsForExamples(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let imports = '';
routeMetadata.forEach(metadata => {
metadata.examples.forEach(example => {
imports = `${imports}\n${example.import}`;
});
});
return content.replace('${imports}', imports);
}
/** Generates the nav items list used to render the sidebar */
function generateNavItems(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
const navItems = routeMetadata.map(metadata => {
const exampleData = metadata.examples.map(example => ({
name: example.name,
route: example.route,
}));
return { name: metadata.name, examples: exampleData };
});
return content.replace('${navItems}', JSON.stringify(navItems, null, '\t'));
}
/** Generates the route definitions */
function generateRoutes(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let routeString = '';
routeMetadata.forEach((metadata: AppComponentRouteSetup) => {
metadata.examples.forEach(example => {
routeString = `${routeString}
{ path: '${example.route}', component: ${example.className}},`;
});
});
if (routeString.endsWith(',')) {
routeString = routeString.slice(0, -1);
}
return content.replace('${routes}', `[${routeString}]`);
}
/** Generates the app component */
function generateAppComponent(parsedData: ExampleMetadata[]): void {
const routeMetadata = generateRouteMetadata(parsedData);
let content = fs.readFileSync(join(examplesDir, 'app.component.template'), {
encoding: 'utf8',
});
content = generateImportsForExamples(content, routeMetadata);
content = generateNavItems(content, routeMetadata);
content = generateRoutes(content, routeMetadata);
fs.writeFileSync(join(examplesDir, 'app.component.ts'), content, {
encoding: 'utf8',
});
}
/** Checks the content of given source files for invalid example names and returns them. */
function getInvalidExampleReferences(
sourceFiles: string[],
exampleNames: string[],
): InvalidExampleReferences[] {
const invalidRefs: InvalidExampleReferences[] = [];
sourceFiles.forEach(sourceFilePath => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const regex = /<docs-source-example example=\"(.+?)\"(.*?)><\/docs-source-example>/g;
let matches;
// tslint:disable-next-line no-conditional-assignment
while ((matches = regex.exec(content)) !== null) {
if (!exampleNames.includes(matches[1])) {
const exampleRef = {
name: matches[1],
sourcePath: sourceFilePath,
};
invalidRefs.push(exampleRef);
}
}
});
return invalidRefs;
}
/** Validates Barista example names used in lib readme files. */
task('barista-examples:validate', done => {
const exampleNames = getExampleMetadata(
glob(join(examplesDir, '*/*.ts')),
).map(metadata => metadata.component);
const invalidExampleRefs = getInvalidExampleReferences(
glob(join(libDir, '**/README.md')),
exampleNames,
);
if (invalidExampleRefs.length > 0) |
done();
});
/** Creates the examples module */
task('barista-example:generate', done => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
generateAppModule(metadata, 'app.module.ts', examplesDir);
generateAppComponent(metadata);
const routeData = flatten(generateRouteMetadata(metadata).map((route) =>
route.examples.map(route => ({name: route.name, route: route.route}))
));
fs.writeFileSync('src/barista-examples/routes.json', JSON.stringify({routes: routeData}, undefined,2));
done();
});
task('ide-completions', (done) => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
const transformedMetaData = metadata
.map(metaData => {
const templateContent = retrieveExampleTemplateContent(
metaData.sourcePath,
metaData.fileContent as string,
);
return {
name: 'dt-' + basename(metaData.sourcePath).replace('-example.ts', '').replace('.ts', ''),
template: templateContent,
};
| {
const errors = invalidExampleRefs.map(
ref => `Invalid example name "${ref.name}" found in ${ref.sourcePath}.`,
);
const errorMsg = errors.join('\n');
done(errorMsg);
return;
} | conditional_block |
barista-examples.ts | -runner';
const ARGS_TO_OMIT = 3;
const DEPLOY_URL_ARG = 'deploy-url';
const projectRoot = join(__dirname, '../../..');
const environmentsDir = join(
projectRoot,
'src',
'barista-examples',
'environments',
);
const args = process.argv.splice(ARGS_TO_OMIT);
const getDeployUrl = () => {
const deployUrlArg = args.find(arg => arg.startsWith(`--${DEPLOY_URL_ARG}=`));
if (!deployUrlArg) {
return undefined;
}
// tslint:disable-next-line no-magic-numbers
const [, deployUrl] = deployUrlArg.split('=', 2);
return deployUrl;
};
const { examplesDir, libDir } = buildConfig;
interface ExampleMetadata {
component: string;
sourcePath: string;
fileContent?: string;
}
interface InvalidExampleReferences {
name: string;
sourcePath: string;
}
interface AppComponentRouteSetup {
name: string;
examples: Array<{ name: string; route: string; className: string, import: string }>;
}
/** Parses the examples and collects all data */
function getExampleMetadata(sourceFiles: string[]): ExampleMetadata[] {
const parsedData: Set<ExampleMetadata> = new Set();
sourceFiles.forEach((sourceFilePath) => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const fileName = basename(sourceFilePath);
const components = retrieveExampleClassNames(fileName, content);
components.forEach((component) => {
const metadata: ExampleMetadata = {
component,
sourcePath: relative(examplesDir, sourceFilePath),
fileContent: content,
};
parsedData.add(metadata);
});
});
return [...parsedData];
}
/** Parses the AST of the given source file and collects the Angular component class names. */
export function retrieveExampleClassNames(fileName: string, content: string): string[] {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentClassNames: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.ClassDeclaration) {
if (node.decorators && node.decorators.length) {
for (const decorator of node.decorators) {
if (decorator.expression.expression.text === 'Component') {
componentClassNames.push(node.name.text);
}
}
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentClassNames;
}
/** Parses the AST of the given source file and collects the component's inline template content. */
export function retrieveExampleTemplateContent(fileName: string, content: string): string | undefined {
const sourceFile = ts.createSourceFile(fileName, content, ts.ScriptTarget.Latest, false);
const componentTemplates: string[] = [];
// tslint:disable-next-line:no-any
const visitNode = (node: any): void => {
if (node.kind === ts.SyntaxKind.PropertyAssignment) {
if (node.name && node.name.escapedText === 'template' && node.initializer && node.initializer.text) {
componentTemplates.push(sanitizeTemplateText(node.initializer.text));
}
}
ts.forEachChild(node, visitNode);
};
visitNode(sourceFile);
return componentTemplates.length > 0 ? componentTemplates[0] : undefined;
}
function sanitizeTemplateText(template: string) {
if (template.startsWith('\n ')) {
template = template.replace(/$\n /gm, '\n');
}
if (template.startsWith('\n')) {
template = template.replace('\n', ''); // remove leading newline
}
return template;
}
/** Build ES module import statements for the given example metadata. */
function buildImportsTemplate(data: ExampleMetadata): string {
const relativeSrcPath = data.sourcePath.replace(/\\/g, '/').replace('.ts', '');
return `import { ${data.component} } from './${relativeSrcPath}';`;
}
/**
* Generates the app module from the given source files and writes it to a specified output
* file.
*/
function | (
parsedData: ExampleMetadata[],
outputFile: string,
baseDir: string,
): void {
const generatedModuleFile = populateAppModuleTemplate([...parsedData]);
const generatedFilePath = join(baseDir, outputFile);
if (!fs.existsSync(dirname(generatedFilePath))) {
fs.mkdirSync(dirname(generatedFilePath));
}
fs.writeFileSync(generatedFilePath, generatedModuleFile);
}
/** Inlines the app module template with the specified parsed data. */
function populateAppModuleTemplate(parsedData: ExampleMetadata[]): string {
const exampleImports = parsedData
.map(m => buildImportsTemplate(m))
.join('\n');
const exampleList = parsedData.map(m => m.component);
return fs
.readFileSync(join(examplesDir, './app.module.template'), 'utf8')
.replace('${imports}', exampleImports)
.replace('${examples}', `[\n ${exampleList.join(',\n ')},\n]`);
}
/** Generates the imports for each example file */
function generateImportsForExamples(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let imports = '';
routeMetadata.forEach(metadata => {
metadata.examples.forEach(example => {
imports = `${imports}\n${example.import}`;
});
});
return content.replace('${imports}', imports);
}
/** Generates the nav items list used to render the sidebar */
function generateNavItems(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
const navItems = routeMetadata.map(metadata => {
const exampleData = metadata.examples.map(example => ({
name: example.name,
route: example.route,
}));
return { name: metadata.name, examples: exampleData };
});
return content.replace('${navItems}', JSON.stringify(navItems, null, '\t'));
}
/** Generates the route definitions */
function generateRoutes(
content: string,
routeMetadata: AppComponentRouteSetup[],
): string {
let routeString = '';
routeMetadata.forEach((metadata: AppComponentRouteSetup) => {
metadata.examples.forEach(example => {
routeString = `${routeString}
{ path: '${example.route}', component: ${example.className}},`;
});
});
if (routeString.endsWith(',')) {
routeString = routeString.slice(0, -1);
}
return content.replace('${routes}', `[${routeString}]`);
}
/** Generates the app component */
function generateAppComponent(parsedData: ExampleMetadata[]): void {
const routeMetadata = generateRouteMetadata(parsedData);
let content = fs.readFileSync(join(examplesDir, 'app.component.template'), {
encoding: 'utf8',
});
content = generateImportsForExamples(content, routeMetadata);
content = generateNavItems(content, routeMetadata);
content = generateRoutes(content, routeMetadata);
fs.writeFileSync(join(examplesDir, 'app.component.ts'), content, {
encoding: 'utf8',
});
}
/** Checks the content of given source files for invalid example names and returns them. */
function getInvalidExampleReferences(
sourceFiles: string[],
exampleNames: string[],
): InvalidExampleReferences[] {
const invalidRefs: InvalidExampleReferences[] = [];
sourceFiles.forEach(sourceFilePath => {
const content = fs.readFileSync(sourceFilePath, { encoding: 'utf-8' });
const regex = /<docs-source-example example=\"(.+?)\"(.*?)><\/docs-source-example>/g;
let matches;
// tslint:disable-next-line no-conditional-assignment
while ((matches = regex.exec(content)) !== null) {
if (!exampleNames.includes(matches[1])) {
const exampleRef = {
name: matches[1],
sourcePath: sourceFilePath,
};
invalidRefs.push(exampleRef);
}
}
});
return invalidRefs;
}
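// For context, the regex above targets README snippets of this shape
// (the example name here is made up):
//
// <docs-source-example example="DefaultButtonExample"></docs-source-example>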
/** Validates Barista example names used in lib readme files. */
task('barista-examples:validate', done => {
const exampleNames = getExampleMetadata(
glob(join(examplesDir, '*/*.ts')),
).map(metadata => metadata.component);
const invalidExampleRefs = getInvalidExampleReferences(
glob(join(libDir, '**/README.md')),
exampleNames,
);
if (invalidExampleRefs.length > 0) {
const errors = invalidExampleRefs.map(
ref => `Invalid example name "${ref.name}" found in ${ref.sourcePath}.`,
);
const errorMsg = errors.join('\n');
done(errorMsg);
return;
}
done();
});
/** Creates the examples module */
task('barista-example:generate', done => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
generateAppModule(metadata, 'app.module.ts', examplesDir);
generateAppComponent(metadata);
const routeData = flatten(generateRouteMetadata(metadata).map((route) =>
route.examples.map(route => ({name: route.name, route: route.route}))
));
fs.writeFileSync('src/barista-examples/routes.json', JSON.stringify({routes: routeData}, undefined,2));
done();
});
task('ide-completions', (done) => {
const metadata = getExampleMetadata(glob(join(examplesDir, '*/*.ts')));
const transformedMetaData = metadata
.map(metaData => {
const templateContent = retrieveExampleTemplateContent(
metaData.sourcePath,
metaData.fileContent as string,
);
return {
name: 'dt-' + basename(metaData.sourcePath).replace('-example.ts', '').replace('.ts', ''),
template: templateContent,
};
});

interpreter.rs

// NOTE: the head of this file (imports and the opening of this function) is
// truncated in the source; the parameter list below is original, while the
// function name `eval_assigns` is reconstructed and should be read as an
// assumption.
fn eval_assigns(
assigns: &[ir::Assignment],
env: &Environment,
) -> FutilResult<Environment> {
// Find the done signal in the sequence of assignments
let done_assign = get_done_signal(assigns);
// e2 = Clone the current environment
let mut write_env = env.clone();
// XXX: Prevent infinite loops. should probably be deleted later
// (unless we want to display the clock cycle)?
let mut counter = 0;
// Filter out the assignment statements that are not only from cells.
// XXX: for now, also excludes cells not in the env map
let ok_assigns = assigns
.iter()
.filter(|&a| {
!a.dst.borrow().is_hole()
// dummy way of making sure the map has the a.src cell
&& env.get_cell(&get_cell_from_port(&a.src)).is_some()
&& env.get_cell(&get_cell_from_port(&a.dst)).is_some()
})
.collect::<Vec<_>>();
// While done_assign.src is 0 (we use done_assign.src because done_assign.dst is not a cell's port; it should be a group's port)
while write_env.get_from_port(&done_assign.src.borrow()) == 0 && counter < 5
{
// println!("Clock cycle {}", counter);
/*println!(
"state of done_cell {:1} : {:?} \n",
&done_cell,
write_env.map.get(&done_cell)
);*/
// "staging" updates
//let mut iter_updates = write_env.clone();
// for assign in assigns
for assign in &ok_assigns {
// check if the assign.guard != 0
// should it be evaluating the guard in write_env environment?
if eval_guard(&assign.guard, &write_env) != 0 {
// check if the cells are constants?
// cell of assign.src
let src_cell = get_cell_from_port(&assign.src);
// cell of assign.dst
let dst_cell = get_cell_from_port(&assign.dst);
/*println!(
"src cell {:1} port: {:2}, dest cell {:3} port: {:4}",
src_cell,
&assign.src.borrow().name,
dst_cell,
&assign.dst.borrow().name
);*/
// perform a read from `env` for assign.src
// XXX(karen): should read from the previous iteration's env?
let read_val = env.get_from_port(&assign.src.borrow());
// update internal state of the cell and
// queue any required updates.
//determine if dst_cell is a combinational cell or not
if is_combinational(&dst_cell, &assign.dst.borrow().name, env) {
// write to assign.dst to e2 immediately, if combinational
write_env.put(
&dst_cell,
&assign.dst.borrow().name,
read_val,
);
/*println!(
"reg0.write_en = {}",
write_env.get(
&ir::Id::from("reg0"),
&ir::Id::from("write_en")
)
);*/
// now, update the internal state of the cell;
// for now, this only includes adds;
// TODO (use primitive Cell parameters)
let inputs;
let outputs;
// TODO: hacky way to avoid updating the cell state.
// Also, how to get input and output vectors in general??
if &assign.dst.borrow().name != "write_en" {
// get dst_cell's input vector
match &write_env.get_cell(&dst_cell) {
Some(cell) => {
inputs = vec![
(cell.borrow())
.get("left")
.borrow()
.name
.clone(),
(cell.borrow())
.get("right")
.borrow()
.name
.clone(),
]
}
_ => panic!("could not find cell"),
}
// get dst_cell's output vector
match &write_env.get_cell(&dst_cell) {
Some(cell) => {
outputs = vec![(cell.borrow())
.get("out")
.borrow()
.name
.clone()]
//clean this up later?
}
_ => panic!("could not find cell"),
}
// update the cell state in write_env
write_env = primitives::update_cell_state(
&dst_cell, &inputs, &outputs, &write_env,
)?;
}
} else {
// otherwise, add the write to the update queue; currently only handles registers
// get input cell
let inputs = vec![src_cell.clone()];
// get dst_cell's output port
let outputs = vec![assign.dst.borrow().name.clone()];
write_env =
init_cells(&dst_cell, inputs, outputs, write_env)?;
}
}
}
// write_env = iter_updates.do_tick()
write_env = write_env.do_tick();
counter += 1;
}
/*println!(
"\nFinal state of the done cell, i.e. {:1}: {:?} \n",
&done_cell,
write_env.map.get(&done_cell)
);*/
Ok(write_env)
}
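// The loop above applies combinational writes to `write_env` immediately and
// defers register-style writes until `do_tick`. The test below sketches that
// two-phase idea on plain maps; `Env`, the port names, and the methods are
// illustrative stand-ins, not the interpreter's real Environment API.
#[cfg(test)]
mod two_phase_update_sketch {
    use std::collections::HashMap;

    struct Env {
        values: HashMap<String, u64>,
        pending: Vec<(String, u64)>,
    }

    impl Env {
        // Combinational writes become visible within the same cycle.
        fn put_now(&mut self, port: &str, v: u64) {
            self.values.insert(port.to_string(), v);
        }
        // Sequential writes wait for the clock edge.
        fn put_on_tick(&mut self, port: &str, v: u64) {
            self.pending.push((port.to_string(), v));
        }
        // Commit all queued writes, as `do_tick` does above.
        fn do_tick(&mut self) {
            for (port, v) in self.pending.drain(..) {
                self.values.insert(port, v);
            }
        }
    }

    #[test]
    fn registers_update_only_on_tick() {
        let mut env = Env { values: HashMap::new(), pending: Vec::new() };
        env.put_now("add0.out", 3);
        env.put_on_tick("reg0.out", 3);
        assert_eq!(env.values.get("reg0.out"), None);
        env.do_tick();
        assert_eq!(env.values.get("reg0.out"), Some(&3));
    }
}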
/// Evaluate guard implementation
#[allow(clippy::borrowed_box)]
// XXX: Allow for this warning. It would make sense to use a reference when we
// have the `box` match pattern available in Rust.
fn eval_guard(guard: &Box<ir::Guard>, env: &Environment) -> u64 {
(match &**guard {
ir::Guard::Or(g1, g2) => {
(eval_guard(g1, env) == 1) || (eval_guard(g2, env) == 1)
}
ir::Guard::And(g1, g2) => {
(eval_guard(g1, env) == 1) && (eval_guard(g2, env) == 1)
}
// Negation: the guard holds exactly when the inner guard evaluates to 0.
ir::Guard::Not(g) => eval_guard(g, &env) == 0,
ir::Guard::Eq(g1, g2) => {
env.get_from_port(&g1.borrow()) == env.get_from_port(&g2.borrow())
}
ir::Guard::Neq(g1, g2) => {
env.get_from_port(&g1.borrow()) != env.get_from_port(&g2.borrow())
}
ir::Guard::Gt(g1, g2) => {
env.get_from_port(&g1.borrow()) > env.get_from_port(&g2.borrow())
}
ir::Guard::Lt(g1, g2) => {
env.get_from_port(&g1.borrow()) < env.get_from_port(&g2.borrow())
}
ir::Guard::Geq(g1, g2) => {
env.get_from_port(&g1.borrow()) >= env.get_from_port(&g2.borrow())
}
ir::Guard::Leq(g1, g2) => {
env.get_from_port(&g1.borrow()) <= env.get_from_port(&g2.borrow())
}
ir::Guard::Port(p) => env.get_from_port(&p.borrow()) != 0,
ir::Guard::True => true,
}) as u64
}
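// A stripped-down illustration of the 0/1 guard convention above, written as
// a test against a toy Guard type (not the ir::Guard API; names here are
// illustrative only).
#[cfg(test)]
mod guard_convention_sketch {
    enum Guard {
        True,
        Not(Box<Guard>),
        And(Box<Guard>, Box<Guard>),
    }

    fn eval(g: &Guard) -> u64 {
        (match g {
            Guard::True => true,
            Guard::Not(inner) => eval(inner) == 0,
            Guard::And(a, b) => eval(a) == 1 && eval(b) == 1,
        }) as u64
    }

    #[test]
    fn folds_bools_to_bits() {
        let g = Guard::And(
            Box::new(Guard::True),
            Box::new(Guard::Not(Box::new(Guard::True))),
        );
        assert_eq!(eval(&g), 0); // true && !true -> 0
    }
}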
/// Get the cell id a port belongs to.
/// Very similar to ir::Port::get_parent_name, except it can also panic
fn get_cell_from_port(port: &ir::RRC<ir::Port>) -> ir::Id {
if port.borrow().is_hole() {
panic!("Unexpected hole. Cannot get cell: {}", port.borrow().name)
}
port.borrow().get_parent_name()
}
/// Returns the assignment statement with the done signal; assumes there aren't other groups to check?
fn get_done_signal(assigns: &[ir::Assignment]) -> &ir::Assignment {
assigns
.iter()
.find(|assign| {
let dst = assign.dst.borrow();
dst.is_hole() && dst.name == "done"
})
.expect("Group does not have a done signal")
}
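// The same "find the done hole" lookup as above, demonstrated on a toy
// assignment type (illustrative names, not the ir:: types).
#[cfg(test)]
mod done_signal_sketch {
    struct Assign {
        dst: &'static str,
        is_hole: bool,
    }

    fn done_signal(assigns: &[Assign]) -> &Assign {
        assigns
            .iter()
            .find(|a| a.is_hole && a.dst == "done")
            .expect("Group does not have a done signal")
    }

    #[test]
    fn finds_the_done_hole() {
        let assigns = [
            Assign { dst: "reg0.in", is_hole: false },
            Assign { dst: "done", is_hole: true },
        ];
        assert_eq!(done_signal(&assigns).dst, "done");
    }
}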
/// Determines if writing a particular cell and cell port is combinational or not. Will need to change implementation later.
fn is_combinational(cell: &ir::Id, port: &ir::Id, env: &Environment) -> bool {
// if cell is none,
let cellg = env
.get_cell(cell)
.unwrap_or_else(|| panic!("Cannot find cell with name"));
let cb = cellg.borrow();
let celltype = cb.type_name().unwrap_or_else(|| panic!("Constant?"));
// TODO; get cell attributes
match (*celltype).id.as_str() {
"std_reg" => match port.id.as_str() {
// XXX(rachit): Why is this a "combinational" port?
"write_en" => true,
"out" => false,
"done" => false,
_ => false,
},
"std_const"
| "std_slice"
| "std_lsh"
| "std_rsh"
| "std_add"
| "std_sub"
| "std_mod"
| "std_mult"
| "std_div"
| "std_not"
| "std_and"
| "std_or"
| "std_xor"
| "std_gt"
| "std_lt"
| "std_eq"
| "std_neq"
| "std_ge"
| "std_le"
| "fixed_p_std_const"
| write_env.map.get(&done_cell)
);*/ | random_line_split |

dg.py

# NOTE: the imports and the enclosing CSV-export function are cut off in the
# source dump; the head below (imports, function name, and the `with` block)
# is a reconstruction and should be read as an assumption.
import csv
import math

import matplotlib.pyplot as plt
import numpy as np
from networkx import draw, spring_layout
from sklearn.metrics import mean_squared_error


def write_poi_rows(path, first_row, Nlist, poi):
    with open(path, 'wb') as csvfile:
        spamwriter = csv.writer(csvfile)
        spamwriter.writerow(first_row)
        for index in xrange(len(Nlist)):
            # print poi.get(str(Nlist[index]))
            spamwriter.writerow(poi.get(str(Nlist[index]), None))


def plot_whole_network(DG):
    # pos = random_layout(DG)
    # pos = shell_layout(DG)
    pos = spring_layout(DG)
    # pos = spectral_layout(DG)
    # plt.title('Plot of Network')
    draw(DG, pos)
    plt.show()


def pdf(data, xmin=None, xmax=None, linear_bins=False, **kwargs):
    if not xmax:
        xmax = max(data)
    if not xmin:
        xmin = min(data)
    if linear_bins:
        bins = range(int(xmin), int(xmax))
    else:
        log_min_size = np.log10(xmin)
        log_max_size = np.log10(xmax)
        number_of_bins = np.ceil((log_max_size-log_min_size)*10)
        bins = np.unique(
            np.floor(
                np.logspace(
                    log_min_size, log_max_size, num=number_of_bins)))
    hist, edges = np.histogram(data, bins, density=True)
    bin_centers = (edges[1:]+edges[:-1])/2.0
    new_x, new_y = [], []
    for index in xrange(len(hist)):
        if hist[index] != 0:
            new_x.append(bin_centers[index])
            new_y.append(hist[index])
    return new_x, new_y


def pearson(x, y):
    # calculate the Pearson correlation of two lists
    n = len(x)
    avg_x = float(sum(x))/n
    avg_y = float(sum(y))/n
    print 'The means of two lists:', avg_x, avg_y
    diffprod = 0.0
    xdiff2 = 0.0
    ydiff2 = 0.0
    for idx in range(n):
        xdiff = x[idx] - avg_x
        ydiff = y[idx] - avg_y
        diffprod += xdiff*ydiff
        xdiff2 += xdiff*xdiff
        ydiff2 += ydiff*ydiff
    return diffprod/math.sqrt(xdiff2*ydiff2)
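
# In formula form, pearson computes the sample correlation coefficient
#   r = \frac{\sum_{i}(x_i-\bar{x})(y_i-\bar{y})}
#            {\sqrt{\sum_{i}(x_i-\bar{x})^2}\,\sqrt{\sum_{i}(y_i-\bar{y})^2}}
# which always lies in [-1, 1].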


def drop_zeros(list_a):
    # discard the zeros in a list
    return [i for i in list_a if i > 0]


def rmse(predict, truth):
    # calculate RMSE of a prediction
    RMSE = mean_squared_error(truth, predict)**0.5
    return RMSE


def mean_bin(list_x, list_y, linear_bins=False):
    # the returned values are raw values, not logarithmic values
    size = len(list_x)
    xmin = min(list_x)
    xmax = max(list_x)
    if linear_bins:
        bins = range(int(xmin), int(xmax+1))
    else:
        log_min_size = np.log10(xmin)
        log_max_size = np.log10(xmax+1)
        number_of_bins = np.ceil((log_max_size-log_min_size)*10)
        bins = np.unique(
            np.floor(
                np.logspace(
                    log_min_size, log_max_size, num=number_of_bins)))
    new_bin_means_x, new_bin_means_y = [], []
    hist_x = np.histogram(list_x, bins)[0]
    hist_x_w = np.histogram(list_x, bins, weights=list_x)[0].astype(float)
    for index in xrange(len(bins)-1):
        if hist_x[index] != 0:
            new_bin_means_x.append(hist_x_w[index]/hist_x[index])
            range_min, range_max = bins[index], bins[index+1]
            sum_y = 0.0
            for i in xrange(size):
                key = list_x[i]
                if (key >= range_min) and (key < range_max):
                    sum_y += list_y[i]
            new_bin_means_y.append(sum_y/hist_x[index])
    return new_bin_means_x, new_bin_means_y
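
# For each (logarithmically spaced) bin B_j, mean_bin reports
#   \bar{x}_j = \frac{1}{|B_j|}\sum_{x_i \in B_j} x_i, \qquad
#   \bar{y}_j = \frac{1}{|B_j|}\sum_{i\,:\,x_i \in B_j} y_i
# i.e. the average x and the average y of the points whose x falls in B_j.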


def cut_lists(list_x, list_y, fit_start=-1, fit_end=-1):
    if fit_start != -1:
        new_x, new_y = [], []
        for index in xrange(len(list_x)):
            if list_x[index] >= fit_start:
                new_x.append(list_x[index])
                new_y.append(list_y[index])
        list_x, list_y = new_x, new_y
    if fit_end != -1:
        new_x, new_y = [], []
        for index in xrange(len(list_x)):
            if list_x[index] < fit_end:
                new_x.append(list_x[index])
                new_y.append(list_y[index])
        list_x, list_y = new_x, new_y
    return (list_x, list_y)


def lr_ls(list_x, list_y, fit_start=-1, fit_end=-1):
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)
    coefficients = np.polyfit(logX, logY, 1)
    polynomial = np.poly1d(coefficients)
    print 'Polynomial: (', fit_start, fit_end, ')', polynomial
    logY_fit = polynomial(logX)
    print 'Fitting RMSE(log)', rmse(logY, logY_fit)
    print 'Fitting RMSE(raw)', rmse(Y, np.power(10, logY_fit))
    # print Y
    return (list_x, np.power(10, logY_fit))
    # return logX, logY_fit
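
# lr_ls fits a straight line to the data in log-log space, which is
# equivalent to a power law in the raw coordinates:
#   \log_{10} y = a\,\log_{10} x + b \iff y = 10^{b} x^{a}
# so `coefficients` above holds the exponent a and the offset b.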


def lr_ml(list_x, list_y, fit_start=-1, fit_end=-1):
    # TODO
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)


def lr_ks(list_x, list_y, fit_start=-1, fit_end=-1):
    # TODO
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)


def neibors_static(DG, node, neib='pre', direct='in', weight=False):
    if neib == 'suc':
        neibors = DG.successors(node)
    else:
        neibors = DG.predecessors(node)
    if direct == 'out':
        if weight:
            values = [DG.out_degree(n, weight='weight') for n in neibors]
        else:
            values = [DG.out_degree(n) for n in neibors]
    else:
        if weight:
            values = [DG.in_degree(n, weight='weight') for n in neibors]
        else:
            values = [DG.in_degree(n) for n in neibors]
    if len(neibors):
        return float(sum(values))/len(neibors)
    else:
        return 0.0


def dependence(listx, listy, l, xlabel, ylabel, start=1, end=1000):
    plt.clf()
    plt.scatter(listx, listy, s=20, c='#fee8c8', marker='+', label='raw '+l)
    ax = plt.gca()
    xmeans, ymeans = mean_bin(listx, listy)
    ax.scatter(xmeans, ymeans, s=50, c='#fdbb84', marker='o', label='binned '+l)
    xfit, yfit = lr_ls(xmeans, ymeans, start, end)
    ax.plot(xfit, yfit, c='#e34a33', linewidth=2, linestyle='--', label='Fitted '+l)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xlim(xmin=1)
    ax.set_ylim(ymin=1)
    handles, labels = ax.get_legend_handles_labels()
    leg = ax.legend(handles, labels, loc=4)
    leg.draw_frame(True)
    plt.show()


'''Plot PDF'''


def pdf_plot(data, name, fit_start, fit_end):
    # plt.gcf()
    # data = outstrength
    list_x, list_y = pdf(data, linear_bins=True)
    plt.plot(list_x, list_y, 'r+', label='Raw '+name)
    ax = plt.gca()
    list_x, list_y = pdf(data)
    ax.plot(list_x, list_y, 'ro', label='Binned '+name)
    # list_fit_x, list_fit_y = lr_ls(list_x, list_y, 1, 100)
    # ax.plot(list_fit_x, list_fit_y, 'b--', label='Fitted outstrength')
    list_fit_x, list_fit_y = lr_ls(list_x, list_y, fit_start, fit_end)
    ax.plot(list_fit_x, list_fit_y, 'b--', label='Fitted '+name)
    # data = outstrength
    # list_x, list_y = pdf(data, linear_bins=True)
    # ax.plot(list_x, list_y, 'b

lib.rs

// NOTE: the head of this file (imports, the `distance` helper, and the
// opening of `logdistance`) is truncated in the source. The reconstruction
// below is inferred from the call sites further down (`distance` is used to
// sort by XOR metric; `logdistance` feeds the bucket index) and should be
// read as an assumption.
fn distance(a: H256, b: H256) -> U256 {
    // XOR metric, interpreted as a big-endian integer.
    U256::from_big_endian(&(a ^ b).0)
}

pub fn logdistance(a: H256, b: H256) -> Option<usize> {
for i in (0..H256::len_bytes()).rev() {
let byte_index = H256::len_bytes() - i - 1;
let d: u8 = a[byte_index] ^ b[byte_index];
if d != 0 {
let high_bit_index = 7 - d.leading_zeros() as usize;
return Some(i * 8 + high_bit_index);
}
}
None // a and b are equal, so log distance is -inf
}
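// Doctest-style check of the bucket metric above (H256 from the
// primitive-types crate, as assumed throughout this file).
#[cfg(test)]
mod logdistance_check {
    use super::*;

    #[test]
    fn highest_differing_bit_wins() {
        let a = H256::zero();
        let mut b = H256::zero();
        b.0[31] = 0b0000_0100; // differ only in bit 2 of the lowest byte
        assert_eq!(logdistance(a, b), Some(2));
        assert_eq!(logdistance(a, a), None); // equal ids: -inf, i.e. None
    }
}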
#[derive(Clone, Copy, Debug)]
pub enum PeerState {
New,
Ready,
}
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
pub struct NodeEntry<K: EnrKey> {
pub record: Enr<K>,
pub peer_state: PeerState,
pub liveness: Option<Instant>,
}
#[derive(Derivative)]
#[derivative(Default(bound = ""))]
struct Bucket<K: EnrKey> {
nodes: Box<FnvIndexMap<RawNodeId, NodeEntry<K>, U16>>,
recently_seen: Option<Enr<K>>,
}
pub struct NodeTable<K: EnrKey> {
host_id: H256,
buckets: Box<[Bucket<K>; 256]>,
all_nodes: Box<heapless::FnvIndexSet<RawNodeId, U4096>>,
}
impl<K: EnrKey> NodeTable<K> {
#[must_use]
pub fn new(host_id: H256) -> Self {
Self {
host_id,
buckets: Box::new(arr![Default::default(); 256]),
all_nodes: Default::default(),
}
}
#[must_use]
pub fn len(&self) -> usize {
self.all_nodes.len()
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
fn bucket_idx(&self, node_id: H256) -> Option<usize> {
logdistance(self.host_id, node_id)
}
fn bucket(&mut self, node_id: H256) -> Option<&mut Bucket<K>> {
Some(&mut self.buckets[self.bucket_idx(node_id)?])
}
pub fn node_mut(&mut self, node_id: H256) -> Option<&mut NodeEntry<K>> {
let bucket = self.bucket(node_id)?;
bucket.nodes.get_mut(&node_id.0)
}
pub fn add_node(&mut self, record: Enr<K>, peer_state: PeerState) {
let node_id = H256(record.node_id().raw());
// If we don't have such node already...
if !self.all_nodes.contains(&node_id.0) {
// Check that we're not adding self
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If there's space, add it...
if bucket.nodes.len() < bucket.nodes.capacity() {
let node_id = node_id.0;
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state,
liveness: None,
},
);
} else {
// ...or if at capacity, update replacement cache instead
bucket.recently_seen = Some(record);
}
}
}
}
pub fn evict_node(&mut self, node_id: H256) {
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If this node actually exists, remove it.
if bucket.nodes.remove(&node_id.0).is_some() {
self.all_nodes.remove(&node_id.0);
// And if there is a replacement, move it into the table
if let Some(record) = bucket.recently_seen.take() {
let node_id = record.node_id().raw();
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state: PeerState::New,
liveness: None,
},
);
}
}
}
}
pub fn update_liveness(&mut self, node_id: H256, timestamp: Instant) {
if let Some(node) = self.node_mut(node_id) {
node.liveness = Some(timestamp);
}
}
pub fn random_node(&mut self) -> Option<&mut NodeEntry<K>> {
// Guard against a modulo-by-zero panic when the table is empty.
if self.all_nodes.is_empty() {
return None;
}
let node_id = *self
.all_nodes
.iter()
.nth(rand::random::<usize>() % self.all_nodes.len())?;
Some(
self.node_mut(H256(node_id))
.expect("this node always exists at this point; qed"),
)
}
pub fn bucket_nodes(&mut self, logdistance: u8) -> BucketNodes<'_, K> {
BucketNodes(NodeEntries {
node_table: self,
current_bucket: logdistance as usize,
max_bucket: logdistance as usize,
current_bucket_remaining: None,
})
}
pub fn closest(&mut self) -> Closest<'_, K> {
Closest(NodeEntries {
node_table: self,
current_bucket: 0,
max_bucket: 255,
current_bucket_remaining: None,
})
}
}
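// `add_node` parks newcomers in `recently_seen` when a bucket is full, and
// `evict_node` promotes that cached record - the classic Kademlia
// replacement-cache policy. The test below replays that policy on a toy
// bucket (capacity 2, plain types; illustrative only, not this table's API).
#[cfg(test)]
mod replacement_cache_sketch {
    use std::collections::HashMap;

    struct ToyBucket {
        nodes: HashMap<u8, &'static str>,
        recently_seen: Option<(u8, &'static str)>,
    }

    impl ToyBucket {
        fn add(&mut self, id: u8, name: &'static str) {
            if self.nodes.len() < 2 {
                self.nodes.insert(id, name);
            } else {
                // Full bucket: remember the newcomer instead of dropping it.
                self.recently_seen = Some((id, name));
            }
        }
        fn evict(&mut self, id: u8) {
            if self.nodes.remove(&id).is_some() {
                if let Some((cached_id, name)) = self.recently_seen.take() {
                    self.nodes.insert(cached_id, name);
                }
            }
        }
    }

    #[test]
    fn eviction_promotes_cached_node() {
        let mut b = ToyBucket { nodes: HashMap::new(), recently_seen: None };
        b.add(1, "a");
        b.add(2, "b");
        b.add(3, "c"); // bucket full: parked in the cache
        assert!(!b.nodes.contains_key(&3));
        b.evict(1); // eviction pulls the cached node in
        assert!(b.nodes.contains_key(&3));
    }
}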
pub struct BucketNodes<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for BucketNodes<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub struct Closest<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for Closest<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
struct NodeEntries<'a, K: EnrKey> {
node_table: &'a mut NodeTable<K>,
current_bucket: usize,
max_bucket: usize,
current_bucket_remaining: Option<Vec<NonNull<NodeEntry<K>>>>,
}
impl<'a, K: EnrKey> Iterator for NodeEntries<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let NodeEntries {
node_table,
current_bucket,
max_bucket,
current_bucket_remaining,
} = self;
trace!("Current bucket is {}", *current_bucket);
let host_id = node_table.host_id;
if let Some(ptr) = current_bucket_remaining
.get_or_insert_with(|| {
let mut nodes = node_table.buckets[*current_bucket]
.nodes
.values_mut()
.collect::<Vec<_>>();
trace!("Nodes before sorting: {:?}", nodes);
nodes.sort_by(|a, b| {
distance(host_id, H256(b.record.node_id().raw()))
.cmp(&distance(host_id, H256(a.record.node_id().raw())))
});
trace!("Nodes after sorting: {:?}", nodes);
nodes.into_iter().map(From::from).collect()
})
.pop()
{
// Safety: we have exclusive access to underlying node table
return Some(unsafe { &mut *ptr.as_ptr() });
}
if *current_bucket == *max_bucket {
return None;
}
*current_bucket += 1;
*current_bucket_remaining = None;
}
}
}
pub enum DiscoveryRequest {
Ping,
}
pub enum DiscoveryResponse {
Pong,
}
pub enum DiscoveryPacket {
WhoAreYou,
FindNode,
Ping,
Pong,
}
pub enum TableUpdate {
Added { node_id: H256, addr: SocketAddr },
Removed { node_id: H256 },
}
#[allow(dead_code)]
pub struct Discovery<K: EnrKey> {
node_table: Arc<Mutex<NodeTable<K>>>,
concurrency: usize,
}
impl<K: EnrKey + Send + 'static> Discovery<K> {
pub async fn new<
F: Fn(TableUpdate) -> Fut + Send + Sync + 'static,
Fut: Future<Output = ()> + Send,
>(
addr: String,
host_id: H256,
on_table_update: F,
) -> Self {
let socket = UdpSocket::bind(addr).await.unwrap();
let on_table_update = Arc::new(on_table_update);
// Create a node table
let node_table = Arc::new(Mutex::new(NodeTable::new(host_id)));
let (tx, mut rx) = futures::StreamExt::split(UdpFramed::new(socket, BytesCodec::new()));
// Ougoing router
let (outgoing_sender, mut rx) = tokio::sync::mpsc::channel::<(H256, DiscoveryPacket)>(1);
// tokio::spawn(async move {
// while let Some((node_id, request)) = rx.next().await {
// if let Some(node) = node_table.lock().unwrap().node_mut(node_id) {
// if let Some(ip) = node.record.ip() {}
// }
lib.rs | H256::len_bytes()).rev() {
let byte_index = H256::len_bytes() - i - 1;
let d: u8 = a[byte_index] ^ b[byte_index];
if d != 0 {
let high_bit_index = 7 - d.leading_zeros() as usize;
return Some(i * 8 + high_bit_index);
}
}
None // a and b are equal, so log distance is -inf
}
#[derive(Clone, Copy, Debug)]
pub enum PeerState {
New,
Ready,
}
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
pub struct NodeEntry<K: EnrKey> {
pub record: Enr<K>,
pub peer_state: PeerState,
pub liveness: Option<Instant>,
}
#[derive(Derivative)]
#[derivative(Default(bound = ""))]
struct Bucket<K: EnrKey> {
nodes: Box<FnvIndexMap<RawNodeId, NodeEntry<K>, U16>>,
recently_seen: Option<Enr<K>>,
}
pub struct NodeTable<K: EnrKey> {
host_id: H256,
buckets: Box<[Bucket<K>; 256]>,
all_nodes: Box<heapless::FnvIndexSet<RawNodeId, U4096>>,
}
impl<K: EnrKey> NodeTable<K> {
#[must_use]
pub fn new(host_id: H256) -> Self {
Self {
host_id,
buckets: Box::new(arr![Default::default(); 256]),
all_nodes: Default::default(),
}
}
#[must_use]
pub fn len(&self) -> usize {
self.all_nodes.len()
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
fn bucket_idx(&self, node_id: H256) -> Option<usize> {
logdistance(self.host_id, node_id)
}
fn bucket(&mut self, node_id: H256) -> Option<&mut Bucket<K>> {
Some(&mut self.buckets[self.bucket_idx(node_id)?])
}
pub fn node_mut(&mut self, node_id: H256) -> Option<&mut NodeEntry<K>> {
let bucket = self.bucket(node_id)?;
bucket.nodes.get_mut(&node_id.0)
}
pub fn add_node(&mut self, record: Enr<K>, peer_state: PeerState) {
let node_id = H256(record.node_id().raw());
// If we don't already have this node...
if !self.all_nodes.contains(&node_id.0) {
// Check that we're not adding self
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If there's space, add it...
if bucket.nodes.len() < bucket.nodes.capacity() {
let node_id = node_id.0;
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state,
liveness: None,
},
);
} else {
// ...or if at capacity, update replacement cache instead
bucket.recently_seen = Some(record);
}
}
}
}
pub fn evict_node(&mut self, node_id: H256) {
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If this node actually exists, remove it.
if bucket.nodes.remove(&node_id.0).is_some() {
self.all_nodes.remove(&node_id.0);
// And if there is a replacement, move it into the table
if let Some(record) = bucket.recently_seen.take() {
let node_id = record.node_id().raw();
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state: PeerState::New,
liveness: None,
},
);
}
}
}
}
pub fn update_liveness(&mut self, node_id: H256, timestamp: Instant) {
if let Some(node) = self.node_mut(node_id) {
node.liveness = Some(timestamp);
}
}
pub fn random_node(&mut self) -> Option<&mut NodeEntry<K>> {
if self.all_nodes.is_empty() {
return None; // guard: `% 0` below would panic on an empty table
}
let node_id = *self
.all_nodes
.iter()
.nth(rand::random::<usize>() % self.all_nodes.len())?;
Some(
self.node_mut(H256(node_id))
.expect("this node always exists at this point; qed"),
)
}
pub fn bucket_nodes(&mut self, logdistance: u8) -> BucketNodes<'_, K> {
BucketNodes(NodeEntries {
node_table: self,
current_bucket: logdistance as usize,
max_bucket: logdistance as usize,
current_bucket_remaining: None,
})
}
pub fn closest(&mut self) -> Closest<'_, K> |
}
pub struct BucketNodes<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for BucketNodes<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub struct Closest<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for Closest<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
struct NodeEntries<'a, K: EnrKey> {
node_table: &'a mut NodeTable<K>,
current_bucket: usize,
max_bucket: usize,
current_bucket_remaining: Option<Vec<NonNull<NodeEntry<K>>>>,
}
impl<'a, K: EnrKey> Iterator for NodeEntries<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let NodeEntries {
node_table,
current_bucket,
max_bucket,
current_bucket_remaining,
} = self;
trace!("Current bucket is {}", *current_bucket);
let host_id = node_table.host_id;
if let Some(ptr) = current_bucket_remaining
.get_or_insert_with(|| {
let mut nodes = node_table.buckets[*current_bucket]
.nodes
.values_mut()
.collect::<Vec<_>>();
trace!("Nodes before sorting: {:?}", nodes);
nodes.sort_by(|a, b| {
distance(host_id, H256(b.record.node_id().raw()))
.cmp(&distance(host_id, H256(a.record.node_id().raw())))
});
trace!("Nodes after sorting: {:?}", nodes);
nodes.into_iter().map(From::from).collect()
})
.pop()
{
// Safety: we have exclusive access to underlying node table
return Some(unsafe { &mut *ptr.as_ptr() });
}
if *current_bucket == *max_bucket {
return None;
}
*current_bucket += 1;
*current_bucket_remaining = None;
}
}
}
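// Added note (not in the original source): the unsafe dereference above is
// sound because NodeEntries holds `&'a mut NodeTable<K>`, so nothing else can
// touch the table while the iterator is alive; the cached NonNull pointers in
// current_bucket_remaining therefore stay valid for 'a, and popping from the
// Vec yields each entry at most once, so no two `&'a mut NodeEntry<K>` ever
// alias.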
pub enum DiscoveryRequest {
Ping,
}
pub enum DiscoveryResponse {
Pong,
}
pub enum DiscoveryPacket {
WhoAreYou,
FindNode,
Ping,
Pong,
}
pub enum TableUpdate {
Added { node_id: H256, addr: SocketAddr },
Removed { node_id: H256 },
}
#[allow(dead_code)]
pub struct Discovery<K: EnrKey> {
node_table: Arc<Mutex<NodeTable<K>>>,
concurrency: usize,
}
impl<K: EnrKey + Send + 'static> Discovery<K> {
pub async fn new<
F: Fn(TableUpdate) -> Fut + Send + Sync + 'static,
Fut: Future<Output = ()> + Send,
>(
addr: String,
host_id: H256,
on_table_update: F,
) -> Self {
let socket = UdpSocket::bind(addr).await.unwrap();
let on_table_update = Arc::new(on_table_update);
// Create a node table
let node_table = Arc::new(Mutex::new(NodeTable::new(host_id)));
let (tx, mut rx) = futures::StreamExt::split(UdpFramed::new(socket, BytesCodec::new()));
// Outgoing router
let (outgoing_sender, mut outgoing_rx) = tokio::sync::mpsc::channel::<(H256, DiscoveryPacket)>(1); // renamed from `rx`, which would shadow the UDP receiver above
// tokio::spawn(async move {
// while let Some((node_id, request)) = outgoing_rx.next().await {
// if let Some(node) = node_table.lock().unwrap().node_mut(node_id) {
// if let Some(ip) = node.record.ip() {}
// }
| {
Closest(NodeEntries {
node_table: self,
current_bucket: 0,
max_bucket: 255,
current_bucket_remaining: None,
})
} | identifier_body |
lib.rs | self.bucket(node_id)?;
bucket.nodes.get_mut(&node_id.0)
}
pub fn add_node(&mut self, record: Enr<K>, peer_state: PeerState) {
let node_id = H256(record.node_id().raw());
// If we don't already have this node...
if !self.all_nodes.contains(&node_id.0) {
// Check that we're not adding self
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If there's space, add it...
if bucket.nodes.len() < bucket.nodes.capacity() {
let node_id = node_id.0;
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state,
liveness: None,
},
);
} else {
// ...or if at capacity, update replacement cache instead
bucket.recently_seen = Some(record);
}
}
}
}
pub fn evict_node(&mut self, node_id: H256) {
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If this node actually exists, remove it.
if bucket.nodes.remove(&node_id.0).is_some() {
self.all_nodes.remove(&node_id.0);
// And if there is a replacement, move it into the table
if let Some(record) = bucket.recently_seen.take() {
let node_id = record.node_id().raw();
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state: PeerState::New,
liveness: None,
},
);
}
}
}
}
pub fn update_liveness(&mut self, node_id: H256, timestamp: Instant) {
if let Some(node) = self.node_mut(node_id) {
node.liveness = Some(timestamp);
}
}
pub fn random_node(&mut self) -> Option<&mut NodeEntry<K>> {
if self.all_nodes.is_empty() {
return None; // guard: `% 0` below would panic on an empty table
}
let node_id = *self
.all_nodes
.iter()
.nth(rand::random::<usize>() % self.all_nodes.len())?;
Some(
self.node_mut(H256(node_id))
.expect("this node always exists at this point; qed"),
)
}
pub fn bucket_nodes(&mut self, logdistance: u8) -> BucketNodes<'_, K> {
BucketNodes(NodeEntries {
node_table: self,
current_bucket: logdistance as usize,
max_bucket: logdistance as usize,
current_bucket_remaining: None,
})
}
pub fn closest(&mut self) -> Closest<'_, K> {
Closest(NodeEntries {
node_table: self,
current_bucket: 0,
max_bucket: 255,
current_bucket_remaining: None,
})
}
}
pub struct BucketNodes<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for BucketNodes<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub struct Closest<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for Closest<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
struct NodeEntries<'a, K: EnrKey> {
node_table: &'a mut NodeTable<K>,
current_bucket: usize,
max_bucket: usize,
current_bucket_remaining: Option<Vec<NonNull<NodeEntry<K>>>>,
}
impl<'a, K: EnrKey> Iterator for NodeEntries<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let NodeEntries {
node_table,
current_bucket,
max_bucket,
current_bucket_remaining,
} = self;
trace!("Current bucket is {}", *current_bucket);
let host_id = node_table.host_id;
if let Some(ptr) = current_bucket_remaining
.get_or_insert_with(|| {
let mut nodes = node_table.buckets[*current_bucket]
.nodes
.values_mut()
.collect::<Vec<_>>();
trace!("Nodes before sorting: {:?}", nodes);
nodes.sort_by(|a, b| {
distance(host_id, H256(b.record.node_id().raw()))
.cmp(&distance(host_id, H256(a.record.node_id().raw())))
});
trace!("Nodes after sorting: {:?}", nodes);
nodes.into_iter().map(From::from).collect()
})
.pop()
{
// Safety: we have exclusive access to underlying node table
return Some(unsafe { &mut *ptr.as_ptr() });
}
if *current_bucket == *max_bucket {
return None;
}
*current_bucket += 1;
*current_bucket_remaining = None;
}
}
}
pub enum DiscoveryRequest {
Ping,
}
pub enum DiscoveryResponse {
Pong,
}
pub enum DiscoveryPacket {
WhoAreYou,
FindNode,
Ping,
Pong,
}
pub enum TableUpdate {
Added { node_id: H256, addr: SocketAddr },
Removed { node_id: H256 },
}
#[allow(dead_code)]
pub struct Discovery<K: EnrKey> {
node_table: Arc<Mutex<NodeTable<K>>>,
concurrency: usize,
}
impl<K: EnrKey + Send + 'static> Discovery<K> {
pub async fn new<
F: Fn(TableUpdate) -> Fut + Send + Sync + 'static,
Fut: Future<Output = ()> + Send,
>(
addr: String,
host_id: H256,
on_table_update: F,
) -> Self {
let socket = UdpSocket::bind(addr).await.unwrap();
let on_table_update = Arc::new(on_table_update);
// Create a node table
let node_table = Arc::new(Mutex::new(NodeTable::new(host_id)));
let (tx, mut rx) = futures::StreamExt::split(UdpFramed::new(socket, BytesCodec::new()));
// Outgoing router
let (outgoing_sender, mut outgoing_rx) = tokio::sync::mpsc::channel::<(H256, DiscoveryPacket)>(1); // renamed from `rx`, which would shadow the UDP receiver above
// tokio::spawn(async move {
// while let Some((node_id, request)) = outgoing_rx.next().await {
// if let Some(node) = node_table.lock().unwrap().node_mut(node_id) {
// if let Some(ip) = node.record.ip() {}
// }
// let _ = io_tx.send((node_id, request)).await;
// }
// });
// Liveness check service
let unanswered_pings = Arc::new(Mutex::new(HashSet::<H256>::new()));
tokio::spawn({
let node_table = node_table.clone();
let unanswered_pings = unanswered_pings.clone();
async move {
const PING_TIMEOUT: u64 = 10;
const SCAN_IN: u64 = 30_000;
loop {
let d = {
let node_id = node_table
.lock()
.unwrap()
.random_node()
.map(|entry| H256(entry.record.node_id().raw()))
.filter(|node| !unanswered_pings.lock().unwrap().contains(node));
if let Some(node_id) = node_id {
let mut outgoing_sender = outgoing_sender.clone();
let on_table_update = on_table_update.clone();
let node_table = node_table.clone();
let unanswered_pings = unanswered_pings.clone();
tokio::spawn(async move {
let _ =
outgoing_sender.send((node_id, DiscoveryPacket::Ping)).await;
tokio::time::delay_for(std::time::Duration::from_secs(
PING_TIMEOUT,
))
.await;
if unanswered_pings.lock().unwrap().remove(&node_id) {
node_table.lock().unwrap().evict_node(node_id);
(on_table_update)(TableUpdate::Removed { node_id }).await;
}
});
}
tokio::time::delay_for(Duration::from_millis(
SCAN_IN / (node_table.lock().unwrap().len() as u64).max(1), // .max(1): avoid division by zero when the table is empty
))
};
d.await;
}
}
});
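// Added sketch of the pacing logic (not in the original source): the loop
// pings one random node per iteration and then sleeps SCAN_IN / table_len
// milliseconds, so a full sweep of the table takes roughly SCAN_IN ms
// regardless of table size; a node still sitting in unanswered_pings after
// PING_TIMEOUT seconds is evicted. Note that nothing visible here ever
// inserts into unanswered_pings before the Ping is sent, so the timeout
// branch appears to rely on code elsewhere (or still to be written) doing
// `unanswered_pings.lock().unwrap().insert(node_id)` at send time.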
// Incoming router
// tokio::spawn(async move {
// while let Some((node_id, response)) = io_rx.next().await {
// match response {
// DiscoveryResponse::Pong => {
// unanswered_pings.lock().unwrap().remove(&node_id);
// }
// }
// }
// });
Self {
node_table,
concurrency: 3,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use secp256k1::SecretKey;
#[test]
fn | test_iterator | identifier_name |
|
lib.rs | ptr::NonNull,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
use tokio::{
net::UdpSocket,
prelude::*,
stream::{StreamExt, *},
};
use tokio_util::{codec::*, udp::*};
pub mod proto;
pub mod topic;
pub type RawNodeId = [u8; 32];
#[must_use]
pub fn distance(a: H256, b: H256) -> U256 {
U256::from_big_endian(&a.bitxor(b).0)
}
#[must_use]
pub fn logdistance(a: H256, b: H256) -> Option<usize> {
for i in (0..H256::len_bytes()).rev() {
let byte_index = H256::len_bytes() - i - 1;
let d: u8 = a[byte_index] ^ b[byte_index];
if d != 0 {
let high_bit_index = 7 - d.leading_zeros() as usize;
return Some(i * 8 + high_bit_index);
}
}
None // a and b are equal, so log distance is -inf
}
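// Added illustrative test (not in the original source; assumes the H256 type
// from ethereum-types/primitive-types, i.e. a `[u8; 32]` newtype exposing
// H256::zero()): logdistance returns the zero-based index of the highest
// differing bit, floor(log2(a ^ b)), and None for equal inputs.
#[cfg(test)]
mod logdistance_sketch {
    use super::*;

    #[test]
    fn lowest_bit_gives_distance_zero() {
        let a = H256::zero();
        let mut b = H256::zero();
        b.0[31] = 0b0000_0001; // differs only in the least significant bit
        assert_eq!(logdistance(a, b), Some(0));
        assert_eq!(logdistance(a, a), None); // equal => -inf, encoded as None
    }
}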
#[derive(Clone, Copy, Debug)]
pub enum PeerState {
New,
Ready,
}
#[derive(Derivative)]
#[derivative(Debug(bound = ""))]
pub struct NodeEntry<K: EnrKey> {
pub record: Enr<K>,
pub peer_state: PeerState,
pub liveness: Option<Instant>,
}
#[derive(Derivative)]
#[derivative(Default(bound = ""))]
struct Bucket<K: EnrKey> {
nodes: Box<FnvIndexMap<RawNodeId, NodeEntry<K>, U16>>,
recently_seen: Option<Enr<K>>,
}
pub struct NodeTable<K: EnrKey> {
host_id: H256,
buckets: Box<[Bucket<K>; 256]>,
all_nodes: Box<heapless::FnvIndexSet<RawNodeId, U4096>>,
}
impl<K: EnrKey> NodeTable<K> {
#[must_use]
pub fn new(host_id: H256) -> Self {
Self {
host_id,
buckets: Box::new(arr![Default::default(); 256]),
all_nodes: Default::default(),
}
}
#[must_use]
pub fn len(&self) -> usize {
self.all_nodes.len()
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
fn bucket_idx(&self, node_id: H256) -> Option<usize> {
logdistance(self.host_id, node_id)
}
fn bucket(&mut self, node_id: H256) -> Option<&mut Bucket<K>> {
Some(&mut self.buckets[self.bucket_idx(node_id)?])
}
pub fn node_mut(&mut self, node_id: H256) -> Option<&mut NodeEntry<K>> {
let bucket = self.bucket(node_id)?;
bucket.nodes.get_mut(&node_id.0)
}
pub fn add_node(&mut self, record: Enr<K>, peer_state: PeerState) {
let node_id = H256(record.node_id().raw());
// If we don't already have this node...
if !self.all_nodes.contains(&node_id.0) {
// Check that we're not adding self
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If there's space, add it...
if bucket.nodes.len() < bucket.nodes.capacity() {
let node_id = node_id.0;
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state,
liveness: None,
},
);
} else {
// ...or if at capacity, update replacement cache instead
bucket.recently_seen = Some(record);
}
}
}
}
pub fn evict_node(&mut self, node_id: H256) {
if let Some(bucket_idx) = self.bucket_idx(node_id) {
let bucket = &mut self.buckets[bucket_idx];
// If this node actually exists, remove it.
if bucket.nodes.remove(&node_id.0).is_some() {
self.all_nodes.remove(&node_id.0);
// And if there is a replacement, move it into the table
if let Some(record) = bucket.recently_seen.take() {
let node_id = record.node_id().raw();
let _ = self.all_nodes.insert(node_id);
let _ = bucket.nodes.insert(
node_id,
NodeEntry {
record,
peer_state: PeerState::New,
liveness: None,
},
);
}
}
}
}
pub fn update_liveness(&mut self, node_id: H256, timestamp: Instant) {
if let Some(node) = self.node_mut(node_id) {
node.liveness = Some(timestamp);
}
}
pub fn random_node(&mut self) -> Option<&mut NodeEntry<K>> {
if self.all_nodes.is_empty() {
return None; // guard: `% 0` below would panic on an empty table
}
let node_id = *self
.all_nodes
.iter()
.nth(rand::random::<usize>() % self.all_nodes.len())?;
Some(
self.node_mut(H256(node_id))
.expect("this node always exists at this point; qed"),
)
}
pub fn bucket_nodes(&mut self, logdistance: u8) -> BucketNodes<'_, K> {
BucketNodes(NodeEntries {
node_table: self,
current_bucket: logdistance as usize,
max_bucket: logdistance as usize,
current_bucket_remaining: None,
})
}
pub fn closest(&mut self) -> Closest<'_, K> {
Closest(NodeEntries {
node_table: self,
current_bucket: 0,
max_bucket: 255,
current_bucket_remaining: None,
})
}
}
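// Added usage sketch (not in the original source; the record construction is
// an assumption -- the enr crate's builder API and key types vary by version
// and feature flags, so adjust to the crate actually in use). It exercises
// the add/evict flow on an empty table.
#[cfg(test)]
mod node_table_sketch {
    use super::*;
    use secp256k1::SecretKey;

    #[test]
    fn add_then_evict_roundtrip() {
        let mut table = NodeTable::<SecretKey>::new(H256::random());
        // Hypothetical record construction, assuming enr's secp256k1 feature:
        let key = SecretKey::new(&mut rand::thread_rng());
        let record = enr::EnrBuilder::new("v4").build(&key).unwrap();
        let id = H256(record.node_id().raw());
        table.add_node(record, PeerState::New);
        assert_eq!(table.len(), 1);
        table.evict_node(id);
        assert!(table.is_empty());
    }
}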
pub struct BucketNodes<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for BucketNodes<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub struct Closest<'a, K: EnrKey>(NodeEntries<'a, K>);
impl<'a, K: EnrKey> Iterator for Closest<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
struct NodeEntries<'a, K: EnrKey> {
node_table: &'a mut NodeTable<K>,
current_bucket: usize,
max_bucket: usize,
current_bucket_remaining: Option<Vec<NonNull<NodeEntry<K>>>>,
}
impl<'a, K: EnrKey> Iterator for NodeEntries<'a, K> {
type Item = &'a mut NodeEntry<K>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let NodeEntries {
node_table,
current_bucket,
max_bucket,
current_bucket_remaining,
} = self;
trace!("Current bucket is {}", *current_bucket);
let host_id = node_table.host_id;
if let Some(ptr) = current_bucket_remaining
.get_or_insert_with(|| {
let mut nodes = node_table.buckets[*current_bucket]
.nodes
.values_mut()
.collect::<Vec<_>>();
trace!("Nodes before sorting: {:?}", nodes);
nodes.sort_by(|a, b| {
distance(host_id, H256(b.record.node_id().raw()))
.cmp(&distance(host_id, H256(a.record.node_id().raw())))
});
trace!("Nodes after sorting: {:?}", nodes);
nodes.into_iter().map(From::from).collect()
})
.pop()
{
// Safety: we have exclusive access to underlying node table
return Some(unsafe { &mut *ptr.as_ptr() });
}
if *current_bucket == *max_bucket {
return None;
}
*current_bucket += 1;
*current_bucket_remaining = None;
}
}
}
pub enum DiscoveryRequest {
Ping,
}
pub enum DiscoveryResponse {
Pong,
}
pub enum DiscoveryPacket {
WhoAreYou,
FindNode,
Ping,
Pong,
}
pub enum TableUpdate {
Added { node_id: H256, addr: SocketAddr },
Removed { node_id: H256 },
}
#[allow(dead_code)]
pub struct Discovery<K: EnrKey> {
node_table: Arc<Mutex<NodeTable<K>>>,
concurrency: usize,
}
impl<K: EnrKey + Send + 'static> Discovery<K> {
pub async fn new<
F: Fn(TableUpdate) -> Fut + Send + Sync + 'static,
Fut: Future<Output = ()> + Send,
>(
addr: String,
host_id: H256,
on_table_update: F,
) -> Self {
let socket = UdpSocket::bind(addr).await.unwrap();
let on_table_update = Arc::new(on_table_update);
| ops::BitXor, | random_line_split |
|
codewander-plotlyScatterPlot.js | label:'lines+markers'}],
defaultValue:'markers'
},
markerSize: {
type: "integer",
label: "Marker Size",
ref: "markerSize",
defaultValue: 12
},
highlightFirstPoint:{
type: "boolean",
ref: "highlightFirstPoint",
label: "Highlight First Point",
defaultValue: false
}
}
},
XAxisSettings: {
type: "items",
ref: "xAxisSettings",
label: "X Axis",
items: {
xTitle:{
type:"string",
ref: "xAxisSettings.xTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "xAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "xAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "xAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "xAxisSettings.showTicklabels",
defaultValue: true
}
}
},
YAxisSettings: {
type: "items",
ref: "yAxisSettings",
label: "Y Axis",
items: {
yTitle:{
type:"string",
ref: "yAxisSettings.yTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "yAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "yAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "yAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "yAxisSettings.showTicklabels",
defaultValue: true
}
}
},
GeneralSettings: {
type: "items",
ref: "generalSettings",
label: "General Settings",
items: {
DisplayModeBar:{
type: "string",
component: "dropdown",
label: "Display Mode Bar",
ref: "generalSettings.displayModeBar",
options: [{value:'1',label:'Always'},{value:'0',label:'on Hover'},{value:'-1',label:'Never'}],
defaultValue:'0'
},
showLegend: {
type: "boolean",
ref: "generalSettings.showLegend",
label: "Show Legend",
defaultValue: true
}
}
},
}
},
sorting: {
uses: "sorting"
}
}
},
support: {
snapshot: true,
export: true,
exportData: true
},
paint: function (layout) {
var self =this;
var heightofChart = this.$element.height();
var widthofChart= this.$element.width();
var leftMargin = 10
var rightMargin = 10
var topMargin =30
var bottomMargin =10
if (self.$scope.layout.xAxisSettings.xTitle != null && self.$scope.layout.xAxisSettings.xTitle !=''){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.xAxisSettings.showTicklabels ){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.yAxisSettings.yTitle != null && self.$scope.layout.yAxisSettings.yTitle !=''){
leftMargin = leftMargin +20
}
if (self.$scope.layout.yAxisSettings.showTicklabels ){
leftMargin = leftMargin +20
}
if (self.$scope.layout.generalSettings.showLegend) |
//var ff=window.getComputedStyle(document.getElementsByTagName("body")[0],null).getPropertyValue("font-family");
var pdisplayModeBar=false
if (self.$scope.layout.generalSettings.displayModeBar=='1') {pdisplayModeBar=true}
else if (self.$scope.layout.generalSettings.displayModeBar=='-1') {pdisplayModeBar= false;}
else {pdisplayModeBar=null;}
var graph_layout_setting ={responsive:true}
if (pdisplayModeBar !=null){graph_layout_setting.displayModeBar=pdisplayModeBar}
var graph_layout = {
font:{
family: 'QlikView Sans'
},
showlegend: self.$scope.layout.generalSettings.showLegend,
xaxis: {
showline: self.$scope.layout.xAxisSettings.showLine,
showgrid: self.$scope.layout.xAxisSettings.showGrid,
showticklabels: self.$scope.layout.xAxisSettings.showTicklabels,
zeroline: self.$scope.layout.xAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title: self.$scope.layout.xAxisSettings.xTitle
},
yaxis: {
showline: self.$scope.layout.yAxisSettings.showLine,
showgrid: self.$scope.layout.yAxisSettings.showGrid,
showticklabels: self.$scope.layout.yAxisSettings.showTicklabels,
zeroline: self.$scope.layout.yAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title:self.$scope.layout.yAxisSettings.yTitle
},
hovermode:'closest',
autosize: false,
width: widthofChart,
height: heightofChart,
margin: {
autoexpand: false,
l: leftMargin,
r: rightMargin,
t: topMargin,
b:bottomMargin
},
annotations: [
]
};
var qElemNumber=[];
var min_x=0;
var max_x=0;
var min_y=0;
var max_y=0;
var max_z=1;
var dataMatrix=[];
var cols=[];
var dimensions_count= this.$scope.layout.qHyperCube.qDimensionInfo.length;
var measures_count=this.$scope.layout.qHyperCube.qMeasureInfo.length;
if (measures_count % 2 != 0 && measures_count > 0) {
$('#myDiv').text("The chart requires an even number of measures to plot series");
return;
}
else{
$('#myDiv').empty();
}
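// Added note (not in the original file): measures are consumed in (x, y)
// pairs -- measures 0 and 1 feed series 0, measures 2 and 3 feed series 1,
// and so on. With one dimension [d0] and measures [m0, m1, m2, m3], a single
// hypercube row contributes the point (m0, m1) to series 0 and (m2, m3) to
// series 1, which is why an odd measure count is rejected above.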
$.each(this.$scope.layout.qHyperCube.qDimensionInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
$.each(this.$scope.layout.qHyperCube.qMeasureInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
//loop through the rows we have and render
this.backendApi.eachDataRow( function ( rownum, row ) {
self.$scope.lastrow = rownum;
dataMatrix.push(row);
});
var data=convert(dataMatrix);
/*for (var i =0 ; i<data.length; i++)
{
var start_annotation= {
xref: 'x',
yref: 'y',
text: '',
//font:{
//family: 'Arial',
//size: 30,
//color: 'rgb(37,37,37)'
//},
showarrow: true
}
start_annotation.x= data[i].x[0];
start_annotation.y= data[i].y[0];
start_annotation.text= 'start';
graph_layout.annotations.push( start_annotation)
}*/
render(data);
//needed for export
this.$scope.selections = [];
if(this.backendApi.getRowCount() > self.$scope.lastrow +1){
var requestPage = [{
qTop: self.$scope.lastrow + 1,
qLeft: 0,
qWidth: 10, //should be # of columns
qHeight: Math.min( 1000, this.backendApi.getRowCount() - self.$scope.lastrow )
}];
this.backendApi.getData( requestPage ).then( function ( dataPages ) {
//when we get the result trigger paint again
self.paint(layout );
} );
}
function convert(Matrix)
{
var data=[];
var total_series_count = measures_count/2;
var series | {
rightMargin=rightMargin + 100
} | conditional_block |
codewander-plotlyScatterPlot.js | ',label:'lines+markers'}],
defaultValue:'markers'
},
markerSize: {
type: "integer",
label: "Marker Size",
ref: "markerSize",
defaultValue: 12
},
highlightFirstPoint:{
type: "boolean",
ref: "highlightFirstPoint",
label: "Highlight First Point",
defaultValue: false
}
}
},
XAxisSettings: {
type: "items",
ref: "xAxisSettings",
label: "X Axis",
items: {
xTitle:{
type:"string",
ref: "xAxisSettings.xTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "xAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "xAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "xAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "xAxisSettings.showTicklabels",
defaultValue: true
}
}
},
YAxisSettings: {
type: "items",
ref: "yAxisSettings",
label: "Y Axis",
items: {
yTitle:{
type:"string",
ref: "yAxisSettings.yTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "yAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "yAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "yAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "yAxisSettings.showTicklabels",
defaultValue: true
}
}
},
GeneralSettings: {
type: "items",
ref: "generalSettings",
label: "General Settings",
items: {
DisplayModeBar:{
type: "string",
component: "dropdown",
label: "Display Mode Bar",
ref: "generalSettings.displayModeBar",
options: [{value:'1',label:'Always'},{value:'0',label:'on Hover'},{value:'-1',label:'Never'}],
defaultValue:'0'
},
showLegend: {
type: "boolean",
ref: "generalSettings.showLegend",
label: "Show Legend",
defaultValue: true
}
}
},
}
},
sorting: {
uses: "sorting"
}
}
},
support: {
snapshot: true,
export: true,
exportData: true
},
paint: function (layout) {
var self =this;
var heightofChart = this.$element.height();
var widthofChart= this.$element.width();
var leftMargin = 10
var rightMargin = 10
var topMargin =30
var bottomMargin =10
if (self.$scope.layout.xAxisSettings.xTitle != null && self.$scope.layout.xAxisSettings.xTitle !=''){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.xAxisSettings.showTicklabels ){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.yAxisSettings.yTitle != null && self.$scope.layout.yAxisSettings.yTitle !=''){
leftMargin = leftMargin +20
}
if (self.$scope.layout.yAxisSettings.showTicklabels ){
leftMargin = leftMargin +20
}
if (self.$scope.layout.generalSettings.showLegend){
rightMargin=rightMargin + 100
}
//var ff=window.getComputedStyle(document.getElementsByTagName("body")[0],null).getPropertyValue("font-family");
var pdisplayModeBar=false
if (self.$scope.layout.generalSettings.displayModeBar=='1') {pdisplayModeBar=true}
else if (self.$scope.layout.generalSettings.displayModeBar=='-1') {pdisplayModeBar= false;}
else {pdisplayModeBar=null;}
var graph_layout_setting ={responsive:true}
if (pdisplayModeBar !=null){graph_layout_setting.displayModeBar=pdisplayModeBar}
var graph_layout = {
font:{
family: 'QlikView Sans'
},
showlegend: self.$scope.layout.generalSettings.showLegend,
xaxis: {
showline: self.$scope.layout.xAxisSettings.showLine,
showgrid: self.$scope.layout.xAxisSettings.showGrid,
showticklabels: self.$scope.layout.xAxisSettings.showTicklabels,
zeroline: self.$scope.layout.xAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title: self.$scope.layout.xAxisSettings.xTitle
},
yaxis: {
showline: self.$scope.layout.yAxisSettings.showLine,
showgrid: self.$scope.layout.yAxisSettings.showGrid,
showticklabels: self.$scope.layout.yAxisSettings.showTicklabels,
zeroline: self.$scope.layout.yAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title:self.$scope.layout.yAxisSettings.yTitle
},
hovermode:'closest',
autosize: false,
width: widthofChart,
height: heightofChart,
margin: {
autoexpand: false,
l: leftMargin,
r: rightMargin,
t: topMargin,
b:bottomMargin
},
annotations: [
]
};
var qElemNumber=[];
var min_x=0;
var max_x=0;
var min_y=0;
var max_y=0;
var max_z=1;
var dataMatrix=[];
var cols=[];
var dimensions_count= this.$scope.layout.qHyperCube.qDimensionInfo.length;
var measures_count=this.$scope.layout.qHyperCube.qMeasureInfo.length;
if (measures_count % 2 != 0 && measures_count > 0) {
$('#myDiv').text("The chart requires even number of measures to plot series");
return;
}
else{
$('#myDiv').empty();
}
$.each(this.$scope.layout.qHyperCube.qDimensionInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
$.each(this.$scope.layout.qHyperCube.qMeasureInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
//loop through the rows we have and render
this.backendApi.eachDataRow( function ( rownum, row ) {
self.$scope.lastrow = rownum;
dataMatrix.push(row);
});
var data=convert(dataMatrix);
/*for (var i =0 ; i<data.length; i++)
{
var start_annotation= {
xref: 'x',
yref: 'y',
text: '',
//font:{
//family: 'Arial',
//size: 30,
//color: 'rgb(37,37,37)'
//},
showarrow: true
}
start_annotation.x= data[i].x[0];
start_annotation.y= data[i].y[0];
start_annotation.text= 'start';
graph_layout.annotations.push( start_annotation)
}*/
render(data);
//needed for export
this.$scope.selections = [];
if(this.backendApi.getRowCount() > self.$scope.lastrow +1){
var requestPage = [{
qTop: self.$scope.lastrow + 1,
qLeft: 0,
qWidth: 10, //should be # of columns
qHeight: Math.min( 1000, this.backendApi.getRowCount() - self.$scope.lastrow )
}];
this.backendApi.getData( requestPage ).then( function ( dataPages ) {
//when we get the result trigger paint again
self.paint(layout );
} );
}
function | (Matrix)
{
var data=[];
var total_series_count = measures_count/2;
var series | convert | identifier_name |
codewander-plotlyScatterPlot.js | Value:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "xAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "xAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "xAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "xAxisSettings.showTicklabels",
defaultValue: true
}
}
},
YAxisSettings: {
type: "items",
ref: "yAxisSettings",
label: "Y Axis",
items: {
yTitle:{
type:"string",
ref: "yAxisSettings.yTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "yAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "yAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "yAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "yAxisSettings.showTicklabels",
defaultValue: true
}
}
},
GeneralSettings: {
type: "items",
ref: "generalSettings",
label: "General Settings",
items: {
DisplayModeBar:{
type: "string",
component: "dropdown",
label: "Display Mode Bar",
ref: "generalSettings.displayModeBar",
options: [{value:'1',label:'Always'},{value:'0',label:'on Hover'},{value:'-1',label:'Never'}],
defaultValue:'0'
},
showLegend: {
type: "boolean",
ref: "generalSettings.showLegend",
label: "Show Legend",
defaultValue: true
}
}
},
}
},
sorting: {
uses: "sorting"
}
}
},
support: {
snapshot: true,
export: true,
exportData: true
},
paint: function (layout) {
var self =this;
var heightofChart = this.$element.height();
var widthofChart= this.$element.width();
var leftMargin = 10
var rightMargin = 10
var topMargin =30
var bottomMargin =10
if (self.$scope.layout.xAxisSettings.xTitle != null && self.$scope.layout.xAxisSettings.xTitle !=''){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.xAxisSettings.showTicklabels ){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.yAxisSettings.yTitle != null && self.$scope.layout.yAxisSettings.yTitle !=''){
leftMargin = leftMargin +20
}
if (self.$scope.layout.yAxisSettings.showTicklabels ){
leftMargin = leftMargin +20
}
if (self.$scope.layout.generalSettings.showLegend){
rightMargin=rightMargin + 100
}
//var ff=window.getComputedStyle(document.getElementsByTagName("body")[0],null).getPropertyValue("font-family");
var pdisplayModeBar=false
if (self.$scope.layout.generalSettings.displayModeBar=='1') {pdisplayModeBar=true}
else if (self.$scope.layout.generalSettings.displayModeBar=='-1') {pdisplayModeBar= false;}
else {pdisplayModeBar=null;}
var graph_layout_setting ={responsive:true}
if (pdisplayModeBar !=null){graph_layout_setting.displayModeBar=pdisplayModeBar}
var graph_layout = {
font:{
family: 'QlikView Sans'
},
showlegend: self.$scope.layout.generalSettings.showLegend,
xaxis: {
showline: self.$scope.layout.xAxisSettings.showLine,
showgrid: self.$scope.layout.xAxisSettings.showGrid,
showticklabels: self.$scope.layout.xAxisSettings.showTicklabels,
zeroline: self.$scope.layout.xAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title: self.$scope.layout.xAxisSettings.xTitle
},
yaxis: {
showline: self.$scope.layout.yAxisSettings.showLine,
showgrid: self.$scope.layout.yAxisSettings.showGrid,
showticklabels: self.$scope.layout.yAxisSettings.showTicklabels,
zeroline: self.$scope.layout.yAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title:self.$scope.layout.yAxisSettings.yTitle
},
hovermode:'closest',
autosize: false,
width: widthofChart,
height: heightofChart,
margin: {
autoexpand: false,
l: leftMargin,
r: rightMargin,
t: topMargin,
b:bottomMargin
},
annotations: [
]
};
var qElemNumber=[];
var min_x=0;
var max_x=0;
var min_y=0;
var max_y=0;
var max_z=1;
var dataMatrix=[];
var cols=[];
var dimensions_count= this.$scope.layout.qHyperCube.qDimensionInfo.length;
var measures_count=this.$scope.layout.qHyperCube.qMeasureInfo.length;
if (measures_count % 2 != 0 && measures_count > 0) {
$('#myDiv').text("The chart requires an even number of measures to plot series");
return;
}
else{
$('#myDiv').empty();
}
$.each(this.$scope.layout.qHyperCube.qDimensionInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
$.each(this.$scope.layout.qHyperCube.qMeasureInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
//loop through the rows we have and render
this.backendApi.eachDataRow( function ( rownum, row ) {
self.$scope.lastrow = rownum;
dataMatrix.push(row);
});
var data=convert(dataMatrix);
/*for (var i =0 ; i<data.length; i++)
{
var start_annotation= {
xref: 'x',
yref: 'y',
text: '',
//font:{
//family: 'Arial',
//size: 30,
//color: 'rgb(37,37,37)'
//},
showarrow: true
}
start_annotation.x= data[i].x[0];
start_annotation.y= data[i].y[0];
start_annotation.text= 'start';
graph_layout.annotations.push( start_annotation)
}*/
render(data);
//needed for export
this.$scope.selections = [];
if(this.backendApi.getRowCount() > self.$scope.lastrow +1){
var requestPage = [{
qTop: self.$scope.lastrow + 1,
qLeft: 0,
qWidth: 10, //should be # of columns
qHeight: Math.min( 1000, this.backendApi.getRowCount() - self.$scope.lastrow )
}];
this.backendApi.getData( requestPage ).then( function ( dataPages ) {
//when we get the result trigger paint again
self.paint(layout );
} );
}
function convert(Matrix)
| {
var data=[];
var total_series_count = measures_count/2;
var series_array =[]
var settingArrayLength = self.$scope.layout.seriesSettings.length;
var settingArray=self.$scope.layout.seriesSettings;
for (var i =0 ;i<total_series_count;i++)
{
var series={}
series.x=[];
series.y=[];
if (i<=settingArrayLength-1){
series.mode=settingArray[i].seriesType;
series.type="scatter";
series.text=[];
series.name=settingArray[i].seriesName != null ? settingArray[i].seriesName : ""
series.marker={size:12}
series.marker.size= settingArray[i].markerSize
if (settingArray[i].seriesColor != null && settingArray[i].seriesColor !="")
{ | identifier_body |
|
codewander-plotlyScatterPlot.js | Zero Line",
ref: "xAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "xAxisSettings.showTicklabels",
defaultValue: true
}
}
},
YAxisSettings: {
type: "items",
ref: "yAxisSettings",
label: "Y Axis",
items: {
yTitle:{
type:"string",
ref: "yAxisSettings.yTitle",
defaultValue:"",
expression: "always"
},
showGrid: {
type: "boolean",
ref: "yAxisSettings.showGrid",
label: "Show Grid",
defaultValue: true
},
showLine: {
type: "boolean",
ref: "yAxisSettings.showLine",
label: "Show Line",
defaultValue: true
},
showZeroLine: {
type: "boolean",
label: "Show Zero Line",
ref: "yAxisSettings.showZeroLine",
defaultValue:true
},
showTicklabels: {
type: "boolean",
label: "Show Tick Labels",
ref: "yAxisSettings.showTicklabels",
defaultValue: true
}
}
},
GeneralSettings: {
type: "items",
ref: "generalSettings",
label: "General Settings",
items: {
DisplayModeBar:{
type: "string",
component: "dropdown",
label: "Display Mode Bar",
ref: "generalSettings.displayModeBar",
options: [{value:'1',label:'Always'},{value:'0',label:'on Hover'},{value:'-1',label:'Never'}],
defaultValue:'0'
},
showLegend: {
type: "boolean",
ref: "generalSettings.showLegend",
label: "Show Legend",
defaultValue: true
}
}
},
}
},
sorting: {
uses: "sorting"
}
}
},
support: {
snapshot: true,
export: true,
exportData: true
},
paint: function (layout) {
var self =this;
var heightofChart = this.$element.height();
var widthofChart= this.$element.width();
var leftMargin = 10
var rightMargin = 10
var topMargin =30
var bottomMargin =10
if (self.$scope.layout.xAxisSettings.xTitle != null && self.$scope.layout.xAxisSettings.xTitle !=''){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.xAxisSettings.showTicklabels ){
bottomMargin = bottomMargin +20
}
if (self.$scope.layout.yAxisSettings.yTitle != null && self.$scope.layout.yAxisSettings.yTitle !=''){
leftMargin = leftMargin +20
}
if (self.$scope.layout.yAxisSettings.showTicklabels ){
leftMargin = leftMargin +20
}
if (self.$scope.layout.generalSettings.showLegend){
rightMargin=rightMargin + 100
}
//var ff=window.getComputedStyle(document.getElementsByTagName("body")[0],null).getPropertyValue("font-family");
var pdisplayModeBar=false
if (self.$scope.layout.generalSettings.displayModeBar=='1') {pdisplayModeBar=true}
else if (self.$scope.layout.generalSettings.displayModeBar=='-1') {pdisplayModeBar= false;}
else {pdisplayModeBar=null;}
var graph_layout_setting ={responsive:true}
if (pdisplayModeBar !=null){graph_layout_setting.displayModeBar=pdisplayModeBar}
var graph_layout = {
font:{
family: 'QlikView Sans'
},
showlegend: self.$scope.layout.generalSettings.showLegend,
xaxis: {
showline: self.$scope.layout.xAxisSettings.showLine,
showgrid: self.$scope.layout.xAxisSettings.showGrid,
showticklabels: self.$scope.layout.xAxisSettings.showTicklabels,
zeroline: self.$scope.layout.xAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title: self.$scope.layout.xAxisSettings.xTitle
},
yaxis: {
showline: self.$scope.layout.yAxisSettings.showLine,
showgrid: self.$scope.layout.yAxisSettings.showGrid,
showticklabels: self.$scope.layout.yAxisSettings.showTicklabels,
zeroline: self.$scope.layout.yAxisSettings.showZeroLine,
linecolor: 'rgb(204,204,204)',
tickangle: 'auto',
title:self.$scope.layout.yAxisSettings.yTitle
},
hovermode:'closest',
autosize: false,
width: widthofChart,
height: heightofChart,
margin: {
autoexpand: false,
l: leftMargin,
r: rightMargin,
t: topMargin,
b:bottomMargin
},
annotations: [
]
};
var qElemNumber=[];
var min_x=0;
var max_x=0;
var min_y=0;
var max_y=0;
var max_z=1;
var dataMatrix=[];
var cols=[];
var dimensions_count= this.$scope.layout.qHyperCube.qDimensionInfo.length;
var measures_count=this.$scope.layout.qHyperCube.qMeasureInfo.length;
if (measures_count % 2 != 0 && measures_count > 0) {
$('#myDiv').text("The chart requires an even number of measures to plot series");
return;
}
else{
$('#myDiv').empty();
}
$.each(this.$scope.layout.qHyperCube.qDimensionInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
$.each(this.$scope.layout.qHyperCube.qMeasureInfo,function(index,item){
cols.push((item.title !=null && item.title!="")?item.title : item.qFallbackTitle);
});
//loop through the rows we have and render
this.backendApi.eachDataRow( function ( rownum, row ) {
self.$scope.lastrow = rownum;
dataMatrix.push(row);
});
var data=convert(dataMatrix);
/*for (var i =0 ; i<data.length; i++)
{
var start_annotation= {
xref: 'x',
yref: 'y',
text: '',
//font:{
//family: 'Arial',
//size: 30,
//color: 'rgb(37,37,37)'
//},
showarrow: true
}
start_annotation.x= data[i].x[0];
start_annotation.y= data[i].y[0];
start_annotation.text= 'start';
graph_layout.annotations.push( start_annotation)
}*/
render(data);
//needed for export
this.$scope.selections = [];
if(this.backendApi.getRowCount() > self.$scope.lastrow +1){
var requestPage = [{
qTop: self.$scope.lastrow + 1,
qLeft: 0,
qWidth: 10, //should be # of columns
qHeight: Math.min( 1000, this.backendApi.getRowCount() - self.$scope.lastrow )
}];
this.backendApi.getData( requestPage ).then( function ( dataPages ) {
//when we get the result trigger paint again
self.paint(layout );
} );
}
function convert(Matrix)
{
var data=[];
var total_series_count = measures_count/2;
var series_array =[]
var settingArrayLength = self.$scope.layout.seriesSettings.length;
var settingArray=self.$scope.layout.seriesSettings;
for (var i =0 ;i<total_series_count;i++)
{
var series={}
series.x=[];
series.y=[];
if (i<=settingArrayLength-1){
series.mode=settingArray[i].seriesType;
series.type="scatter";
series.text=[];
series.name=settingArray[i].seriesName != null ? settingArray[i].seriesName : ""
series.marker={size:12}
series.marker.size= settingArray[i].markerSize
if (settingArray[i].seriesColor != null && settingArray[i].seriesColor !="")
{
series.marker.color=settingArray[i].seriesColor
}
if (settingArray[i].highlightFirstPoint){
series.marker.line={
color: ['rgb(0, 0, 0)'],
width: [3 ,1]
}
}
}
else{
series.mode="markers";
series.type="scatter";
series.text=[]; | series.marker={size:12}
}
series_array.push(series);
} | random_line_split |
|
sdr_rec_type.go | Sensor) SetMBExp(M int16, B int16, Bexp int8, Rexp int8) {
r.MTol = 0
r.Bacc = 0
r.RBexp = 0
_M := uint16(math.Abs(float64(M)))
_M = _M & 0x01ff //mask leave low 9bit
if M < 0 {
_M = (((^_M) + 1) & 0x01ff) | 0x0200
}
r.MTol = r.MTol | (_M & 0x00ff)
r.MTol = r.MTol | ((_M << 6) & 0xc000)
_B := uint16(math.Abs(float64(B)))
_B = _B & 0x01ff //mask leave low 9bit
if B < 0 {
_B = (((^_B) + 1) & 0x01ff) | 0x0200
}
r.Bacc = r.Bacc | (_B & 0x00ff)
r.Bacc = r.Bacc | ((_B << 6) & 0xc000)
_Bexp := uint8(math.Abs(float64(Bexp)))
_Bexp = _Bexp & 0x07 //mask: keep the low 3 bits
if Bexp < 0 {
_Bexp = (((^_Bexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | (_Bexp & 0x0f)
_Rexp := uint8(math.Abs(float64(Rexp)))
_Rexp = _Rexp & 0x07 //mask leave low 3bit
if Rexp < 0 {
_Rexp = (((^_Rexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | ((_Rexp << 4) & 0xf0)
}
func (r *SDRFullSensor) GetMBExp() (M int16, B int16, Bexp int8, Rexp int8) {
_M := uint16(((r.MTol & 0xc000) >> 6) | (r.MTol & 0x00ff))
if (_M & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
//fmt.Printf("%d,0x%x\n", int16((_M & 0xfdff)), (_M & 0xfdff))
M = int16((_M & 0xfdff)) - 512 //2^9
} else {
M = int16(_M & 0xfdff)
}
_B := uint16(((r.Bacc & 0xc000) >> 6) | (r.Bacc & 0x00ff))
if (_B & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
B = int16((_B & 0xfdff)) - 512 //2^9
} else {
B = int16(_B & 0xfdff)
}
_Bexp := uint8(r.RBexp & 0x0f)
if (_Bexp & 0x08) == 0x08 {
Bexp = int8((_Bexp & 0xf7)) - 8 //2^3
} else {
Bexp = int8(_Bexp & 0xf7)
}
_Rexp := uint8((r.RBexp & 0xf0) >> 4)
if (_Rexp & 0x08) == 0x08 {
Rexp = int8((_Rexp & 0xf7)) - 8 //2^3
} else {
Rexp = int8(_Rexp & 0xf7)
}
return
}
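// Added round-trip sketch (not in the original file; assumes "fmt" is
// imported and exampleMBExpRoundTrip is a hypothetical helper name):
// SetMBExp packs the 10-bit two's-complement M and B and the 4-bit
// two's-complement Bexp and Rexp into MTol/Bacc/RBexp, and GetMBExp
// recovers them, so negative factors survive the encoding.
func exampleMBExpRoundTrip() {
	r, _ := NewSDRFullSensor(1, "Temp0")
	r.SetMBExp(-5, 10, -2, 3)
	M, B, Bexp, Rexp := r.GetMBExp()
	fmt.Println(M, B, Bexp, Rexp) // prints: -5 10 -2 3
}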
// convert the given value into a raw SDR reading, using the current M, B, Bexp and Rexp settings
func (r *SDRFullSensor) CalValue(value float64) uint8 {
M, B, Bexp, Rexp := r.GetMBExp()
if M == 0 {
panic(ErrMZero)
}
//y=(M x V + B x pow(10,Bexp)) x pow(10,Rexp)
//know y, cal V
var neg bool = false
v := (value/math.Pow(10, float64(Rexp)) - float64(B)*math.Pow(10, float64(Bexp))) / float64(M)
if v < 0 {
neg = true
}
v = math.Abs(v)
uv := uint8(v)
if neg {
if (r.Unit & 0xc0) == 0x80 {
return ((128 - uv) | 0x80)
} else {
panic(ErrUnitNotSupport)
}
} else {
if (r.Unit & 0xc0) == 0x00 {
return uv
} else if (r.Unit & 0xc0) == 0x80 {
return uv & 0x7f
} else {
panic(ErrUnitNotSupport)
}
}
}
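// Added worked example (not in the original file): with M=2, B=0, Bexp=0,
// Rexp=-1 the IPMI conversion y = (M*V + B*10^Bexp) * 10^Rexp reads
// y = 0.2 * V. Asking CalValue for y = 25.0 therefore solves
// V = (25.0 / 10^-1 - 0) / 2 = 125, and for an unsigned sensor
// (Unit & 0xc0 == 0x00) the raw reading 125 is returned.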
func (r *SDRFullSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrFullSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRFullSensor) UnmarshalBinary(data []byte) error {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrFullSensorFields)
idLen, err := buffer.ReadByte()
if err != nil {
return err
}
id := make([]byte, int(idLen))
n, err := buffer.Read(id)
if err != nil || n != int(idLen) {
return ErrIdStringLenNotMatch
}
r.deviceId = string(id)
return nil
}
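// Added round-trip sketch (not in the original file; sketchSDRRoundTrip is a
// hypothetical helper name): MarshalBinary writes header, record length,
// fixed fields, then id length + id bytes, and UnmarshalBinary reads them
// back in the same order, so the device id survives intact.
func sketchSDRRoundTrip() error {
	src, _ := NewSDRFullSensor(7, "Fan1")
	raw, err := src.MarshalBinary()
	if err != nil {
		return err
	}
	dst := &SDRFullSensor{}
	if err := dst.UnmarshalBinary(raw); err != nil {
		return err
	}
	if dst.DeviceId() != "Fan1" {
		return ErrIdStringLenNotMatch // reusing an existing sentinel for the sketch
	}
	return nil
}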
// section 43.2
type sdrCompactSensorFields struct { //size 26
SensorOwnerId uint8
SensorOwnerLUN uint8
SensorNumber uint8
EntityId uint8
EntityIns uint8
SensorInit uint8
SensorCap uint8
SensorType SDRSensorType
ReadingType SDRSensorReadingType
AssertionEventMask uint16
DeassertionEventMask uint16
DiscreteReadingMask uint16
Unit uint8
BaseUnit uint8
ModifierUnit uint8
SensorRecSharing uint16
PThresHysteresisVal uint8
NThresHysteresisVal uint8
Reserved [2]byte
OEM uint8
IDStringTypeLen uint8
}
type SDRCompactSensor struct {
SDRRecordHeader
sdrCompactSensorFields
deviceId string
}
func NewSDRCompactSensor(id uint16, name string) (*SDRCompactSensor, error) {
if len(name) > 16 {
return nil, ErrDeviceIdMustLess16
}
r := &SDRCompactSensor{}
r.Recordid = id
r.Rtype = SDR_RECORD_TYPE_COMPACT_SENSOR
r.SDRVersion = 0x51
r.deviceId = name
return r, nil
}
func (r *SDRCompactSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrCompactSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRCompactSensor) DeviceId() string {
return r.deviceId
}
func (r *SDRCompactSensor) RecordId() uint16 {
return r.Recordid
}
func (r *SDRCompactSensor) RecordType() SDRRecordType {
return r.Rtype
}
func (r *SDRCompactSensor) | UnmarshalBinary | identifier_name |
|
sdr_rec_type.go | 1ff) | 0x0200
}
r.Bacc = r.Bacc | (_B & 0x00ff)
r.Bacc = r.Bacc | ((_B << 6) & 0xc000)
_Bexp := uint8(math.Abs(float64(Bexp)))
_Bexp = _Bexp & 0x07 //mask: keep the low 3 bits
if Bexp < 0 {
_Bexp = (((^_Bexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | (_Bexp & 0x0f)
_Rexp := uint8(math.Abs(float64(Rexp)))
_Rexp = _Rexp & 0x07 //mask leave low 3bit
if Rexp < 0 {
_Rexp = (((^_Rexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | ((_Rexp << 4) & 0xf0)
}
func (r *SDRFullSensor) GetMBExp() (M int16, B int16, Bexp int8, Rexp int8) {
_M := uint16(((r.MTol & 0xc000) >> 6) | (r.MTol & 0x00ff))
if (_M & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
//fmt.Printf("%d,0x%x\n", int16((_M & 0xfdff)), (_M & 0xfdff))
M = int16((_M & 0xfdff)) - 512 //2^9
} else {
M = int16(_M & 0xfdff)
}
_B := uint16(((r.Bacc & 0xc000) >> 6) | (r.Bacc & 0x00ff))
if (_B & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
B = int16((_B & 0xfdff)) - 512 //2^9
} else {
B = int16(_B & 0xfdff)
}
_Bexp := uint8(r.RBexp & 0x0f)
if (_Bexp & 0x08) == 0x08 {
Bexp = int8((_Bexp & 0xf7)) - 8 //2^3
} else {
Bexp = int8(_Bexp & 0xf7)
}
_Rexp := uint8((r.RBexp & 0xf0) >> 4)
if (_Rexp & 0x08) == 0x08 {
Rexp = int8((_Rexp & 0xf7)) - 8 //2^3
} else {
Rexp = int8(_Rexp & 0xf7)
}
return
}
// convert the given value into a raw SDR reading, using the current M, B, Bexp and Rexp settings
func (r *SDRFullSensor) CalValue(value float64) uint8 {
M, B, Bexp, Rexp := r.GetMBExp()
if M == 0 {
panic(ErrMZero)
}
//y=(M x V + B x pow(10,Bexp)) x pow(10,Rexp)
//know y, cal V
var neg bool = false
v := (value/math.Pow(10, float64(Rexp)) - float64(B)*math.Pow(10, float64(Bexp))) / float64(M)
if v < 0 {
neg = true
}
v = math.Abs(v)
uv := uint8(v)
if neg {
if (r.Unit & 0xc0) == 0x80 {
return ((128 - uv) | 0x80)
} else {
panic(ErrUnitNotSupport)
}
} else {
if (r.Unit & 0xc0) == 0x00 {
return uv
} else if (r.Unit & 0xc0) == 0x80 {
return uv & 0x7f
} else {
panic(ErrUnitNotSupport)
}
}
}
func (r *SDRFullSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrFullSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRFullSensor) UnmarshalBinary(data []byte) error {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrFullSensorFields)
idLen, err := buffer.ReadByte()
if err != nil {
return err
}
id := make([]byte, int(idLen))
n, err := buffer.Read(id)
if err != nil || n != int(idLen) {
return ErrIdStringLenNotMatch
}
r.deviceId = string(id)
return nil
}
// section 43.2
type sdrCompactSensorFields struct { //size 26
SensorOwnerId uint8
SensorOwnerLUN uint8
SensorNumber uint8
EntityId uint8
EntityIns uint8
SensorInit uint8
SensorCap uint8
SensorType SDRSensorType
ReadingType SDRSensorReadingType
AssertionEventMask uint16
DeassertionEventMask uint16
DiscreteReadingMask uint16
Unit uint8
BaseUnit uint8
ModifierUnit uint8
SensorRecSharing uint16
PThresHysteresisVal uint8
NThresHysteresisVal uint8
Reserved [2]byte
OEM uint8
IDStringTypeLen uint8
}
type SDRCompactSensor struct {
SDRRecordHeader
sdrCompactSensorFields
deviceId string
}
func NewSDRCompactSensor(id uint16, name string) (*SDRCompactSensor, error) {
if len(name) > 16 {
return nil, ErrDeviceIdMustLess16
}
r := &SDRCompactSensor{}
r.Recordid = id
r.Rtype = SDR_RECORD_TYPE_COMPACT_SENSOR
r.SDRVersion = 0x51
r.deviceId = name
return r, nil
}
func (r *SDRCompactSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrCompactSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRCompactSensor) DeviceId() string {
return r.deviceId
}
func (r *SDRCompactSensor) RecordId() uint16 {
return r.Recordid
}
func (r *SDRCompactSensor) RecordType() SDRRecordType {
return r.Rtype
}
func (r *SDRCompactSensor) UnmarshalBinary(data []byte) error {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrCompactSensorFields)
idLen, err := buffer.ReadByte()
if err != nil {
return err
}
id := make([]byte, int(idLen))
n, err := buffer.Read(id)
if err != nil || n != int(idLen) {
return ErrIdStringLenNotMatch
}
r.deviceId = string(id)
return nil
}
// section 43.9
type sdrMCDeviceLocFields struct { //size 26
DeviceSlaveAddr uint8
ChannNum uint8
PowerStaNotif uint8
DeviceCapab uint8 | Reserved [3]byte
EntityID uint8 | random_line_split |
|
sdr_rec_type.go | ())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
// section 43.1
type sdrFullSensorFields struct { //size 42
SensorOwnerId uint8
SensorOwnerLUN uint8
SensorNumber uint8
EntityId uint8
EntityIns uint8
SensorInit uint8
SensorCap uint8
SensorType SDRSensorType
ReadingType SDRSensorReadingType
AssertionEventMask uint16
DeassertionEventMask uint16
DiscreteReadingMask uint16
Unit uint8
BaseUnit uint8
ModifierUnit uint8
Linearization uint8
MTol uint16
Bacc uint16
Acc uint8
RBexp uint8
AnalogFlag uint8
NominalReading uint8
NormalMax uint8
NormalMin uint8
SensorMax uint8
SensorMin uint8
U_NR uint8
U_C uint8
U_NC uint8
L_NR uint8
L_C uint8
L_NC uint8
PositiveHysteresis uint8
NegativeHysteresis uint8
Reserved [2]byte
OEM uint8
}
type SDRFullSensor struct {
SDRRecordHeader
sdrFullSensorFields
deviceId string
}
func NewSDRFullSensor(id uint16, name string) (*SDRFullSensor, error) {
if len(name) > 16 {
return nil, ErrDeviceIdMustLess16
}
r := &SDRFullSensor{}
r.Recordid = id
r.Rtype = SDR_RECORD_TYPE_FULL_SENSOR
r.SDRVersion = 0x51
r.deviceId = name
return r, nil
}
func (r *SDRFullSensor) DeviceId() string {
return r.deviceId
}
func (r *SDRFullSensor) RecordId() uint16 {
return r.Recordid
}
func (r *SDRFullSensor) RecordType() SDRRecordType {
return r.Rtype
}
//M: 10bit signed 2's complement
//B: 10bit signed 2's complement
//Bexp: 4bit signed 2's complement
//Rexp: 4bit signed 2's complement
func (r *SDRFullSensor) SetMBExp(M int16, B int16, Bexp int8, Rexp int8) {
r.MTol = 0
r.Bacc = 0
r.RBexp = 0
_M := uint16(math.Abs(float64(M)))
_M = _M & 0x01ff //mask leave low 9bit
if M < 0 {
_M = (((^_M) + 1) & 0x01ff) | 0x0200
}
r.MTol = r.MTol | (_M & 0x00ff)
r.MTol = r.MTol | ((_M << 6) & 0xc000)
_B := uint16(math.Abs(float64(B)))
_B = _B & 0x01ff //mask leave low 9bit
if B < 0 {
_B = (((^_B) + 1) & 0x01ff) | 0x0200
}
r.Bacc = r.Bacc | (_B & 0x00ff)
r.Bacc = r.Bacc | ((_B << 6) & 0xc000)
_Bexp := uint8(math.Abs(float64(Bexp)))
_Bexp = _Bexp & 0x07 //mask: keep the low 3 bits
if Bexp < 0 {
_Bexp = (((^_Bexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | (_Bexp & 0x0f)
_Rexp := uint8(math.Abs(float64(Rexp)))
_Rexp = _Rexp & 0x07 //mask leave low 3bit
if Rexp < 0 {
_Rexp = (((^_Rexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | ((_Rexp << 4) & 0xf0)
}
func (r *SDRFullSensor) GetMBExp() (M int16, B int16, Bexp int8, Rexp int8) {
_M := uint16(((r.MTol & 0xc000) >> 6) | (r.MTol & 0x00ff))
if (_M & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
//fmt.Printf("%d,0x%x\n", int16((_M & 0xfdff)), (_M & 0xfdff))
M = int16((_M & 0xfdff)) - 512 //2^9
} else {
M = int16(_M & 0xfdff)
}
_B := uint16(((r.Bacc & 0xc000) >> 6) | (r.Bacc & 0x00ff))
if (_B & 0x0200) == 0x0200 { //most significant bit is 1, i.e. the value is negative
B = int16((_B & 0xfdff)) - 512 //2^9
} else {
B = int16(_B & 0xfdff)
}
_Bexp := uint8(r.RBexp & 0x0f)
if (_Bexp & 0x08) == 0x08 {
Bexp = int8((_Bexp & 0xf7)) - 8 //2^3
} else {
Bexp = int8(_Bexp & 0xf7)
}
_Rexp := uint8((r.RBexp & 0xf0) >> 4)
if (_Rexp & 0x08) == 0x08 {
Rexp = int8((_Rexp & 0xf7)) - 8 //2^3
} else {
Rexp = int8(_Rexp & 0xf7)
}
return
}
// convert the given value into a raw SDR reading, using the current M, B, Bexp and Rexp settings
func (r *SDRFullSensor) CalValue(value float64) uint8 {
M, B, Bexp, Rexp := r.GetMBExp()
if M == 0 {
panic(ErrMZero)
}
//y=(M x V + B x pow(10,Bexp)) x pow(10,Rexp)
//know y, cal V
var neg bool = false
v := (value/math.Pow(10, float64(Rexp)) - float64(B)*math.Pow(10, float64(Bexp))) / float64(M)
if v < 0 {
neg = true
}
v = math.Abs(v)
uv := uint8(v)
if neg {
if (r.Unit & 0xc0) == 0x80 {
return ((128 - uv) | 0x80)
} else {
panic(ErrUnitNotSupport)
}
} else {
if (r.Unit & 0xc0) == 0x00 {
return uv
} else if (r.Unit & 0xc0) == 0x80 {
return uv & 0x7f
} else {
panic(ErrUnitNotSupport)
}
}
}
func (r *SDRFullSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrFullSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRFullSensor) UnmarshalBinary(data []byte) error | {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrFullSensorFields)
idLen, err := buffer.ReadByte()
if err != nil {
return err
}
| identifier_body |
|
sdr_rec_type.go | 0x01ff) | 0x0200
}
r.MTol = r.MTol | (_M & 0x00ff)
r.MTol = r.MTol | ((_M << 6) & 0xc000)
_B := uint16(math.Abs(float64(B)))
_B = _B & 0x01ff //mask leave low 9bit
if B < 0 {
_B = (((^_B) + 1) & 0x01ff) | 0x0200
}
r.Bacc = r.Bacc | (_B & 0x00ff)
r.Bacc = r.Bacc | ((_B << 6) & 0xc000)
_Bexp := uint8(math.Abs(float64(Bexp)))
_Bexp = _Bexp & 0x07 //mask leeve low 3bit
if Bexp < 0 {
_Bexp = (((^_Bexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | (_Bexp & 0x0f)
_Rexp := uint8(math.Abs(float64(Rexp)))
_Rexp = _Rexp & 0x07 //mask leave low 3bit
if Rexp < 0 {
_Rexp = (((^_Rexp) + 1) & 0x07) | 0x08
}
r.RBexp = r.RBexp | ((_Rexp << 4) & 0xf0)
}
func (r *SDRFullSensor) GetMBExp() (M int16, B int16, Bexp int8, Rexp int8) {
_M := uint16(((r.MTol & 0xc000) >> 6) | (r.MTol & 0x00ff))
if (_M & 0x0200) == 0x0200 { //most significate is 1, mean signed
//fmt.Printf("%d,0x%x\n", int16((_M & 0xfdff)), (_M & 0xfdff))
M = int16((_M & 0xfdff)) - 512 //2^9
} else {
M = int16(_M & 0xfdff)
}
_B := uint16(((r.Bacc & 0xc000) >> 6) | (r.Bacc & 0x00ff))
if (_B & 0x0200) == 0x0200 { //most significate is 1, mean signed
B = int16((_B & 0xfdff)) - 512 //2^9
} else {
B = int16(_B & 0xfdff)
}
_Bexp := uint8(r.RBexp & 0x0f)
if (_Bexp & 0x08) == 0x08 {
Bexp = int8((_Bexp & 0xf7)) - 8 //2^3
} else {
Bexp = int8(_Bexp & 0xf7)
}
_Rexp := uint8((r.RBexp & 0xf0) >> 4)
if (_Rexp & 0x08) == 0x08 {
Rexp = int8((_Rexp & 0xf7)) - 8 //2^3
} else {
Rexp = int8(_Rexp & 0xf7)
}
return
}
// calculate the given value into the SDR reading value, using current M,B,Bexp,Rexp setting
func (r *SDRFullSensor) CalValue(value float64) uint8 {
M, B, Bexp, Rexp := r.GetMBExp()
if M == 0 {
panic(ErrMZero)
}
//y=(M x V + B x pow(10,Bexp)) x pow(10,Rexp)
//know y, cal V
var neg bool = false
v := (value/math.Pow(10, float64(Rexp)) - float64(B)*math.Pow(10, float64(Bexp))) / float64(M)
if v < 0 {
neg = true
}
v = math.Abs(v)
uv := uint8(v)
if neg {
if (r.Unit & 0xc0) == 0x80 {
return ((128 - uv) | 0x80)
} else {
panic(ErrUnitNotSupport)
}
} else {
if (r.Unit & 0xc0) == 0x00 {
return uv
} else if (r.Unit & 0xc0) == 0x80 {
return uv & 0x7f
} else {
panic(ErrUnitNotSupport)
}
}
}
func (r *SDRFullSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrFullSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRFullSensor) UnmarshalBinary(data []byte) error {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrFullSensorFields)
idLen, err := buffer.ReadByte()
if err != nil {
return err
}
id := make([]byte, int(idLen))
n, err := buffer.Read(id)
if err != nil || n != int(idLen) {
return ErrIdStringLenNotMatch
}
r.deviceId = string(id)
return nil
}
// section 43.2
type sdrCompactSensorFields struct { //size 26
SensorOwnerId uint8
SensorOwnerLUN uint8
SensorNumber uint8
EntityId uint8
EntityIns uint8
SensorInit uint8
SensorCap uint8
SensorType SDRSensorType
ReadingType SDRSensorReadingType
AssertionEventMask uint16
DeassertionEventMask uint16
DiscreteReadingMask uint16
Unit uint8
BaseUnit uint8
ModifierUnit uint8
SensorRecSharing uint16
PThresHysteresisVal uint8
NThresHysteresisVal uint8
Reserved [2]byte
OEM uint8
IDStringTypeLen uint8
}
type SDRCompactSensor struct {
SDRRecordHeader
sdrCompactSensorFields
deviceId string
}
func NewSDRCompactSensor(id uint16, name string) (*SDRCompactSensor, error) {
if len(name) > 16 {
return nil, ErrDeviceIdMustLess16
}
r := &SDRCompactSensor{}
r.Recordid = id
r.Rtype = SDR_RECORD_TYPE_COMPACT_SENSOR
r.SDRVersion = 0x51
r.deviceId = name
return r, nil
}
func (r *SDRCompactSensor) MarshalBinary() (data []byte, err error) {
hb := new(bytes.Buffer)
fb := new(bytes.Buffer)
db := new(bytes.Buffer)
binary.Write(hb, binary.LittleEndian, r.SDRRecordHeader)
binary.Write(fb, binary.LittleEndian, r.sdrCompactSensorFields)
db.WriteByte(byte(len(r.DeviceId())))
db.WriteString(r.DeviceId())
//merge all
recLen := uint8(fb.Len() + db.Len())
hb.WriteByte(byte(recLen))
hb.Write(fb.Bytes())
hb.Write(db.Bytes())
return hb.Bytes(), nil
}
func (r *SDRCompactSensor) DeviceId() string {
return r.deviceId
}
func (r *SDRCompactSensor) RecordId() uint16 {
return r.Recordid
}
func (r *SDRCompactSensor) RecordType() SDRRecordType {
return r.Rtype
}
func (r *SDRCompactSensor) UnmarshalBinary(data []byte) error {
buffer := bytes.NewReader(data)
err := binary.Read(buffer, binary.LittleEndian, &r.SDRRecordHeader)
if err != nil {
return err
}
//skip the record length
_, err = buffer.ReadByte()
if err != nil {
return err
}
binary.Read(buffer, binary.LittleEndian, &r.sdrCompactSensorFields)
idLen, err := buffer.ReadByte()
if err != nil | {
return err
} | conditional_block |
|
bytesearch.rs | ::LENGTH) == 0 }
}
}
}
impl<const N: usize> ByteSearcher for [u8; N] {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
if N == 1 {
return memchr::memchr(self[0], rhs);
}
for win in rhs.windows(Self::LENGTH) {
if self.equals_known_len(win) {
// Black magic?
return Some((win.as_ptr() as usize) - (rhs.as_ptr() as usize));
}
}
None
}
}
/// A ByteSet is any set of bytes.
pub trait ByteSet {
/// \return whether the ByteSet contains the byte.
fn contains(&self, b: u8) -> bool;
}
/// A ByteArraySet wraps a small array and uses linear equality.
#[derive(Copy, Clone, Debug)]
#[repr(align(4))]
pub struct ByteArraySet<ArraySet: SmallArraySet>(pub ArraySet);
/// Cover over contains() to avoid bumping into native contains call.
impl<ArraySet: SmallArraySet> ByteArraySet<ArraySet> {
#[inline(always)]
pub fn contains(self, b: u8) -> bool {
self.0.contains(b)
}
}
impl<ArraySet: SmallArraySet> ByteSearcher for ByteArraySet<ArraySet> {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
self.0.find_in(rhs)
}
}
/// A SmallArraySet is a set implemented as a small byte array.
pub trait SmallArraySet: Copy {
fn contains(self, b: u8) -> bool;
fn find_in(self, rhs: &[u8]) -> Option<usize>;
}
// Beware: Rust is cranky about loop unrolling.
// Do not try to be too clever here.
impl SmallArraySet for [u8; 2] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr2(self[0], self[1], rhs)
}
}
impl SmallArraySet for [u8; 3] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr3(self[0], self[1], self[2], rhs)
}
}
impl SmallArraySet for [u8; 4] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2] || b == self[3]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
// TODO.
for (idx, byte) in rhs.iter().enumerate() {
if self.contains(*byte) |
}
None
}
}
// CharSet helper. Avoid branching in the loop to get good unrolling.
#[allow(unused_parens)]
#[inline(always)]
pub fn charset_contains(set: &[u32; MAX_CHAR_SET_LENGTH], c: u32) -> bool {
let mut result = false;
for &v in set.iter() {
result |= (v == c);
}
result
}
/// A helper function for formatting bitmaps, using - ranges.
fn format_bitmap<Func>(name: &str, f: &mut fmt::Formatter<'_>, contains: Func) -> fmt::Result
where
Func: Fn(u8) -> bool,
{
write!(f, "{}[", name)?;
let mut idx = 0;
let mut maybe_space = "";
while idx <= 256 {
// Compute the next value not contained.
let mut end = idx;
while end <= 256 && contains(end as u8) {
end += 1;
}
match end - idx {
0 => (),
1 => write!(f, "{}{}", maybe_space, idx)?,
_ => write!(f, "{}{}-{}", maybe_space, idx, end - 1)?,
};
if end > idx {
maybe_space = " ";
}
idx = end + 1
}
write!(f, "]")?;
Ok(())
}
/// A bitmap covering ASCII characters.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct AsciiBitmap(pub [u8; 16]);
impl AsciiBitmap {
/// Set a byte val in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
debug_assert!(val <= 127, "Value should be ASCII");
self.0[(val >> 3) as usize] |= 1 << (val & 0x7);
}
}
impl fmt::Debug for AsciiBitmap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
format_bitmap("AsciiBitmap", f, |v| self.contains(v))
}
}
impl ByteSet for AsciiBitmap {
/// \return whether this bitmap contains a given value.
/// The value does NOT have to be ASCII.
#[inline(always)]
fn contains(&self, val: u8) -> bool {
// Delicate tricks to avoid branches.
// In general we want to compute the byte via /8, and then mask into the
// byte. But if the value is not ASCII then the byte could be too large.
// So mask off the MSB so that the byte is always in range.
let byte = (val & 0x7F) >> 3;
let bit = val & 0x7;
// Now probe the bitmap. If our sign bit was set, we want the mask to be 0;
// otherwise we want to set only the 'bit' offset.
// Invert the sign bit and reuse it.
let mask = ((val >> 7) ^ 1) << bit;
// Probe the bitmap. We expect the compiler to elide the bounds check.
(self.0[byte as usize] & mask) != 0
}
}
/// A bitmap covering all bytes.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct ByteBitmap([u16; 16]);
// TODO: the codegen here is pretty horrible; LLVM is emitting a sequence of
// halfword instructions. Consider using a union?
impl ByteBitmap {
/// Construct from a sequence of bytes.
pub fn new(bytes: &[u8]) -> ByteBitmap {
let mut bb = ByteBitmap::default();
for &b in bytes {
bb.set(b)
}
bb
}
/// \return whether this bitmap contains a given byte val.
#[inline(always)]
pub fn contains(&self, val: u8) -> bool {
let byte = val >> 4;
let bit = val & 0xF;
(self.0[byte as usize] & (1 << bit)) != 0
}
/// Set a bit in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
let byte = val >> 4;
let bit = val & 0xF;
self.0[byte as usize] |= 1 << bit;
}
/// Update ourselves from another bitmap, in place.
pub fn bitor(&mut self, rhs: &ByteBitmap) {
for idx in 0..self.0.len() {
self.0[idx] |= rhs.0[idx];
}
}
/// Invert our bits, in place.
pub fn bitnot(&mut self) -> &mut Self {
for val in self.0.iter_mut() {
*val = !*val;
}
self
}
/// Count number of set bits.
pub fn count_bits(&self) -> u32 {
self.0.iter().map(|v| v.count_ones()).sum()
}
/// \return all set bytes, as a vec.
#[allow(clippy::wrong_self_convention)]
pub fn to_vec(&self) -> Vec<u8> {
(0..=255).filter(|b| self.contains(*b)).collect()
}
/// \return the index of the first byte in the slice that is present in this
/// bitmap, using some unsafe tricks.
#[inline(always)]
fn unsafe_find_in_slice(&self, bytes: &[u8]) -> Option<usize> {
type Chunk = u32;
let bm = &self.0;
let mut offset = 0;
let (prefix, body, suffix) = unsafe { bytes.align_to::<Chunk>() };
for &byte in prefix.iter() {
if self.contains(byte) {
return Some(offset);
}
offset += 1;
}
for &chunk in body {
// Use LE. Here index 0 is the earliest address.
let byte_idxs = ((chunk >> 4) & 0x | {
return Some(idx);
} | conditional_block |
bytesearch.rs | (&self, rhs: &[u8]) -> bool {
debug_assert!(rhs.len() == Self::LENGTH, "Slice has wrong length");
if cfg!(feature = "prohibit-unsafe") {
// Here's what we would like to do. However this will emit an unnecessary length compare, and an unnecessary pointer compare.
self == rhs
} else {
// Warning: this is delicate. We intend for the compiler to emit optimized bytewise comparisons of unaligned LENGTH bytes,
// where LENGTH is a compile-time constant. Rust's default == on slices will perform a pointer comparison which will always be false,
// and kill any vectorization.
// memcmp() will be optimized to the builtin.
unsafe { memcmp(self.as_ptr(), rhs.as_ptr(), Self::LENGTH) == 0 }
}
}
}
impl<const N: usize> ByteSearcher for [u8; N] {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
if N == 1 {
return memchr::memchr(self[0], rhs);
}
for win in rhs.windows(Self::LENGTH) {
if self.equals_known_len(win) {
// Black magic?
return Some((win.as_ptr() as usize) - (rhs.as_ptr() as usize));
}
}
None
}
}
/// A ByteSet is any set of bytes.
pub trait ByteSet {
/// \return whether the ByteSet contains the byte.
fn contains(&self, b: u8) -> bool;
}
/// A ByteArraySet wraps a small array and uses linear equality.
#[derive(Copy, Clone, Debug)]
#[repr(align(4))]
pub struct ByteArraySet<ArraySet: SmallArraySet>(pub ArraySet);
/// Cover over contains() to avoid bumping into native contains call.
impl<ArraySet: SmallArraySet> ByteArraySet<ArraySet> {
#[inline(always)]
pub fn contains(self, b: u8) -> bool {
self.0.contains(b)
}
}
impl<ArraySet: SmallArraySet> ByteSearcher for ByteArraySet<ArraySet> {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
self.0.find_in(rhs)
}
}
/// A SmallArraySet is a set implemented as a small byte array.
pub trait SmallArraySet: Copy {
fn contains(self, b: u8) -> bool;
fn find_in(self, rhs: &[u8]) -> Option<usize>;
}
// Beware: Rust is cranky about loop unrolling.
// Do not try to be too clever here.
impl SmallArraySet for [u8; 2] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr2(self[0], self[1], rhs)
}
}
impl SmallArraySet for [u8; 3] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr3(self[0], self[1], self[2], rhs)
}
}
impl SmallArraySet for [u8; 4] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2] || b == self[3]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
// TODO.
for (idx, byte) in rhs.iter().enumerate() {
if self.contains(*byte) {
return Some(idx);
}
}
None
}
}
// CharSet helper. Avoid branching in the loop to get good unrolling.
#[allow(unused_parens)]
#[inline(always)]
pub fn charset_contains(set: &[u32; MAX_CHAR_SET_LENGTH], c: u32) -> bool {
let mut result = false;
for &v in set.iter() {
result |= (v == c);
}
result
}
/// A helper function for formatting bitmaps, using - ranges.
fn format_bitmap<Func>(name: &str, f: &mut fmt::Formatter<'_>, contains: Func) -> fmt::Result
where
Func: Fn(u8) -> bool,
{
write!(f, "{}[", name)?;
let mut idx = 0;
let mut maybe_space = "";
while idx <= 256 {
// Compute the next value not contained.
let mut end = idx;
while end <= 256 && contains(end as u8) {
end += 1;
}
match end - idx {
0 => (),
1 => write!(f, "{}{}", maybe_space, idx)?,
_ => write!(f, "{}{}-{}", maybe_space, idx, end - 1)?,
};
if end > idx {
maybe_space = " ";
}
idx = end + 1
}
write!(f, "]")?;
Ok(())
}
/// A bitmap covering ASCII characters.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct AsciiBitmap(pub [u8; 16]);
impl AsciiBitmap {
/// Set a byte val in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
debug_assert!(val <= 127, "Value should be ASCII");
self.0[(val >> 3) as usize] |= 1 << (val & 0x7);
}
}
impl fmt::Debug for AsciiBitmap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
format_bitmap("AsciiBitmap", f, |v| self.contains(v))
}
}
impl ByteSet for AsciiBitmap {
/// \return whether this bitmap contains a given value.
/// The value does NOT have to be ASCII.
#[inline(always)]
fn contains(&self, val: u8) -> bool {
// Delicate tricks to avoid branches.
// In general we want to compute the byte via /8, and then mask into the
// byte. But if the value is not ASCII then the byte could be too large.
// So mask off the MSB so that the byte is always in range.
let byte = (val & 0x7F) >> 3;
let bit = val & 0x7;
// Now probe the bitmap. If our sign bit was set, we want the mask to be 0;
// otherwise we want to set only the 'bit' offset.
// Invert the sign bit and reuse it.
let mask = ((val >> 7) ^ 1) << bit;
// Probe the bitmap. We expect the compiler to elide the bounds check.
(self.0[byte as usize] & mask) != 0
}
}
/// A bitmap covering all bytes.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct ByteBitmap([u16; 16]);
// TODO: the codegen here is pretty horrible; LLVM is emitting a sequence of
// halfword instructions. Consider using a union?
impl ByteBitmap {
/// Construct from a sequence of bytes.
pub fn new(bytes: &[u8]) -> ByteBitmap {
let mut bb = ByteBitmap::default();
for &b in bytes {
bb.set(b)
}
bb
}
/// \return whether this bitmap contains a given byte val.
#[inline(always)]
pub fn contains(&self, val: u8) -> bool {
let byte = val >> 4;
let bit = val & 0xF;
(self.0[byte as usize] & (1 << bit)) != 0
}
/// Set a bit in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
let byte = val >> 4;
let bit = val & 0xF;
self.0[byte as usize] |= 1 << bit;
}
/// Update ourselves from another bitmap, in place.
pub fn bitor(&mut self, rhs: &ByteBitmap) {
for idx in 0..self.0.len() {
self.0[idx] |= rhs.0[idx];
}
}
/// Invert our bits, in place.
pub fn bitnot(&mut self) -> &mut Self {
for val in self.0.iter_mut() {
*val = !*val;
}
self
}
/// Count number of set bits.
pub fn count_bits(&self) -> u32 {
self.0.iter().map(|v| v.count_ones()).sum()
}
/// \return all set bytes, as a vec.
#[allow(clippy::wrong_self_convention)]
pub fn to_vec(&self) -> Vec<u8> {
(0..=255).filter(|b| self.contains(*b)).collect()
}
| equals_known_len | identifier_name |
|
bytesearch.rs | {
fn contains(self, b: u8) -> bool;
fn find_in(self, rhs: &[u8]) -> Option<usize>;
}
// Beware: Rust is cranky about loop unrolling.
// Do not try to be too clever here.
impl SmallArraySet for [u8; 2] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr2(self[0], self[1], rhs)
}
}
impl SmallArraySet for [u8; 3] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr3(self[0], self[1], self[2], rhs)
}
}
impl SmallArraySet for [u8; 4] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2] || b == self[3]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
// TODO.
for (idx, byte) in rhs.iter().enumerate() {
if self.contains(*byte) {
return Some(idx);
}
}
None
}
}
// CharSet helper. Avoid branching in the loop to get good unrolling.
#[allow(unused_parens)]
#[inline(always)]
pub fn charset_contains(set: &[u32; MAX_CHAR_SET_LENGTH], c: u32) -> bool {
let mut result = false;
for &v in set.iter() {
result |= (v == c);
}
result
}
/// A helper function for formatting bitmaps, using - ranges.
fn format_bitmap<Func>(name: &str, f: &mut fmt::Formatter<'_>, contains: Func) -> fmt::Result
where
Func: Fn(u8) -> bool,
{
write!(f, "{}[", name)?;
let mut idx = 0;
let mut maybe_space = "";
while idx <= 256 {
// Compute the next value not contained.
let mut end = idx;
while end <= 256 && contains(end as u8) {
end += 1;
}
match end - idx {
0 => (),
1 => write!(f, "{}{}", maybe_space, idx)?,
_ => write!(f, "{}{}-{}", maybe_space, idx, end - 1)?,
};
if end > idx {
maybe_space = " ";
}
idx = end + 1
}
write!(f, "]")?;
Ok(())
}
/// A bitmap covering ASCII characters.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct AsciiBitmap(pub [u8; 16]);
impl AsciiBitmap {
/// Set a byte val in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
debug_assert!(val <= 127, "Value should be ASCII");
self.0[(val >> 3) as usize] |= 1 << (val & 0x7);
}
}
impl fmt::Debug for AsciiBitmap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
format_bitmap("AsciiBitmap", f, |v| self.contains(v))
}
}
impl ByteSet for AsciiBitmap {
/// \return whether this bitmap contains a given value.
/// The value does NOT have to be ASCII.
#[inline(always)]
fn contains(&self, val: u8) -> bool {
// Delicate tricks to avoid branches.
// In general we want to compute the byte via /8, and then mask into the
// byte. But if the value is not ASCII then the byte could be too large.
// So mask off the MSB so that the byte is always in range.
let byte = (val & 0x7F) >> 3;
let bit = val & 0x7;
// Now probe the bitmap. If our sign bit was set, we want the mask to be 0;
// otherwise we want to set only the 'bit' offset.
// Invert the sign bit and reuse it.
let mask = ((val >> 7) ^ 1) << bit;
// Probe the bitmap. We expect the compiler to elide the bounds check.
(self.0[byte as usize] & mask) != 0
}
}
/// A bitmap covering all bytes.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct ByteBitmap([u16; 16]);
// TODO: the codegen here is pretty horrible; LLVM is emitting a sequence of
// halfword instructions. Consider using a union?
impl ByteBitmap {
/// Construct from a sequence of bytes.
pub fn new(bytes: &[u8]) -> ByteBitmap {
let mut bb = ByteBitmap::default();
for &b in bytes {
bb.set(b)
}
bb
}
/// \return whether this bitmap contains a given byte val.
#[inline(always)]
pub fn contains(&self, val: u8) -> bool {
let byte = val >> 4;
let bit = val & 0xF;
(self.0[byte as usize] & (1 << bit)) != 0
}
/// Set a bit in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
let byte = val >> 4;
let bit = val & 0xF;
self.0[byte as usize] |= 1 << bit;
}
/// Update ourselves from another bitmap, in place.
pub fn bitor(&mut self, rhs: &ByteBitmap) {
for idx in 0..self.0.len() {
self.0[idx] |= rhs.0[idx];
}
}
/// Invert our bits, in place.
pub fn bitnot(&mut self) -> &mut Self {
for val in self.0.iter_mut() {
*val = !*val;
}
self
}
/// Count number of set bits.
pub fn count_bits(&self) -> u32 {
self.0.iter().map(|v| v.count_ones()).sum()
}
/// \return all set bytes, as a vec.
#[allow(clippy::wrong_self_convention)]
pub fn to_vec(&self) -> Vec<u8> {
(0..=255).filter(|b| self.contains(*b)).collect()
}
/// \return the index of the first byte in the slice that is present in this
/// bitmap, using some unsafe tricks.
#[inline(always)]
fn unsafe_find_in_slice(&self, bytes: &[u8]) -> Option<usize> {
type Chunk = u32;
let bm = &self.0;
let mut offset = 0;
let (prefix, body, suffix) = unsafe { bytes.align_to::<Chunk>() };
for &byte in prefix.iter() {
if self.contains(byte) {
return Some(offset);
}
offset += 1;
}
for &chunk in body {
// Use LE. Here index 0 is the earliest address.
let byte_idxs = ((chunk >> 4) & 0x0F0F0F0F).to_le_bytes();
let bit_idxs = (chunk & 0x0F0F0F0F).to_le_bytes();
if (bm[byte_idxs[0] as usize] & (1 << bit_idxs[0])) != 0 {
return Some(offset);
}
if (bm[byte_idxs[1] as usize] & (1 << bit_idxs[1])) != 0 {
return Some(offset + 1);
}
if (bm[byte_idxs[2] as usize] & (1 << bit_idxs[2])) != 0 {
return Some(offset + 2);
}
if (bm[byte_idxs[3] as usize] & (1 << bit_idxs[3])) != 0 {
return Some(offset + 3);
}
offset += 4;
}
for &byte in suffix.iter() {
if self.contains(byte) {
return Some(offset);
}
offset += 1;
}
None
}
}
impl ByteSearcher for ByteBitmap {
#[inline(always)]
fn find_in(&self, bytes: &[u8]) -> Option<usize> {
if cfg!(feature = "prohibit-unsafe") {
for (idx, byte) in bytes.iter().enumerate() {
if self.contains(*byte) {
return Some(idx);
}
}
None
} else {
self.unsafe_find_in_slice(bytes)
}
}
}
| impl fmt::Debug for ByteBitmap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | random_line_split |
|
bytesearch.rs | ::LENGTH) == 0 }
}
}
}
impl<const N: usize> ByteSearcher for [u8; N] {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
if N == 1 {
return memchr::memchr(self[0], rhs);
}
for win in rhs.windows(Self::LENGTH) {
if self.equals_known_len(win) {
// Black magic?
return Some((win.as_ptr() as usize) - (rhs.as_ptr() as usize));
}
}
None
}
}
/// A ByteSet is any set of bytes.
pub trait ByteSet {
/// \return whether the ByteSet contains the byte.
fn contains(&self, b: u8) -> bool;
}
/// A ByteArraySet wraps a small array and uses linear equality.
#[derive(Copy, Clone, Debug)]
#[repr(align(4))]
pub struct ByteArraySet<ArraySet: SmallArraySet>(pub ArraySet);
/// Cover over contains() to avoid bumping into native contains call.
impl<ArraySet: SmallArraySet> ByteArraySet<ArraySet> {
#[inline(always)]
pub fn contains(self, b: u8) -> bool {
self.0.contains(b)
}
}
impl<ArraySet: SmallArraySet> ByteSearcher for ByteArraySet<ArraySet> {
#[inline(always)]
fn find_in(&self, rhs: &[u8]) -> Option<usize> {
self.0.find_in(rhs)
}
}
/// A SmallArraySet is a set implemented as a small byte array.
pub trait SmallArraySet: Copy {
fn contains(self, b: u8) -> bool;
fn find_in(self, rhs: &[u8]) -> Option<usize>;
}
// Beware: Rust is cranky about loop unrolling.
// Do not try to be too clever here.
impl SmallArraySet for [u8; 2] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr2(self[0], self[1], rhs)
}
}
impl SmallArraySet for [u8; 3] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
memchr::memchr3(self[0], self[1], self[2], rhs)
}
}
impl SmallArraySet for [u8; 4] {
#[inline(always)]
fn contains(self, b: u8) -> bool {
b == self[0] || b == self[1] || b == self[2] || b == self[3]
}
#[inline(always)]
fn find_in(self, rhs: &[u8]) -> Option<usize> {
// TODO.
for (idx, byte) in rhs.iter().enumerate() {
if self.contains(*byte) {
return Some(idx);
}
}
None
}
}
// CharSet helper. Avoid branching in the loop to get good unrolling.
#[allow(unused_parens)]
#[inline(always)]
pub fn charset_contains(set: &[u32; MAX_CHAR_SET_LENGTH], c: u32) -> bool {
let mut result = false;
for &v in set.iter() {
result |= (v == c);
}
result
}
/// A helper function for formatting bitmaps, using - ranges.
fn format_bitmap<Func>(name: &str, f: &mut fmt::Formatter<'_>, contains: Func) -> fmt::Result
where
Func: Fn(u8) -> bool,
{
write!(f, "{}[", name)?;
let mut idx = 0;
let mut maybe_space = "";
while idx <= 256 {
// Compute the next value not contained.
let mut end = idx;
while end <= 256 && contains(end as u8) {
end += 1;
}
match end - idx {
0 => (),
1 => write!(f, "{}{}", maybe_space, idx)?,
_ => write!(f, "{}{}-{}", maybe_space, idx, end - 1)?,
};
if end > idx {
maybe_space = " ";
}
idx = end + 1
}
write!(f, "]")?;
Ok(())
}
/// A bitmap covering ASCII characters.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct AsciiBitmap(pub [u8; 16]);
impl AsciiBitmap {
/// Set a byte val in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
debug_assert!(val <= 127, "Value should be ASCII");
self.0[(val >> 3) as usize] |= 1 << (val & 0x7);
}
}
impl fmt::Debug for AsciiBitmap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
format_bitmap("AsciiBitmap", f, |v| self.contains(v))
}
}
impl ByteSet for AsciiBitmap {
/// \return whether this bitmap contains a given value.
/// The value does NOT have to be ASCII.
#[inline(always)]
fn contains(&self, val: u8) -> bool {
// Delicate tricks to avoid branches.
// In general we want to compute the byte via /8, and then mask into the
// byte. But if the value is not ASCII then the byte could be too large.
// So mask off the MSB so that the byte is always in range.
let byte = (val & 0x7F) >> 3;
let bit = val & 0x7;
// Now probe the bitmap. If our sign bit was set, we want the mask to be 0;
// otherwise we want to set only the 'bit' offset.
// Invert the sign bit and reuse it.
let mask = ((val >> 7) ^ 1) << bit;
// Probe the bitmap. We expect the compiler to elide the bounds check.
(self.0[byte as usize] & mask) != 0
}
}
/// A bitmap covering all bytes.
#[derive(Default, Copy, Clone)]
#[repr(align(4))]
pub struct ByteBitmap([u16; 16]);
// TODO: the codegen here is pretty horrible; LLVM is emitting a sequence of
// halfword instructions. Consider using a union?
impl ByteBitmap {
/// Construct from a sequence of bytes.
pub fn new(bytes: &[u8]) -> ByteBitmap {
let mut bb = ByteBitmap::default();
for &b in bytes {
bb.set(b)
}
bb
}
/// \return whether this bitmap contains a given byte val.
#[inline(always)]
pub fn contains(&self, val: u8) -> bool |
/// Set a bit in this bitmap.
#[inline(always)]
pub fn set(&mut self, val: u8) {
let byte = val >> 4;
let bit = val & 0xF;
self.0[byte as usize] |= 1 << bit;
}
/// Update ourselves from another bitmap, in place.
pub fn bitor(&mut self, rhs: &ByteBitmap) {
for idx in 0..self.0.len() {
self.0[idx] |= rhs.0[idx];
}
}
/// Invert our bits, in place.
pub fn bitnot(&mut self) -> &mut Self {
for val in self.0.iter_mut() {
*val = !*val;
}
self
}
/// Count number of set bits.
pub fn count_bits(&self) -> u32 {
self.0.iter().map(|v| v.count_ones()).sum()
}
/// \return all set bytes, as a vec.
#[allow(clippy::wrong_self_convention)]
pub fn to_vec(&self) -> Vec<u8> {
(0..=255).filter(|b| self.contains(*b)).collect()
}
/// \return the index of the first byte in the slice that is present in this
/// bitmap, using some unsafe tricks.
#[inline(always)]
fn unsafe_find_in_slice(&self, bytes: &[u8]) -> Option<usize> {
type Chunk = u32;
let bm = &self.0;
let mut offset = 0;
let (prefix, body, suffix) = unsafe { bytes.align_to::<Chunk>() };
for &byte in prefix.iter() {
if self.contains(byte) {
return Some(offset);
}
offset += 1;
}
for &chunk in body {
// Use LE. Here index 0 is the earliest address.
let byte_idxs = ((chunk >> 4) & 0x | {
let byte = val >> 4;
let bit = val & 0xF;
(self.0[byte as usize] & (1 << bit)) != 0
} | identifier_body |
trainer.py | eg_index)
num_pos = len(pos_index)
dice = {"dice_all": dice}
if self.get_class_metric:
num_classes = probability.shape[1]
for c in range(num_classes):
iflat = probability[:, c,...].view(batch_size, -1)
tflat = truth[:, c,...].view(batch_size, -1)
intersection = (iflat * tflat).sum()
dice[str(c)] = ((2. * intersection) / (iflat.sum() + tflat.sum() + 1e-7)).item()
return iou, dice, neg, dice_pos, num_neg, num_pos
def update(self, phase, targets, outputs):
"""updates metrics lists every iteration"""
iou, dice, dice_neg, dice_pos, _, _ = self.metric(outputs, targets)
self.base_dice_scores[phase].append(dice)
self.dice_pos_scores[phase].append(dice_pos)
self.dice_neg_scores[phase].append(dice_neg)
self.iou_scores[phase].append(iou)
def get_metrics(self, phase):
"""averages computed metrics over the epoch"""
dice = {}
l = len(self.base_dice_scores[phase])
for i, d in enumerate(self.base_dice_scores[phase]):
for k in d:
if k not in dice:
dice[k] = 0
dice[k] += d[k] / l
dice_neg = np.mean(self.dice_neg_scores[phase])
dice_pos = np.mean(self.dice_pos_scores[phase])
dices = [dice, dice_neg, dice_pos]
iou = np.nanmean(self.iou_scores[phase])
return dices, iou
def epoch_log(self, phase, epoch_loss, itr):
'''logging the metrics at the end of an epoch'''
dices, iou = self.get_metrics(phase)
dice, dice_neg, dice_pos = dices
message = "Phase: %s | Loss: %0.4f | IoU: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f" \
% (phase, epoch_loss, iou, dice["dice_all"], dice_neg, dice_pos)
logging.info(message)
self.tb_log.add_scalar(f'{phase}_dice', dice["dice_all"], itr)
self.tb_log.add_scalar(f'{phase}_dice_neg', dice_neg, itr)
self.tb_log.add_scalar(f'{phase}_dice_pos', dice_pos, itr)
self.tb_log.add_scalar(f'{phase}_iou', iou, itr)
return dice, iou
class BCEDiceLoss:
"""
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: loss.
"""
def __init__(self, bce_weight=1., weight=None, eps=1e-7,
smooth=.0, class_weights=[], threshold=0., activate=False):
self.bce_weight = bce_weight
self.eps = eps
self.smooth = smooth
self.threshold = threshold # 0 or apply sigmoid and threshold > .5 instead
self.activate = activate
self.class_weights = class_weights
self.nll = torch.nn.BCEWithLogitsLoss(weight=weight)
def __call__(self, logits, true):
loss = self.bce_weight * self.nll(logits, true)
if self.bce_weight < 1.:
dice_loss = 0.
batch_size, num_classes = logits.shape[:2]
if self.activate:
logits = torch.sigmoid(logits)
logits = (logits > self.threshold).float()
for c in range(num_classes):
iflat = logits[:, c,...].view(batch_size, -1)
tflat = true[:, c,...].view(batch_size, -1)
intersection = (iflat * tflat).sum()
w = self.class_weights[c]
dice_loss += w * ((2. * intersection + self.smooth) /
(iflat.sum() + tflat.sum() + self.smooth + self.eps))
loss -= (1 - self.bce_weight) * torch.log(dice_loss)
return loss
def fix_multigpu_chkpt_names(state_dict, drop=False):
""" fix the DataParallel caused problem with keys names """
new_state_dict = {}
for k in state_dict:
if drop:
new_k = re.sub("module.", "", k)
else:
new_k = "module." + k
new_state_dict[new_k] = copy.deepcopy(state_dict[k])
return new_state_dict
class Trainer(object):
'''Basic functionality for models fitting'''
__params = ('num_workers', 'class_weights', 'accumulation_batches',
'lr', 'weights_decay', 'base_threshold', 'scheduler_patience', 'activate',
'freeze_n_iters', 'bce_loss_weight', 'key_metric')
def __init__(self, model=None, image_dataset=None, optimizer=None, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.possible_phases = ["train", "val", "test"]
# Initialize logger
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s'
)
if not isinstance(self.devices_ids, list):
self.devices_ids = list(self.devices_ids)
# Run two model's instances on multiple GPUs ##############################################
# seems like multi-GPU mode works fine only for torch==1.1.0
# in other cases - try `DistributedDataParallel` https://github.com/pytorch/examples/blob/master/imagenet/main.py
if torch.cuda.is_available():
main_device = "cuda:%i" % self.devices_ids[0]
else:
main_device = "cpu"
self.device = torch.device(main_device)
self.net = model
self.multi_gpu_flag = (torch.cuda.device_count() > 1) * (len(self.devices_ids) > 1)
if self.multi_gpu_flag:
self.net = nn.DataParallel(self.net, device_ids=self.devices_ids, output_device=self.devices_ids[0])
self.net.to(self.device)
############################################################################################
self.best_metric = float("inf")
self.phases = ["train", "val"]
torch.set_default_tensor_type("torch.cuda.FloatTensor")
self.freeze_flag = True
self.start_epoch = 0
cudnn.benchmark = True
self.image_dataset = image_dataset
self.criterion = BCEDiceLoss(bce_weight=self.bce_loss_weight, class_weights=self.class_weights,
threshold=self.base_threshold, activate=self.activate)
self.optimizer = optimizer(self.net.parameters(), lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=self.scheduler_patience, verbose=True)
self.losses = {phase: [] for phase in self.phases}
self.iou_scores = {phase: [] for phase in self.phases}
self.dice_scores = {phase: [] for phase in self.phases}
self.meter = Meter(self.model_path, self.base_threshold)
if self.load_checkpoint:
self.load_model(ckpt_name=self.load_checkpoint)
self.accumulation_steps = self.batch_size * self.accumulation_batches
# number of workers affect the GPU performance if the preprocessing too intensive (resizes \ augs)
self.num_workers = max(2, self.batch_size // 2)
# self.num_workers = self.batch_size
logging.info(f"Trainer initialized on {len(self.devices_ids)} devices!")
def load_model(self, ckpt_name="best_model.pth"):
"""Loads full model state and basic training params"""
path = "/".join(ckpt_name.split("/")[:-1])
chkpt = torch.load(ckpt_name)
self.start_epoch = chkpt['epoch'] | self.net.load_state_dict(new_state_dict)
else:
try:
self.net.load_state_dict(chkpt['state_dict'])
except:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)
self.net.load_state_dict(new_state_dict)
if self.load_optimizer_state:
self.optimizer.load_state_dict(chkpt['optimizer'])
logging.info("******** State loaded ********")
training_meta = pickle.load(open(f"{path}/training_meta.pickle.dat", "rb"))
for k, v in training_meta.items():
if k in self.__class__.__params:
setattr(self, k, v)
logging.info("******** Training params loaded ********")
def forward(self, images, targets):
"""allocate data and runs forward pass through the network"""
# send all variables to selected device
images = images.to(self.device)
masks = targets | self.best_metric = chkpt['best_metric']
# fix the DataParallel caused problem with keys names
if self.multi_gpu_flag:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False) | random_line_split |
trainer.py | eg_index)
num_pos = len(pos_index)
dice = {"dice_all": dice}
if self.get_class_metric:
num_classes = probability.shape[1]
for c in range(num_classes):
iflat = probability[:, c,...].view(batch_size, -1)
tflat = truth[:, c,...].view(batch_size, -1)
intersection = (iflat * tflat).sum()
dice[str(c)] = ((2. * intersection) / (iflat.sum() + tflat.sum() + 1e-7)).item()
return iou, dice, neg, dice_pos, num_neg, num_pos
def update(self, phase, targets, outputs):
"""updates metrics lists every iteration"""
iou, dice, dice_neg, dice_pos, _, _ = self.metric(outputs, targets)
self.base_dice_scores[phase].append(dice)
self.dice_pos_scores[phase].append(dice_pos)
self.dice_neg_scores[phase].append(dice_neg)
self.iou_scores[phase].append(iou)
def get_metrics(self, phase):
"""averages computed metrics over the epoch"""
dice = {}
l = len(self.base_dice_scores[phase])
for i, d in enumerate(self.base_dice_scores[phase]):
for k in d:
if k not in dice:
dice[k] = 0
dice[k] += d[k] / l
dice_neg = np.mean(self.dice_neg_scores[phase])
dice_pos = np.mean(self.dice_pos_scores[phase])
dices = [dice, dice_neg, dice_pos]
iou = np.nanmean(self.iou_scores[phase])
return dices, iou
def epoch_log(self, phase, epoch_loss, itr):
'''logging the metrics at the end of an epoch'''
dices, iou = self.get_metrics(phase)
dice, dice_neg, dice_pos = dices
message = "Phase: %s | Loss: %0.4f | IoU: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f" \
% (phase, epoch_loss, iou, dice["dice_all"], dice_neg, dice_pos)
logging.info(message)
self.tb_log.add_scalar(f'{phase}_dice', dice["dice_all"], itr)
self.tb_log.add_scalar(f'{phase}_dice_neg', dice_neg, itr)
self.tb_log.add_scalar(f'{phase}_dice_pos', dice_pos, itr)
self.tb_log.add_scalar(f'{phase}_iou', iou, itr)
return dice, iou
class BCEDiceLoss:
"""
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: loss.
"""
def __init__(self, bce_weight=1., weight=None, eps=1e-7,
smooth=.0, class_weights=[], threshold=0., activate=False):
self.bce_weight = bce_weight
self.eps = eps
self.smooth = smooth
self.threshold = threshold # 0 or apply sigmoid and threshold > .5 instead
self.activate = activate
self.class_weights = class_weights
self.nll = torch.nn.BCEWithLogitsLoss(weight=weight)
def __call__(self, logits, true):
loss = self.bce_weight * self.nll(logits, true)
if self.bce_weight < 1.:
dice_loss = 0.
batch_size, num_classes = logits.shape[:2]
if self.activate:
logits = torch.sigmoid(logits)
logits = (logits > self.threshold).float()
for c in range(num_classes):
iflat = logits[:, c,...].view(batch_size, -1)
tflat = true[:, c,...].view(batch_size, -1)
intersection = (iflat * tflat).sum()
w = self.class_weights[c]
dice_loss += w * ((2. * intersection + self.smooth) /
(iflat.sum() + tflat.sum() + self.smooth + self.eps))
loss -= (1 - self.bce_weight) * torch.log(dice_loss)
return loss
def | (state_dict, drop=False):
""" fix the DataParallel caused problem with keys names """
new_state_dict = {}
for k in state_dict:
if drop:
new_k = re.sub("module.", "", k)
else:
new_k = "module." + k
new_state_dict[new_k] = copy.deepcopy(state_dict[k])
return new_state_dict
class Trainer(object):
'''Basic functionality for models fitting'''
__params = ('num_workers', 'class_weights', 'accumulation_batches',
'lr', 'weights_decay', 'base_threshold', 'scheduler_patience', 'activate',
'freeze_n_iters', 'bce_loss_weight', 'key_metric')
def __init__(self, model=None, image_dataset=None, optimizer=None, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.possible_phases = ["train", "val", "test"]
# Initialize logger
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s'
)
if not isinstance(self.devices_ids, list):
self.devices_ids = list(self.devices_ids)
# Run two model's instances on multiple GPUs ##############################################
# seems like multi-GPU mode works fine only for torch==1.1.0
# in other cases - try `DistributedDataParallel` https://github.com/pytorch/examples/blob/master/imagenet/main.py
if torch.cuda.is_available():
main_device = "cuda:%i" % self.devices_ids[0]
else:
main_device = "cpu"
self.device = torch.device(main_device)
self.net = model
self.multi_gpu_flag = (torch.cuda.device_count() > 1) * (len(self.devices_ids) > 1)
if self.multi_gpu_flag:
self.net = nn.DataParallel(self.net, device_ids=self.devices_ids, output_device=self.devices_ids[0])
self.net.to(self.device)
############################################################################################
self.best_metric = float("inf")
self.phases = ["train", "val"]
torch.set_default_tensor_type("torch.cuda.FloatTensor")
self.freeze_flag = True
self.start_epoch = 0
cudnn.benchmark = True
self.image_dataset = image_dataset
self.criterion = BCEDiceLoss(bce_weight=self.bce_loss_weight, class_weights=self.class_weights,
threshold=self.base_threshold, activate=self.activate)
self.optimizer = optimizer(self.net.parameters(), lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=self.scheduler_patience, verbose=True)
self.losses = {phase: [] for phase in self.phases}
self.iou_scores = {phase: [] for phase in self.phases}
self.dice_scores = {phase: [] for phase in self.phases}
self.meter = Meter(self.model_path, self.base_threshold)
if self.load_checkpoint:
self.load_model(ckpt_name=self.load_checkpoint)
self.accumulation_steps = self.batch_size * self.accumulation_batches
# number of workers affect the GPU performance if the preprocessing too intensive (resizes \ augs)
self.num_workers = max(2, self.batch_size // 2)
# self.num_workers = self.batch_size
logging.info(f"Trainer initialized on {len(self.devices_ids)} devices!")
def load_model(self, ckpt_name="best_model.pth"):
"""Loads full model state and basic training params"""
path = "/".join(ckpt_name.split("/")[:-1])
chkpt = torch.load(ckpt_name)
self.start_epoch = chkpt['epoch']
self.best_metric = chkpt['best_metric']
# fix the DataParallel caused problem with keys names
if self.multi_gpu_flag:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)
self.net.load_state_dict(new_state_dict)
else:
try:
self.net.load_state_dict(chkpt['state_dict'])
except:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)
self.net.load_state_dict(new_state_dict)
if self.load_optimizer_state:
self.optimizer.load_state_dict(chkpt['optimizer'])
logging.info("******** State loaded ********")
training_meta = pickle.load(open(f"{path}/training_meta.pickle.dat", "rb"))
for k, v in training_meta.items():
if k in self.__class__.__params:
setattr(self, k, v)
logging.info("******** Training params loaded ********")
def forward(self, images, targets):
"""allocate data and runs forward pass through the network"""
# send all variables to selected device
images = images.to(self.device)
masks = | fix_multigpu_chkpt_names | identifier_name |
trainer.py | dice_neg = np.mean(self.dice_neg_scores[phase])
dice_pos = np.mean(self.dice_pos_scores[phase])
dices = [dice, dice_neg, dice_pos]
iou = np.nanmean(self.iou_scores[phase])
return dices, iou
def epoch_log(self, phase, epoch_loss, itr):
'''logging the metrics at the end of an epoch'''
dices, iou = self.get_metrics(phase)
dice, dice_neg, dice_pos = dices
message = "Phase: %s | Loss: %0.4f | IoU: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f" \
% (phase, epoch_loss, iou, dice["dice_all"], dice_neg, dice_pos)
logging.info(message)
self.tb_log.add_scalar(f'{phase}_dice', dice["dice_all"], itr)
self.tb_log.add_scalar(f'{phase}_dice_neg', dice_neg, itr)
self.tb_log.add_scalar(f'{phase}_dice_pos', dice_pos, itr)
self.tb_log.add_scalar(f'{phase}_iou', iou, itr)
return dice, iou
class BCEDiceLoss:
"""
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: loss.
"""
def __init__(self, bce_weight=1., weight=None, eps=1e-7,
smooth=.0, class_weights=[], threshold=0., activate=False):
self.bce_weight = bce_weight
self.eps = eps
self.smooth = smooth
self.threshold = threshold # 0 or apply sigmoid and threshold > .5 instead
self.activate = activate
self.class_weights = class_weights
self.nll = torch.nn.BCEWithLogitsLoss(weight=weight)
def __call__(self, logits, true):
loss = self.bce_weight * self.nll(logits, true)
if self.bce_weight < 1.:
dice_loss = 0.
batch_size, num_classes = logits.shape[:2]
if self.activate:
logits = torch.sigmoid(logits)
logits = (logits > self.threshold).float()
for c in range(num_classes):
iflat = logits[:, c,...].view(batch_size, -1)
tflat = true[:, c,...].view(batch_size, -1)
intersection = (iflat * tflat).sum()
w = self.class_weights[c]
dice_loss += w * ((2. * intersection + self.smooth) /
(iflat.sum() + tflat.sum() + self.smooth + self.eps))
loss -= (1 - self.bce_weight) * torch.log(dice_loss)
return loss
def fix_multigpu_chkpt_names(state_dict, drop=False):
""" fix the DataParallel caused problem with keys names """
new_state_dict = {}
for k in state_dict:
if drop:
new_k = re.sub("module.", "", k)
else:
new_k = "module." + k
new_state_dict[new_k] = copy.deepcopy(state_dict[k])
return new_state_dict
class Trainer(object):
'''Basic functionality for models fitting'''
__params = ('num_workers', 'class_weights', 'accumulation_batches',
'lr', 'weights_decay', 'base_threshold', 'scheduler_patience', 'activate',
'freeze_n_iters', 'bce_loss_weight', 'key_metric')
def __init__(self, model=None, image_dataset=None, optimizer=None, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.possible_phases = ["train", "val", "test"]
# Initialize logger
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s'
)
if not isinstance(self.devices_ids, list):
self.devices_ids = list(self.devices_ids)
# Run two model's instances on multiple GPUs ##############################################
# seems like multi-GPU mode works fine only for torch==1.1.0
# in other cases - try `DistributedDataParallel` https://github.com/pytorch/examples/blob/master/imagenet/main.py
if torch.cuda.is_available():
main_device = "cuda:%i" % self.devices_ids[0]
else:
main_device = "cpu"
self.device = torch.device(main_device)
self.net = model
self.multi_gpu_flag = (torch.cuda.device_count() > 1) * (len(self.devices_ids) > 1)
if self.multi_gpu_flag:
self.net = nn.DataParallel(self.net, device_ids=self.devices_ids, output_device=self.devices_ids[0])
self.net.to(self.device)
############################################################################################
self.best_metric = float("inf")
self.phases = ["train", "val"]
torch.set_default_tensor_type("torch.cuda.FloatTensor")
self.freeze_flag = True
self.start_epoch = 0
cudnn.benchmark = True
self.image_dataset = image_dataset
self.criterion = BCEDiceLoss(bce_weight=self.bce_loss_weight, class_weights=self.class_weights,
threshold=self.base_threshold, activate=self.activate)
self.optimizer = optimizer(self.net.parameters(), lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=self.scheduler_patience, verbose=True)
self.losses = {phase: [] for phase in self.phases}
self.iou_scores = {phase: [] for phase in self.phases}
self.dice_scores = {phase: [] for phase in self.phases}
self.meter = Meter(self.model_path, self.base_threshold)
if self.load_checkpoint:
self.load_model(ckpt_name=self.load_checkpoint)
self.accumulation_steps = self.batch_size * self.accumulation_batches
# number of workers affect the GPU performance if the preprocessing too intensive (resizes \ augs)
self.num_workers = max(2, self.batch_size // 2)
# self.num_workers = self.batch_size
logging.info(f"Trainer initialized on {len(self.devices_ids)} devices!")
def load_model(self, ckpt_name="best_model.pth"):
"""Loads full model state and basic training params"""
path = "/".join(ckpt_name.split("/")[:-1])
chkpt = torch.load(ckpt_name)
self.start_epoch = chkpt['epoch']
self.best_metric = chkpt['best_metric']
# fix the DataParallel caused problem with keys names
if self.multi_gpu_flag:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)
self.net.load_state_dict(new_state_dict)
else:
try:
self.net.load_state_dict(chkpt['state_dict'])
except:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)
self.net.load_state_dict(new_state_dict)
if self.load_optimizer_state:
self.optimizer.load_state_dict(chkpt['optimizer'])
logging.info("******** State loaded ********")
training_meta = pickle.load(open(f"{path}/training_meta.pickle.dat", "rb"))
for k, v in training_meta.items():
if k in self.__class__.__params:
setattr(self, k, v)
logging.info("******** Training params loaded ********")
def forward(self, images, targets):
"""allocate data and runs forward pass through the network"""
# send all variables to selected device
images = images.to(self.device)
masks = targets.to(self.device)
# compute loss
outputs = self.net(images)
orig_size = self.image_dataset.orig_size
if outputs.size()[-2:] != orig_size:
# resize predictions back to the original size
outputs = nn.functional.interpolate(outputs, size=orig_size, mode='bilinear', align_corners=True)
loss = self.criterion(outputs, masks)
return loss, outputs
def dfs_freeze(self, model):
"""freezes weights of the input layer"""
for name, child in model.named_children():
for param in child.parameters():
param.requires_grad = False if self.freeze_flag else True
self.dfs_freeze(child)
# freezes UNet encoder
# doesn't work properely in Dataparallel mode
# since it's wrapps our model class
def freeze_encoder(self):
"""freezes encoder module in order to train the other part of the network"""
self.dfs_freeze(self.net.conv1)
self.dfs_freeze(self.net.conv2)
self.dfs_freeze(self.net.conv3)
self.dfs_freeze(self.net.conv4)
self.dfs_freeze(self.net.conv5)
def weights_decay(self):
"""adjust learning rate and weights decay"""
for param_group in self.optimizer.param_groups:
| for param in param_group['params']:
param.data = param.data.add(-1.*self.weights_decay * param_group['lr'], param.data) | conditional_block |
|
trainer.py |
def fix_multigpu_chkpt_names(state_dict, drop=False):
""" fix the DataParallel caused problem with keys names """
new_state_dict = {}
for k in state_dict:
if drop:
new_k = re.sub("module.", "", k)
else:
new_k = "module." + k
new_state_dict[new_k] = copy.deepcopy(state_dict[k])
return new_state_dict
class Trainer(object):
'''Basic functionality for models fitting'''
__params = ('num_workers', 'class_weights', 'accumulation_batches',
'lr', 'weights_decay', 'base_threshold', 'scheduler_patience', 'activate',
'freeze_n_iters', 'bce_loss_weight', 'key_metric')
def __init__(self, model=None, image_dataset=None, optimizer=None, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.possible_phases = ["train", "val", "test"]
# Initialize logger
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s'
)
if not isinstance(self.devices_ids, list):
self.devices_ids = list(self.devices_ids)
# Run two model's instances on multiple GPUs ##############################################
# seems like multi-GPU mode works fine only for torch==1.1.0
# in other cases - try `DistributedDataParallel` https://github.com/pytorch/examples/blob/master/imagenet/main.py
if torch.cuda.is_available():
main_device = "cuda:%i" % self.devices_ids[0]
else:
main_device = "cpu"
self.device = torch.device(main_device)
self.net = model
self.multi_gpu_flag = (torch.cuda.device_count() > 1) * (len(self.devices_ids) > 1)
if self.multi_gpu_flag:
self.net = nn.DataParallel(self.net, device_ids=self.devices_ids, output_device=self.devices_ids[0])
self.net.to(self.device)
############################################################################################
self.best_metric = float("inf")
self.phases = ["train", "val"]
torch.set_default_tensor_type("torch.cuda.FloatTensor")
self.freeze_flag = True
self.start_epoch = 0
cudnn.benchmark = True
self.image_dataset = image_dataset
self.criterion = BCEDiceLoss(bce_weight=self.bce_loss_weight, class_weights=self.class_weights,
threshold=self.base_threshold, activate=self.activate)
self.optimizer = optimizer(self.net.parameters(), lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=self.scheduler_patience, verbose=True)
self.losses = {phase: [] for phase in self.phases}
self.iou_scores = {phase: [] for phase in self.phases}
self.dice_scores = {phase: [] for phase in self.phases}
self.meter = Meter(self.model_path, self.base_threshold)
if self.load_checkpoint:
self.load_model(ckpt_name=self.load_checkpoint)
self.accumulation_steps = self.batch_size * self.accumulation_batches
# number of workers affect the GPU performance if the preprocessing too intensive (resizes \ augs)
self.num_workers = max(2, self.batch_size // 2)
# self.num_workers = self.batch_size
logging.info(f"Trainer initialized on {len(self.devices_ids)} devices!")
def load_model(self, ckpt_name="best_model.pth"):
"""Loads full model state and basic training params"""
path = "/".join(ckpt_name.split("/")[:-1])
chkpt = torch.load(ckpt_name)
self.start_epoch = chkpt['epoch']
self.best_metric = chkpt['best_metric']
# fix the DataParallel caused problem with keys names
if self.multi_gpu_flag:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)
self.net.load_state_dict(new_state_dict)
else:
try:
self.net.load_state_dict(chkpt['state_dict'])
except:
new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)
self.net.load_state_dict(new_state_dict)
if self.load_optimizer_state:
self.optimizer.load_state_dict(chkpt['optimizer'])
logging.info("******** State loaded ********")
training_meta = pickle.load(open(f"{path}/training_meta.pickle.dat", "rb"))
for k, v in training_meta.items():
if k in self.__class__.__params:
setattr(self, k, v)
logging.info("******** Training params loaded ********")
def forward(self, images, targets):
"""allocate data and runs forward pass through the network"""
# send all variables to selected device
images = images.to(self.device)
masks = targets.to(self.device)
# compute loss
outputs = self.net(images)
orig_size = self.image_dataset.orig_size
if outputs.size()[-2:] != orig_size:
# resize predictions back to the original size
outputs = nn.functional.interpolate(outputs, size=orig_size, mode='bilinear', align_corners=True)
loss = self.criterion(outputs, masks)
return loss, outputs
def dfs_freeze(self, model):
"""freezes weights of the input layer"""
for name, child in model.named_children():
for param in child.parameters():
param.requires_grad = False if self.freeze_flag else True
self.dfs_freeze(child)
# freezes UNet encoder
# doesn't work properely in Dataparallel mode
# since it's wrapps our model class
def freeze_encoder(self):
"""freezes encoder module in order to train the other part of the network"""
self.dfs_freeze(self.net.conv1)
self.dfs_freeze(self.net.conv2)
self.dfs_freeze(self.net.conv3)
self.dfs_freeze(self.net.conv4)
self.dfs_freeze(self.net.conv5)
def weights_decay(self):
"""adjust learning rate and weights decay"""
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
param.data = param.data.add(-1.*self.weights_decay * param_group['lr'], param.data)
def iterate(self, epoch, phase, data_set):
"""main method for traning: creates metric aggregator, dataloaders and updates the model params"""
if phase not in self.possible_phases:
raise ValueError('Phase type must be on of: {}'.format(self.possible_phases))
self.meter.reset_dicts()
start = time.strftime("%H:%M:%S")
logging.info(f"Starting epoch: {epoch} | phase: {phase} | time: {start}")
self.net.train(phase == "train")
self.image_dataset.set_phase(phase, data_set)
dataloader = DataLoader(
self.image_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
shuffle=True,
)
running_loss = 0.0
total_batches = len(dataloader)
self.optimizer.zero_grad()
tk0 = tqdm(dataloader, total=total_batches)
if self.freeze_n_iters:
self.freeze_encoder()
for itr, batch in enumerate(tk0):
if itr == (self.freeze_n_iters - 1):
self.freeze_flag = False
self.freeze_encoder()
images, targets, __ = batch
loss, outputs = self.forward(images, targets)
loss = loss / self.accumulation_steps
if phase == "train":
loss.backward()
if (itr + 1) % self.accumulation_steps == 0:
if self.weights_decay > 0:
self.apply_weights_decay()
self.optimizer.step()
self.optimizer.zero_grad()
running_loss += loss.item()
if self.activate:
outputs = torch.sigmoid(outputs)
outputs = outputs.detach().cpu()
self.meter.update(phase, targets, outputs)
running_loss_tick = (running_loss * self.accumulation_steps) / (itr + 1)
self.meter.tb_log.add_scalar(f'{phase}_loss', running_loss_tick, itr+total_batches*epoch)
tk0.set_postfix(loss=(running_loss_tick))
last_itr = itr+total_batches*epoch
epoch_loss = (running_loss * self.accumulation_steps) / total_batches
dice, iou = self.meter.epoch_log(phase, epoch_loss, last_itr)
self.losses[phase].append(epoch_loss)
self.dice_scores[phase].append(dice["dice_all"])
self.iou_scores[phase].append(iou)
torch.cuda.empty_cache()
return epoch_loss, dice, iou, last_itr
def make_new_dir(self):
"""makes new directory instead of existing one"""
try:
shutil.rmtree(self.model_path)
except OSError:
pass
os.mkdir(self.model_path)
def dump_meta(self):
| """dumpы all metrics and training meta-data"""
for dict_name in ["losses", "dice_scores", "iou_scores"]:
pickle.dump(self.__dict__[dict_name], open(f"{self.model_path}/{dict_name}.pickle.dat", "wb"))
# dump class variables for further training
training_meta = {}
for k in self.__class__.__params:
training_meta[k] = getattr(self, k)
pickle.dump(training_meta, open(f"{self.model_path}/training_meta.pickle.dat", "wb"))
| identifier_body |
|
Zichen Pan - S&P500.py | 20))
_ = plt.title('Stock A Daily Volume by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[35]:
# Correlation
sp500_plot.corr()
# In[36]:
df_close = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
pd.plotting.autocorrelation_plot(df_close);
# In[37]:
df_volume = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']]
pd.plotting.autocorrelation_plot(df_volume);
# ## Clustering
# In[38]:
sp500_df.head()
# In[39]:
sp500_mean = sp500_df.groupby('Ticker').mean()
sp500_mean.reset_index('Ticker', inplace = True)
sp500_mean
# In[40]:
X_train = sp500_mean.loc[:, ['Daily Closing Price', 'Daily Volume']]
X_train
# ### Kmeans
# In[41]:
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
# In[42]:
# Elbow method to determine K
distortions = []
K = range(1,10)
for k in K:
kmeanModel = KMeans(n_clusters=k).fit(X_train)
distortions.append(sum(np.min(cdist(X_train, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X_train.shape[0])
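# A cheaper alternative elbow criterion is the fitted model's inertia_
# (within-cluster sum of squared distances); sketch with the same X_train:
# distortions.append(KMeans(n_clusters=k).fit(X_train).inertia_ / X_train.shape[0])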
# In[43]:
# Plot the elbow
_ = plt.plot(K, distortions, 'bx-')
_ = plt.xlabel('k')
_ = plt.ylabel('Distortion')
_ = plt.title('The Elbow Method showing the optimal k')
# In[44]:
# K = 5
# In[45]:
# create a KMeans object which will generate 5 clusters
km = KMeans(n_clusters=5)
# In[46]:
# use fit_predict() to both fit our k-means model and generate cluster assignments
cluster_assignments = km.fit_predict(X_train)
cluster_assignments[:10]
# In[47]:
# Visualization of Clustering
fig = plt.figure(figsize=(10,10))
for i in range(5):
X_subset = X_train[cluster_assignments == i]
plt.scatter(X_subset.iloc[:,0], X_subset.iloc[:,1],s = 80,alpha = 0.8, label = 'cluster '+str(i))
plt.plot(km.cluster_centers_[i][0],km.cluster_centers_[i][1],marker='x',c='k', ms=20, mew=5, label=None);
_ = plt.legend();
_ = plt.xlabel('Daily Closing Price');
_ = plt.ylabel('Daily Volume')
_ = plt.title('Clustering', fontdict = {'fontsize':30})
# In[48]:
# Add the cluster assignment to sp500_mean
sp500_mean['Cluster'] = cluster_assignments
sp500_mean
# ## Descriptive Statistics
# In[49]:
from scipy.stats import trim_mean, kurtosis
from scipy.stats.mstats import mode, gmean, hmean
# In[50]:
sp500_plot.describe()
# In[51]:
df_group_daily_close = sp500_mean.groupby('Cluster')['Daily Closing Price']
df_group_daily_close.describe().sort_values('mean')
# In[52]:
df_group_daily_volume = sp500_mean.groupby('Cluster')['Daily Volume']
df_group_daily_volume.describe().sort_values('mean')
# In[53]:
# trim mean for Daily Closing Price sorted
df_group_daily_close.apply(trim_mean, .1).sort_values().reset_index()
# In[54]:
# trim mean for Daily Volume sorted
df_group_daily_volume.apply(trim_mean, .1).sort_values().reset_index()
# ## Prediction
# In[55]:
import warnings
import itertools
plt.style.use('fivethirtyeight')
import statsmodels.api as sm
# In[56]:
# We predict the Daily Closing Price of Stock A
df_A = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
df_A.head()
# ### Check Stationarity
# In[57]:
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
# Determining rolling statistics
rolmean = timeseries.rolling(7).mean()
rolstd = timeseries.rolling(7).std()
#Plot rolling statistics:
orig = plt.plot(timeseries, color='blue',label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
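# adfuller is imported but never called; a minimal check that could be added
# inside test_stationarity (sketch) is:
# adf_stat, p_value, *_ = adfuller(timeseries.iloc[:, 0].dropna())
# print('ADF statistic: {:.3f}, p-value: {:.3f}'.format(adf_stat, p_value))  # p < 0.05 suggests stationarity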
# In[58]:
test_stationarity(df_A)
# In[59]:
# We can tell that the dataset is approximately stationary
# In[60]:
df_A_weekly = df_A.resample('B').mean()  # note: 'B' resamples to business-day frequency, despite the variable name
df_A_weekly.fillna(method='ffill', inplace = True)
df_A_weekly
# In[61]:
_ = df_A_weekly.plot(figsize = (20,6))
# ### ARIMA
# In[62]:
# ARIMA
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(df_A_weekly, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit()
print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
except Exception:
continue
# In[63]:
# SARIMAX(1, 1, 1)x(0, 1, 1, 12) yields the lowest AIC value of 810.1769504153424
# In[64]:
mod = sm.tsa.statespace.SARIMAX(df_A_weekly,
order=(1, 1, 1),
seasonal_order=(0, 1, 1, 12),
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print(results.summary().tables[1])
# In[65]:
_ = results.plot_diagnostics(figsize=(20, 8))
# In[66]:
pred = results.get_prediction(start=pd.to_datetime('2019-01-01'), dynamic=False)
pred_confidence_interval = pred.conf_int()
ax = df_A_weekly['2018':].plot(label='observed')
_ = pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(20, 8))
ax.fill_between(pred_confidence_interval.index,
pred_confidence_interval.iloc[:, 0],
pred_confidence_interval.iloc[:, 1], color='k', alpha=.2)
ax.set_xlabel('Timestamp')
ax.set_ylabel('Weekly Mean Closing Price')
_ = plt.title('Stock A Weekly Closing Price', fontdict = {'fontsize':30})
_ = plt.legend()
# In[67]:
# RMSE
A_forecasted = pred.predicted_mean
A_truth = df_A_weekly['2019-01-01':].loc[:,'Daily Closing Price']
mse = ((A_forecasted - A_truth) ** 2).mean()
print('The Root Mean Squared Error of our forecasts is {}'.format(round(np.sqrt(mse), 2)))
# In[68]:
# Prediction Visualization
pred_uc = results.get_forecast(steps=240)
pred_ci = pred_uc.conf_int()
ax = df_A_weekly.plot(label='observed', figsize=(20, 7))
_ = pred_uc.predicted_mean.plot(ax=ax, label='Forecast')
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.25)
ax.set_xlabel('Timestamp')
ax.set_ylabel('Weekly Closing Price')
_ = plt.title('Stock A Weekly Closing Price Prediction In 2019', fontdict = {'fontsize':30})
_ = plt.legend()
# ### Prophet
# In[69]:
from fbprophet import Prophet
# In[70]:
# Dataframe must have columns 'ds' and 'y' with the dates and values respectively
df_A_prophet = df_A_weekly.reset_index('Timestamp')
df_A_prophet = df_A_prophet.rename(columns={'Timestamp': 'ds',
'Daily Closing Price': 'y'})
df_A_prophet.head()
# In[71]:
# set the uncertainty interval to 95%
my_model = Prophet(interval_width=0.95, daily_seasonality=True, weekly_seasonality=True)
# In[72]:
my_model.fit(df_A_prophet)
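# The forecasting step (not shown in this excerpt) would typically be:
# future = my_model.make_future_dataframe(periods=240, freq='B')
# forecast = my_model.predict(future)
# my_model.plot(forecast);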
# In[73]: | random_line_split |
||
Zichen Pan - S&P500.py |
with open("sp500tickers.pickle", "wb") as f:
pickle.dump(tickers, f)
return tickers
# In[3]:
tickers = sorted(save_sp500_tickers())
tickers[0]
# In[4]:
# get first stock ('A') from yahoo
start_date = datetime.date(2018,1,1)
end_date = datetime.date.today()
current_ticker = pdr.get_data_yahoo(symbols='A', start = start_date, end = end_date)
current_ticker['Ticker'] = len(current_ticker) * ['A']
current_ticker.reset_index('Date',inplace = True)
sp500 = current_ticker
sp500.head()
# In[5]:
# get all S&P 500 stocks from yahoo
# successful stocks
i = 1 # already add stock 'A'
for ticker in tickers[1:]:
try:
current_ticker = pdr.get_data_yahoo(symbols=ticker, start = start_date, end = end_date)
except Exception:
print("Error: skipping", ticker)
continue
current_ticker['Ticker'] = len(current_ticker) * [ticker]
current_ticker.reset_index('Date',inplace = True)
sp500 = sp500.append(current_ticker)
print(ticker,"finished")
i += 1
# In[6]:
i
# In[7]:
sp500
# In[8]:
# export to csv as original
sp500_original = sp500.reset_index(drop = True) # reset index
sp500_original.rename(columns={'Date':'Timestamp'}, inplace=True) # change column name
# reorder the columns
cols = sp500_original.columns.tolist()
cols = cols[:1] + cols[-1:] + cols[1:-1]
sp500_original = sp500_original[cols]
sp500_original.head()
# In[9]:
sp500_original.to_csv(r'/Users/panzichen/Global_AI/Zichen Pan - S&P500(Original).csv', index = None, header=True)
# In[10]:
# export to csv as requested
sp500_df = sp500_original
sp500_df.rename(columns={'Close':'Daily Closing Price', 'Volume':'Daily Volume'}, inplace = True)
sp500_df.drop(['High', 'Low', 'Open', 'Adj Close'], axis = 1,inplace = True)
sp500_df.head()
# In[11]:
sp500_df.to_csv(r'/Users/panzichen/Global_AI/Zichen Pan - S&P500(Requested).csv', index = None, header=True)
# ## Data Cleaning
# In[12]:
sp500_df.isnull().sum()
# ## Create SQL Table
# In[13]:
import sqlite3
# In[14]:
# connecting to the database
connection = sqlite3.connect("sp500.db")
# In[15]:
# cursor
crsr = connection.cursor()
# In[16]:
# drop the table if it already exists, so the CREATE command below can be rerun
crsr.execute("""DROP TABLE IF EXISTS sp500_df""")
# In[17]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[18]:
# SQL command to create a table in the database
sql_command_create = """CREATE TABLE sp500_df (
NUM INTEGER PRIMARY KEY,
Ticker VARCHAR(20),
Timestamp DATE,
Close FLOAT(8,6),
Volume INTEGER);"""
# In[19]:
crsr.execute(sql_command_create)
# In[20]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[21]:
# Insert data
for index, row in sp500_df.iterrows():
format_str = """INSERT INTO sp500_df (NUM, Ticker, Timestamp, Close, Volume)
VALUES ("{num}", "{ticker}", "{timestamp}", "{close}", "{volume}");"""
sql_command = format_str.format(num = index, ticker = row['Ticker'], timestamp = row['Timestamp'],
close = row['Daily Closing Price'], volume = int(row['Daily Volume']))
crsr.execute(sql_command)
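# A safer pattern is a parameterized query, which avoids quoting problems
# and SQL injection (sketch, same table as above):
# crsr.execute("INSERT INTO sp500_df VALUES (?, ?, ?, ?, ?)",
#              (index, row['Ticker'], row['Timestamp'],
#               row['Daily Closing Price'], int(row['Daily Volume'])))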
# In[22]:
# print data
import pprint
sql_select = """SELECT * from sp500_df;"""
crsr.execute(sql_select)
pprint.pprint(crsr.fetchall())
# In[23]:
# To save the changes in the files. Never skip this.
# If we skip this, nothing will be saved in the database.
connection.commit()
# In[24]:
# close the connection
connection.close()
# ## EDA
# In[25]:
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[26]:
sp500_df.head()
# In[27]:
sp500_plot = sp500_df.set_index("Timestamp")
sp500_plot.head()
# In[28]:
# Take Stock A as an example
# In[29]:
# data visualization of stock A Daily Closing Price by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Closing Price', fontdict = {'fontsize':30})
# In[30]:
# data visualization of stock A Daily Volume by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Volume', fontdict = {'fontsize':30})
# In[31]:
# Two features in one plot after Normalization
A_df = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price', 'Daily Volume']]
from sklearn import preprocessing
import numpy as np
import pandas as pd
close_array = np.array(A_df['Daily Closing Price'])
volume_array = np.array(A_df['Daily Volume'])
normalized_close = preprocessing.normalize([close_array])
normalized_volume = preprocessing.normalize([volume_array])
A_df['Daily Closing Price'] = normalized_close.flatten()
A_df['Daily Volume'] = normalized_volume.flatten()
A_df.head()
# In[32]:
# data visualization of stock A Daily Volume by day
ax = A_df.plot(figsize = (20,15))
_ = plt.title('Stock A Daily Data After Normalization', fontdict = {'fontsize':30})
# In[33]:
# data visualization of stock AA Closing Price by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Closing Price by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[34]:
# data visualization of stock AA Volume by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Volume by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[35]:
# Correlation
sp500_plot.corr()
# In[36]:
df_close = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
pd.plotting.autocorrelation_plot(df_close);
# In[37]:
df_volume = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']]
pd.plotting.autocorrelation_plot(df_volume);
# ## Clustering
# In[38]:
sp500_df.head()
# In[39]:
sp500_mean = sp500_df.groupby('Ticker').mean()
sp500_mean.reset_index('Ticker', inplace = True)
sp500_mean
# In[40]:
X_train = sp500_mean.loc[:, ['Daily Closing Price', 'Daily Volume']]
X_train
# ### Kmeans
# In[41]:
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
# In[42]:
# Elbow method to determine K
distortions = []
K = range(1,10)
for k in K:
kmeanModel = KMeans(n_clusters=k).fit(X_train)
distortions.append(sum(np.min(cdist(X_train, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X_train.shape[0])
# In[43]:
# Plot the elbow
_ = plt.plot(K, distortions, 'bx-')
_ = plt | ticker = row.findAll('td')[0].text
tickers.append(ticker) | conditional_block |
|
Zichen Pan - S&P500.py | # cursor
crsr = connection.cursor()
# In[16]:
# drop the table if it already exists, so the CREATE command below can be rerun
crsr.execute("""DROP TABLE IF EXISTS sp500_df""")
# In[17]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[18]:
# SQL command to create a table in the database
sql_command_create = """CREATE TABLE sp500_df (
NUM INTEGER PRIMARY KEY,
Ticker VARCHAR(20),
Timestamp DATE,
Close FLOAT(8,6),
Volume INTEGER);"""
# In[19]:
crsr.execute(sql_command_create)
# In[20]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[21]:
# Insert data
for index, row in sp500_df.iterrows():
format_str = """INSERT INTO sp500_df (NUM, Ticker, Timestamp, Close, Volume)
VALUES ("{num}", "{ticker}", "{timestamp}", "{close}", "{volume}");"""
sql_command = format_str.format(num = index, ticker = row['Ticker'], timestamp = row['Timestamp'],
close = row['Daily Closing Price'], volume = int(row['Daily Volume']))
crsr.execute(sql_command)
# In[22]:
# print data
import pprint
sql_select = """SELECT * from sp500_df;"""
crsr.execute(sql_select)
pprint.pprint(crsr.fetchall())
# In[23]:
# To save the changes in the files. Never skip this.
# If we skip this, nothing will be saved in the database.
connection.commit()
# In[24]:
# close the connection
connection.close()
# ## EDA
# In[25]:
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[26]:
sp500_df.head()
# In[27]:
sp500_plot = sp500_df.set_index("Timestamp")
sp500_plot.head()
# In[28]:
# Take Stock A as an example
# In[29]:
# data visualization of stock A Daily Closing Price by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Closing Price', fontdict = {'fontsize':30})
# In[30]:
# data visualization of stock A Daily Volume by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Volume', fontdict = {'fontsize':30})
# In[31]:
# Two features in one plot after Normalization
A_df = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price', 'Daily Volume']]
from sklearn import preprocessing
import numpy as np
import pandas as pd
close_array = np.array(A_df['Daily Closing Price'])
volume_array = np.array(A_df['Daily Volume'])
normalized_close = preprocessing.normalize([close_array])
normalized_volume = preprocessing.normalize([volume_array])
A_df['Daily Closing Price'] = normalized_close.flatten()
A_df['Daily Volume'] = normalized_volume.flatten()
A_df.head()
# In[32]:
# data visualization of stock A Daily Volume by day
ax = A_df.plot(figsize = (20,15))
_ = plt.title('Stock A Daily Data After Normalization', fontdict = {'fontsize':30})
# In[33]:
# data visualization of stock AA Closing Price by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Closing Price by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[34]:
# data visualization of stock AA Volume by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Volume by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[35]:
# Correlation
sp500_plot.corr()
# In[36]:
df_close = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
pd.plotting.autocorrelation_plot(df_close);
# In[37]:
df_volume = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']]
pd.plotting.autocorrelation_plot(df_volume);
# ## Clustering
# In[38]:
sp500_df.head()
# In[39]:
sp500_mean = sp500_df.groupby('Ticker').mean()
sp500_mean.reset_index('Ticker', inplace = True)
sp500_mean
# In[40]:
X_train = sp500_mean.loc[:, ['Daily Closing Price', 'Daily Volume']]
X_train
# ### Kmeans
# In[41]:
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
# In[42]:
# Elbow method to determine K
distortions = []
K = range(1,10)
for k in K:
kmeanModel = KMeans(n_clusters=k).fit(X_train)
distortions.append(sum(np.min(cdist(X_train, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X_train.shape[0])
# In[43]:
# Plot the elbow
_ = plt.plot(K, distortions, 'bx-')
_ = plt.xlabel('k')
_ = plt.ylabel('Distortion')
_ = plt.title('The Elbow Method showing the optimal k')
# In[44]:
# K = 5
# In[45]:
# create a KMeans object which will generate 5 clusters
km = KMeans(n_clusters=5)
# In[46]:
# use fit_predict() to both fit our k-means model and generate cluster assignments
cluster_assignments = km.fit_predict(X_train)
cluster_assignments[:10]
# In[47]:
# Visualization of Clustering
fig = plt.figure(figsize=(10,10))
for i in range(5):
X_subset = X_train[cluster_assignments == i]
plt.scatter(X_subset.iloc[:,0], X_subset.iloc[:,1],s = 80,alpha = 0.8, label = 'cluster '+str(i))
plt.plot(km.cluster_centers_[i][0],km.cluster_centers_[i][1],marker='x',c='k', ms=20, mew=5, label=None);
_ = plt.legend();
_ = plt.xlabel('Daily Closing Price');
_ = plt.ylabel('Daily Volume')
_ = plt.title('Clustering', fontdict = {'fontsize':30})
# In[48]:
# Add the cluster assignment to sp500_mean
sp500_mean['Cluster'] = cluster_assignments
sp500_mean
# ## Descriptive Statistics
# In[49]:
from scipy.stats import trim_mean, kurtosis
from scipy.stats.mstats import mode, gmean, hmean
# In[50]:
sp500_plot.describe()
# In[51]:
df_group_daily_close = sp500_mean.groupby('Cluster')['Daily Closing Price']
df_group_daily_close.describe().sort_values('mean')
# In[52]:
df_group_daily_volume = sp500_mean.groupby('Cluster')['Daily Volume']
df_group_daily_volume.describe().sort_values('mean')
# In[53]:
# trim mean for Daily Closing Price sorted
df_group_daily_close.apply(trim_mean, .1).sort_values().reset_index()
# In[54]:
# trim mean for Daily Volume sorted
df_group_daily_volume.apply(trim_mean, .1).sort_values().reset_index()
# ## Prediction
# In[55]:
import warnings
import itertools
plt.style.use('fivethirtyeight')
import statsmodels.api as sm
# In[56]:
# We predict the Daily Closing Price of Stock A
df_A = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
df_A.head()
# ### Check Stationarity
# In[57]:
from statsmodels.tsa.stattools import adfuller
def | (timeseries):
# Determining rolling statistics
rolmean = timeseries.rolling(7).mean()
rolstd = timeseries.rolling(7).std()
#Plot rolling statistics:
orig = plt.plot(timeseries, color='blue',label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color=' | test_stationarity | identifier_name |
Zichen Pan - S&P500.py | exists)
crsr.execute("""DROP TABLE sp500_df""")
# In[17]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[18]:
# SQL command to create a table in the database
sql_command_create = """CREATE TABLE sp500_df (
NUM INTEGER PRIMARY KEY,
Ticker VARCHAR(20),
Timestamp DATE,
Close FLOAT(8,6),
Volume INTEGER);"""
# In[19]:
crsr.execute(sql_command_create)
# In[20]:
crsr.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = crsr.fetchall()
print(tables)
# In[21]:
# Insert data
for index, row in sp500_df.iterrows():
format_str = """INSERT INTO sp500_df (NUM, Ticker, Timestamp, Close, Volume)
VALUES ("{num}", "{ticker}", "{timestamp}", "{close}", "{volume}");"""
sql_command = format_str.format(num = index, ticker = row['Ticker'], timestamp = row['Timestamp'],
close = row['Daily Closing Price'], volume = int(row['Daily Volume']))
crsr.execute(sql_command)
# In[22]:
# print data
import pprint
sql_select = """SELECT * from sp500_df;"""
crsr.execute(sql_select)
pprint.pprint(crsr.fetchall())
# In[23]:
# To save the changes in the files. Never skip this.
# If we skip this, nothing will be saved in the database.
connection.commit()
# In[24]:
# close the connection
connection.close()
# ## EDA
# In[25]:
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[26]:
sp500_df.head()
# In[27]:
sp500_plot = sp500_df.set_index("Timestamp")
sp500_plot.head()
# In[28]:
# Take Stock A as an example
# In[29]:
# data visualization of stock A Daily Closing Price by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Closing Price', fontdict = {'fontsize':30})
# In[30]:
# data visualization of stock A Daily Volume by day
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].plot(figsize = (20,15))
#ax.lines[0].set_alpha(0.3)
_ = plt.title('Stock A Daily Volume', fontdict = {'fontsize':30})
# In[31]:
# Two features in one plot after Normalization
A_df = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price', 'Daily Volume']]
from sklearn import preprocessing
import numpy as np
import pandas as pd
close_array = np.array(A_df['Daily Closing Price'])
volume_array = np.array(A_df['Daily Volume'])
normalized_close = preprocessing.normalize([close_array])
normalized_volume = preprocessing.normalize([volume_array])
A_df['Daily Closing Price'] = normalized_close.flatten()
A_df['Daily Volume'] = normalized_volume.flatten()
A_df.head()
# In[32]:
# data visualization of stock A Daily Volume by day
ax = A_df.plot(figsize = (20,15))
_ = plt.title('Stock A Daily Data After Normalization', fontdict = {'fontsize':30})
# In[33]:
# data visualization of stock AA Closing Price by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Closing Price by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[34]:
# data visualization of stock AA Volume by rolling of a week
ax = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']].rolling(7).mean().plot(style=['-',':'], figsize=(20,20))
_ = plt.title('Stock A Daily Volume by Weekly Rolling Mean', fontdict = {'fontsize':30})
# In[35]:
# Correlation
sp500_plot.corr()
# In[36]:
df_close = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
pd.plotting.autocorrelation_plot(df_close);
# In[37]:
df_volume = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Volume']]
pd.plotting.autocorrelation_plot(df_volume);
# ## Clustering
# In[38]:
sp500_df.head()
# In[39]:
sp500_mean = sp500_df.groupby('Ticker').mean()
sp500_mean.reset_index('Ticker', inplace = True)
sp500_mean
# In[40]:
X_train = sp500_mean.loc[:, ['Daily Closing Price', 'Daily Volume']]
X_train
# ### Kmeans
# In[41]:
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
# In[42]:
# Elbow method to determine K
distortions = []
K = range(1,10)
for k in K:
kmeanModel = KMeans(n_clusters=k).fit(X_train)
distortions.append(sum(np.min(cdist(X_train, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X_train.shape[0])
# In[43]:
# Plot the elbow
_ = plt.plot(K, distortions, 'bx-')
_ = plt.xlabel('k')
_ = plt.ylabel('Distortion')
_ = plt.title('The Elbow Method showing the optimal k')
# In[44]:
# K = 5
# In[45]:
# create a KMeans object which will generate 5 clusters
km = KMeans(n_clusters=5)
# In[46]:
# use fit_predict() to both fit our k-means model and generate cluster assignments
cluster_assignments = km.fit_predict(X_train)
cluster_assignments[:10]
# In[47]:
# Visualization of Clustering
fig = plt.figure(figsize=(10,10))
for i in range(5):
X_subset = X_train[cluster_assignments == i]
plt.scatter(X_subset.iloc[:,0], X_subset.iloc[:,1],s = 80,alpha = 0.8, label = 'cluster '+str(i))
plt.plot(km.cluster_centers_[i][0],km.cluster_centers_[i][1],marker='x',c='k', ms=20, mew=5, label=None);
_ = plt.legend();
_ = plt.xlabel('Daily Closing Price');
_ = plt.ylabel('Daily Volume')
_ = plt.title('Clustering', fontdict = {'fontsize':30})
# In[48]:
# Add the cluster assignment to sp500_mean
sp500_mean['Cluster'] = cluster_assignments
sp500_mean
# ## Descriptive Statistics
# In[49]:
from scipy.stats import trim_mean, kurtosis
from scipy.stats.mstats import mode, gmean, hmean
# In[50]:
sp500_plot.describe()
# In[51]:
df_group_daily_close = sp500_mean.groupby('Cluster')['Daily Closing Price']
df_group_daily_close.describe().sort_values('mean')
# In[52]:
df_group_daily_volume = sp500_mean.groupby('Cluster')['Daily Volume']
df_group_daily_volume.describe().sort_values('mean')
# In[53]:
# trim mean for Daily Closing Price sorted
df_group_daily_close.apply(trim_mean, .1).sort_values().reset_index()
# In[54]:
# trim mean for Daily Volume sorted
df_group_daily_volume.apply(trim_mean, .1).sort_values().reset_index()
# ## Prediction
# In[55]:
import warnings
import itertools
plt.style.use('fivethirtyeight')
import statsmodels.api as sm
# In[56]:
# We predict the Daily Closing Price of Stock A
df_A = sp500_plot.loc[sp500_plot['Ticker'] == 'A', ['Daily Closing Price']]
df_A.head()
# ### Check Stationarity
# In[57]:
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
# Determining rolling statistics
| rolmean = timeseries.rolling(7).mean()
rolstd = timeseries.rolling(7).std()
#Plot rolling statistics:
orig = plt.plot(timeseries, color='blue',label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False) | identifier_body |
|
context.go | handlers/middlewares who used this context.
Errors errorMsgs
}
/************************************/
/************* 创建上下文 *************/
/********** CONTEXT CREATION ********/
/************************************/
func (c *Context) reset() {
c.Response.Headers = make(map[string]string) // an empty map must be initialized before it can be used
c.Keys = nil
c.index = -1
c.handlers = c.engine.Handlers // added so that, together with c.engine = engine in chaos.start, handlers can be attached to the ctx
c.Errors = c.Errors[0:0]
}
/************************************/
/************* 调用链控制 *************/
/*********** FLOW CONTROL ***********/
/************************************/
// Next 应该确保只在中间件中使用
// 调用 Next 时会开始执行被挂在调用链中的后续 Handlers
// Next should be used only inside middleware.
// It executes the pending handlers in the chain inside the calling handler.
// See example in GitHub.
func (c *Context) Next() {
c.index++
for c.index < int8(len(c.handlers)) {
c.handlers[c.index](c)
c.index++
}
}
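// A minimal middleware sketch built on Next (hypothetical logger; assumes the
// HandlerFunc signature used by this framework and the standard log/time packages):
//
//	func Logger() HandlerFunc {
//		return func(c *Context) {
//			start := time.Now()
//			c.Next() // run the rest of the chain
//			log.Printf("handled in %v", time.Since(start))
//		}
//	}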
// AbortWithError 会在内部调用 `AbortWithStatus()` 和 `Error()`
// 这个方法可以停止调用链, 并且把指定的 code 写入 HTTP 响应头, 然后把错误推送进 `c.Errors`
// 可以查看 Context.Error() 获取更多细节
// AbortWithError calls `AbortWithStatus()` and `Error()` internally.
// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`.
// See Context.Error() for more details.
func (c *Context) AbortWithError(code int, err error) *Error {
c.AbortWithStatus(code)
return c.Error(err)
}
// Abort 可以结束调用链 ( 阻止将要被调用的 handlers 继续被调用 )
// 注意, 这个操作不会停止当前的 handler 继续执行
// 假设您有一个授权中间件来验证当前的请求是否已经授权
// 如果授权失败 ( 例如 : 密码不匹配 ), 则可以调用 Abort 来阻止这个请求上下文中剩余的 handlers
// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
// Let's say you have an authorization middleware that validates that the current request is authorized.
// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
// for this request are not called.
func (c *Context) Abort() {
c.index = abortIndex
}
// AbortWithStatus 会调用 `Abort()` 结束调用链, 并将指定的 code 写入 HTTP 响应头
// 例如, 身份验证失败的请求可以使用: context.AbortWithStatus(401)
// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
func (c *Context) AbortWithStatus(code int) {
c.Status(code)
c.Abort()
}
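// Usage sketch for a guard middleware (hypothetical token check):
//
//	func AuthRequired() HandlerFunc {
//		return func(c *Context) {
//			if c.GetHeader("Authorization") == "" {
//				c.AbortWithStatus(401) // later handlers are skipped; this one still finishes
//				return
//			}
//			c.Next()
//		}
//	}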
// AbortWithStatusJSON 会调用 `Abort()` 结束调用链, 并在内部调用 `JSON`
// AbortWithStatusJSON calls `Abort()` and then `JSON` internally.
// This method stops the chain, writes the status code and return a JSON body.
// It also sets the Content-Type as "application/json".
func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
c.Abort()
c.JSON(code, jsonObj)
}
/************************************/
/************** 错误管理 *************/
/********* ERROR MANAGEMENT *********/
/************************************/
// Error 用来将错误附加到当前上下问中, 附加的错误会被推送到错误列表中
// 对于在解析请求期间发生的错误, 将其传递给 Error 会是一个很好的处理方式
// 中间件可以用来收集所有的错误并将他们一起推送到数据中、打印日志或将其追加到 HTTP 响应中。
// 如果 err 为 nil 时 Error 会返回 panic
// Error attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together,
// print a log, or append it in the HTTP response.
// Error will panic if err is nil.
func (c *Context) Error(err error) *Error {
if err == nil {
panic("err is nil")
}
parsedError, ok := err.(*Error)
if !ok {
parsedError = &Error{
Err: err,
Type: ErrorTypePrivate,
}
}
c.Errors = append(c.Errors, parsedError)
return parsedError
}
/************************************/
/************* 元数据管理 *************/
/******** METADATA MANAGEMENT *******/
/************************************/
// Set 用于在上下文中创建键值对
// 如果 c.Keys 没有被初始化,他还会对其进行初始化
// Set is used to store a new key/value pair exclusively for this context.
// It also lazy initializes c.Keys if it was not used previously.
func (c *Context) Set(key string, value interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = value
}
// Get 返回给定的 key 对应的 value
// 即: (value, true), 如果给定的值不存在, 则返回 (nil, false)
// Get returns the value for the given key, ie: (value, true).
// If the value does not exists it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
value, exists = c.Keys[key]
return
}
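// Typical pairing of Set and Get across handlers (sketch):
//
//	c.Set("requestID", "abc123")
//	if v, ok := c.Get("requestID"); ok {
//		id := v.(string) // values are stored as interface{}, so assert the type
//		_ = id
//	}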
/************************************/
/************** 请求信息 **************/
/************ INPUT DATA ************/
/************************************/
// Param 可以返回 URL param 的值
// 与 gin 不同, 在本框架中, 它是 c.Request.PathParameters[key] 的语法糖
// Param returns the value of the URL param.
// Unlike gin, it is a shortcut for c.Request.PathParameters[key].
// router.GET("/user/:id", func(c *gin.Context) {
// // a GET request to /user/john
// id := c.Param("id") // id == "john"
// })
func (c *Context) Param(key string) string {
return c.Request.PathParameters[key]
}
// | tcut for c.ShouldBindWith(obj, binding.JSON).
func (c *Context) ShouldBindJSON(obj interface{}) error {
return c.ShouldBindWith(obj, binding.JSON)
}
// ShouldBindWith 使用指定的绑定引擎绑定数据到传递到结构体指针
// ShouldBindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
return b.Bind(c.Request, obj)
}
// ClientIP, after this adaptation, directly returns RequestContext.SourceIP from the SCF API Gateway trigger event
func (c *Context) ClientIP() string {
return c.Request.RequestContext.SourceIP
}
/************************************/
/************* 生成响应体 *************/
/******** RESPONSE RENDERING ********/
/************************************/
// bodyAllowedForStatus 是 http 包中的 http.bodyAllowedForStatus 函数的副本
// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == http.StatusNoContent:
return false
case status == http.StatusNotModified:
return false
}
return true
}
// Status 设置 HTTP 响应代码
// Status sets the HTTP response code.
func (c *Context) Status(code int) {
// the body of c.writermem.WriteHeader(code) is inlined here
if code > 0 && c.Response.StatusCode != code {
if c.Response.StatusCode != 0 {
debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", c.Response.StatusCode, code)
}
c.Response.StatusCode = code
}
}
// Header 可以在响应中写入响应头
// 如果 value == "", 这个方法会删除 key 对应的响应头 fixme 此处也许需要像gin一样使用 header 的 getter setter 来实现
// Header is a intelligent shortcut for c.Writer.Header().Set(key, value).
// It writes a header in the response.
// If value == "", this method removes the header `c.Writer.Header().Del(key)`
func (c *Context) Header(key, value string) {
if value == "" {
delete(c.Response.Headers, key)
return
}
c.Response.Headers[key] = value
}
// GetHeader returns the value for the given key from c.Request.Headers (the request headers, not the response headers)
// GetHeader returns value from request headers.
func (c *Context) GetHeader(key string) string {
// modeled on the net/textproto Header implementation
if len(c.Request.Headers[strings.ToLower(key)]) == 0 {
return ""
}
return c.Request.Headers[strings.ToLower(key)]
}
// Render 写入响应头并调用 render.Render 来呈现数据
// Render writes the | ShouldBindJSON 是 c.ShouldBindWith(obj, binding.JSON) 的快捷方式
// ShouldBindJSON is a shor | identifier_body |
context.go | the handlers/middlewares who used this context.
Errors errorMsgs
}
/************************************/
/************* 创建上下文 *************/
/********** CONTEXT CREATION ********/
/************************************/
func (c *Context) reset() {
c.Response.Headers = make(map[string]string) // an empty map must be initialized before it can be used
c.Keys = nil
c.index = -1
c.handlers = c.engine.Handlers // added so that, together with c.engine = engine in chaos.start, handlers can be attached to the ctx
c.Errors = c.Errors[0:0]
}
/************************************/
/************* 调用链控制 *************/
/*********** FLOW CONTROL ***********/
/************************************/
// Next 应该确保只在中间件中使用
// 调用 Next 时会开始执行被挂在调用链中的后续 Handlers
// Next should be used only inside middleware.
// It executes the pending handlers in the chain inside the calling handler.
// See example in GitHub.
func (c *Context) Next() {
c.index++
for c.index < int8(len(c.handlers)) {
c.handlers[c.index](c)
c.index++
}
}
// AbortWithError 会在内部调用 `AbortWithStatus()` 和 `Error()`
// 这个方法可以停止调用链, 并且把指定的 code 写入 HTTP 响应头, 然后把错误推送进 `c.Errors`
// 可以查看 Context.Error() 获取更多细节
// AbortWithError calls `AbortWithStatus()` and `Error()` internally.
// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`.
// See Context.Error() for more details.
func (c *Context) AbortWithError(code int, err error) *Error {
c.AbortWithStatus(code)
return c.Error(err)
}
// Abort 可以结束调用链 ( 阻止将要被调用的 handlers 继续被调用 )
// 注意, 这个操作不会停止当前的 handler 继续执行
// 假设您有一个授权中间件来验证当前的请求是否已经授权
// 如果授权失败 ( 例如 : 密码不匹配 ), 则可以调用 Abort 来阻止这个请求上下文中剩余的 handlers
// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
// Let's say you have an authorization middleware that validates that the current request is authorized.
// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
// for this request are not called.
func (c *Context) Abort() {
c.index = abortIndex
}
// AbortWithStatus 会调用 `Abort()` 结束调用链, 并将指定的 code 写入 HTTP 响应头
// 例如, 身份验证失败的请求可以使用: context.AbortWithStatus(401)
// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
func (c *Context) AbortWithStatus(code int) {
c.Status(code)
c.Abort()
}
// AbortWithStatusJSON 会调用 `Abort()` 结束调用链, 并在内部调用 `JSON`
// AbortWithStatusJSON calls `Abort()` and then `JSON` internally.
// This method stops the chain, writes the status code and return a JSON body.
// It also sets the Content-Type as "application/json".
func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
c.Abort()
c.JSON(code, jsonObj)
}
/************************************/
/************** 错误管理 *************/
/********* ERROR MANAGEMENT *********/
/************************************/
// Error 用来将错误附加到当前上下问中, 附加的错误会被推送到错误列表中
// 对于在解析请求期间发生的错误, 将其传递给 Error 会是一个很好的处理方式
// 中间件可以用来收集所有的错误并将他们一起推送到数据中、打印日志或将其追加到 HTTP 响应中。
// 如果 err 为 nil 时 Error 会返回 panic
// Error attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together,
// print a log, or append it in the HTTP response.
// Error will panic if err is nil.
func (c *Context) Error(err error) *Error {
if err == nil {
panic("err is nil")
}
parsedError, ok := err.(*Error)
if !ok {
parsedError = &Error{
Err: err,
Type: ErrorTypePrivate,
}
}
c.Errors = append(c.Errors, parsedError)
return parsedError
}
/************************************/
/************* 元数据管理 *************/
/******** METADATA MANAGEMENT *******/
/************************************/
// Set 用于在上下文中创建键值对
// 如果 c.Keys 没有被初始化,他还会对其进行初始化
// Set is used to store a new key/value pair exclusively for this context.
// It also lazy initializes c.Keys if it was not used previously.
func (c *Context) Set(key string, value interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = value
}
// Get 返回给定的 key 对应的 value
// 即: (value, true), 如果给定的值不存在, 则返回 (nil, false)
// Get returns the value for the given key, ie: (value, true).
// If the value does not exists it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
value, exists = c.Keys[key]
return
}
/************************************/
/************** 请求信息 **************/
/************ INPUT DATA ************/
/************************************/
// Param 可以返回 URL param 的值
// 与 gin 不同, 在本框架中, 它是 c.Request.PathParameters[key] 的语法糖
// Param returns the value of the URL param.
// Unlike gin, it is a shortcut for c.Request.PathParameters[key].
// router.GET("/user/:id", func(c *gin.Context) {
// // a GET request to /user/john
// id := c.Param("id") // id == "john"
// })
func (c *Context) Param(key string) string {
return c.Request.PathParameters[key]
}
| // ShouldBindJSON 是 c.ShouldBindWith(obj, binding.JSON) 的快捷方式
// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON).
func (c *Context) ShouldBindJSON(obj interface{}) error {
return c.ShouldBindWith(obj, binding.JSON)
}
// ShouldBindWith 使用指定的绑定引擎绑定数据到传递到结构体指针
// ShouldBindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
return b.Bind(c.Request, obj)
}
// ClientIP, after this adaptation, directly returns RequestContext.SourceIP from the SCF API Gateway trigger event
func (c *Context) ClientIP() string {
return c.Request.RequestContext.SourceIP
}
/************************************/
/************* 生成响应体 *************/
/******** RESPONSE RENDERING ********/
/************************************/
// bodyAllowedForStatus 是 http 包中的 http.bodyAllowedForStatus 函数的副本
// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == http.StatusNoContent:
return false
case status == http.StatusNotModified:
return false
}
return true
}
// Status 设置 HTTP 响应代码
// Status sets the HTTP response code.
func (c *Context) Status(code int) {
// the body of c.writermem.WriteHeader(code) is inlined here
if code > 0 && c.Response.StatusCode != code {
if c.Response.StatusCode != 0 {
debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", c.Response.StatusCode, code)
}
c.Response.StatusCode = code
}
}
// Header 可以在响应中写入响应头
// 如果 value == "", 这个方法会删除 key 对应的响应头 fixme 此处也许需要像gin一样使用 header 的 getter setter 来实现
// Header is a intelligent shortcut for c.Writer.Header().Set(key, value).
// It writes a header in the response.
// If value == "", this method removes the header `c.Writer.Header().Del(key)`
func (c *Context) Header(key, value string) {
if value == "" {
delete(c.Response.Headers, key)
return
}
c.Response.Headers[key] = value
}
// GetHeader returns the value for the given key from c.Request.Headers (the request headers, not the response headers)
// GetHeader returns value from request headers.
func (c *Context) GetHeader(key string) string {
// modeled on the net/textproto Header implementation
if len(c.Request.Headers[strings.ToLower(key)]) == 0 {
return ""
}
return c.Request.Headers[strings.ToLower(key)]
}
// Render 写入响应头并调用 render.Render 来呈现数据
// Render writes the response headers | random_line_split |
|
context.go | 入 HTTP 响应头, 然后把错误推送进 `c.Errors`
// 可以查看 Context.Error() 获取更多细节
// AbortWithError calls `AbortWithStatus()` and `Error()` internally.
// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`.
// See Context.Error() for more details.
func (c *Context) AbortWithError(code int, err error) *Error {
c.AbortWithStatus(code)
return c.Error(err)
}
// Abort 可以结束调用链 ( 阻止将要被调用的 handlers 继续被调用 )
// 注意, 这个操作不会停止当前的 handler 继续执行
// 假设您有一个授权中间件来验证当前的请求是否已经授权
// 如果授权失败 ( 例如 : 密码不匹配 ), 则可以调用 Abort 来阻止这个请求上下文中剩余的 handlers
// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
// Let's say you have an authorization middleware that validates that the current request is authorized.
// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
// for this request are not called.
func (c *Context) Abort() {
c.index = abortIndex
}
// AbortWithStatus 会调用 `Abort()` 结束调用链, 并将指定的 code 写入 HTTP 响应头
// 例如, 身份验证失败的请求可以使用: context.AbortWithStatus(401)
// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
func (c *Context) AbortWithStatus(code int) {
c.Status(code)
c.Abort()
}
// AbortWithStatusJSON 会调用 `Abort()` 结束调用链, 并在内部调用 `JSON`
// AbortWithStatusJSON calls `Abort()` and then `JSON` internally.
// This method stops the chain, writes the status code and return a JSON body.
// It also sets the Content-Type as "application/json".
func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
c.Abort()
c.JSON(code, jsonObj)
}
/************************************/
/************** 错误管理 *************/
/********* ERROR MANAGEMENT *********/
/************************************/
// Error 用来将错误附加到当前上下问中, 附加的错误会被推送到错误列表中
// 对于在解析请求期间发生的错误, 将其传递给 Error 会是一个很好的处理方式
// 中间件可以用来收集所有的错误并将他们一起推送到数据中、打印日志或将其追加到 HTTP 响应中。
// 如果 err 为 nil 时 Error 会返回 panic
// Error attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together,
// print a log, or append it in the HTTP response.
// Error will panic if err is nil.
func (c *Context) Error(err error) *Error {
if err == nil {
panic("err is nil")
}
parsedError, ok := err.(*Error)
if !ok {
parsedError = &Error{
Err: err,
Type: ErrorTypePrivate,
}
}
c.Errors = append(c.Errors, parsedError)
return parsedError
}
/************************************/
/************* 元数据管理 *************/
/******** METADATA MANAGEMENT *******/
/************************************/
// Set 用于在上下文中创建键值对
// 如果 c.Keys 没有被初始化,他还会对其进行初始化
// Set is used to store a new key/value pair exclusively for this context.
// It also lazy initializes c.Keys if it was not used previously.
func (c *Context) Set(key string, value interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = value
}
// Get 返回给定的 key 对应的 value
// 即: (value, true), 如果给定的值不存在, 则返回 (nil, false)
// Get returns the value for the given key, ie: (value, true).
// If the value does not exists it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
value, exists = c.Keys[key]
return
}
/************************************/
/************** 请求信息 **************/
/************ INPUT DATA ************/
/************************************/
// Param 可以返回 URL param 的值
// 与 gin 不同, 在本框架中, 它是 c.Request.PathParameters[key] 的语法糖
// Param returns the value of the URL param.
// Unlike gin, it is a shortcut for c.Request.PathParameters[key].
// router.GET("/user/:id", func(c *gin.Context) {
// // a GET request to /user/john
// id := c.Param("id") // id == "john"
// })
func (c *Context) Param(key string) string {
return c.Request.PathParameters[key]
}
// ShouldBindJSON 是 c.ShouldBindWith(obj, binding.JSON) 的快捷方式
// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON).
func (c *Context) ShouldBindJSON(obj interface{}) error {
return c.ShouldBindWith(obj, binding.JSON)
}
// ShouldBindWith 使用指定的绑定引擎绑定数据到传递到结构体指针
// ShouldBindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
return b.Bind(c.Request, obj)
}
// ClientIP, after this adaptation, directly returns RequestContext.SourceIP from the SCF API Gateway trigger event
func (c *Context) ClientIP() string {
return c.Request.RequestContext.SourceIP
}
/************************************/
/************* 生成响应体 *************/
/******** RESPONSE RENDERING ********/
/************************************/
// bodyAllowedForStatus 是 http 包中的 http.bodyAllowedForStatus 函数的副本
// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == http.StatusNoContent:
return false
case status == http.StatusNotModified:
return false
}
return true
}
// Status 设置 HTTP 响应代码
// Status sets the HTTP response code.
func (c *Context) Status(code int) {
// the body of c.writermem.WriteHeader(code) is inlined here
if code > 0 && c.Response.StatusCode != code {
if c.Response.StatusCode != 0 {
debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", c.Response.StatusCode, code)
}
c.Response.StatusCode = code
}
}
// Header 可以在响应中写入响应头
// 如果 value == "", 这个方法会删除 key 对应的响应头 fixme 此处也许需要像gin一样使用 header 的 getter setter 来实现
// Header is a intelligent shortcut for c.Writer.Header().Set(key, value).
// It writes a header in the response.
// If value == "", this method removes the header `c.Writer.Header().Del(key)`
func (c *Context) Header(key, value string) {
if value == "" {
delete(c.Response.Headers, key)
return
}
c.Response.Headers[key] = value
}
// GetHeader returns the value for the given key from c.Request.Headers (the request headers, not the response headers)
// GetHeader returns value from request headers.
func (c *Context) GetHeader(key string) string {
// modeled on the net/textproto Header implementation
if len(c.Request.Headers[strings.ToLower(key)]) == 0 {
return ""
}
return c.Request.Headers[strings.ToLower(key)]
}
// Render 写入响应头并调用 render.Render 来呈现数据
// Render writes the response headers and calls render.Render to render data.
func (c *Context) Render(code int, r render.Render) {
c.Status(code)
if !bodyAllowedForStatus(code) {
r.WriteContentType(&c.Response)
return
}
if err := r.Render(&c.Response); err != nil {
panic(err)
}
}
// JSON serializes the given struct as JSON into the response body.
// It also sets the Content-Type response header to "application/json".
func (c *Context) JSON(code int, obj interface{}) {
c.Render(code, render.JSON{Data: obj})
}
// String 将给定的字符串写入响应体
// String writes the given string into the response body.
func (c *Context) String(code int, format string, values ...interface{}) {
c.Render(code, render.String{Format: format, Data: values})
}
/************************************/
/*********** 实现上下文接口 ************/
/***** GOLANG.ORG/X/NET/CONTEXT *****/
/************************************/
// Deadline 返回返回此上下文的截止日期
// 当没有设置截止日期时,Deadline 返回 ok == false。 连续调用 Deadline 返回相同的结果
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// | set. Suc | identifier_name |
|
context.go | 增加 handler
c.Errors = c.Errors[0:0]
}
/************************************/
/************* 调用链控制 *************/
/*********** FLOW CONTROL ***********/
/************************************/
// Next 应该确保只在中间件中使用
// 调用 Next 时会开始执行被挂在调用链中的后续 Handlers
// Next should be used only inside middleware.
// It executes the pending handlers in the chain inside the calling handler.
// See example in GitHub.
func (c *Context) Next() {
c.index++
for c.index < int8(len(c.handlers)) {
c.handlers[c.index](c)
c.index++
}
}
// AbortWithError 会在内部调用 `AbortWithStatus()` 和 `Error()`
// 这个方法可以停止调用链, 并且把指定的 code 写入 HTTP 响应头, 然后把错误推送进 `c.Errors`
// 可以查看 Context.Error() 获取更多细节
// AbortWithError calls `AbortWithStatus()` and `Error()` internally.
// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`.
// See Context.Error() for more details.
func (c *Context) AbortWithError(code int, err error) *Error {
c.AbortWithStatus(code)
return c.Error(err)
}
// Abort 可以结束调用链 ( 阻止将要被调用的 handlers 继续被调用 )
// 注意, 这个操作不会停止当前的 handler 继续执行
// 假设您有一个授权中间件来验证当前的请求是否已经授权
// 如果授权失败 ( 例如 : 密码不匹配 ), 则可以调用 Abort 来阻止这个请求上下文中剩余的 handlers
// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
// Let's say you have an authorization middleware that validates that the current request is authorized.
// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
// for this request are not called.
func (c *Context) Abort() {
c.index = abortIndex
}
// AbortWithStatus 会调用 `Abort()` 结束调用链, 并将指定的 code 写入 HTTP 响应头
// 例如, 身份验证失败的请求可以使用: context.AbortWithStatus(401)
// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
func (c *Context) AbortWithStatus(code int) {
c.Status(code)
c.Abort()
}
// AbortWithStatusJSON 会调用 `Abort()` 结束调用链, 并在内部调用 `JSON`
// AbortWithStatusJSON calls `Abort()` and then `JSON` internally.
// This method stops the chain, writes the status code and return a JSON body.
// It also sets the Content-Type as "application/json".
func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
c.Abort()
c.JSON(code, jsonObj)
}
/************************************/
/************** 错误管理 *************/
/********* ERROR MANAGEMENT *********/
/************************************/
// Error 用来将错误附加到当前上下问中, 附加的错误会被推送到错误列表中
// 对于在解析请求期间发生的错误, 将其传递给 Error 会是一个很好的处理方式
// 中间件可以用来收集所有的错误并将他们一起推送到数据中、打印日志或将其追加到 HTTP 响应中。
// 如果 err 为 nil 时 Error 会返回 panic
// Error attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together,
// print a log, or append it in the HTTP response.
// Error will panic if err is nil.
func (c *Context) Error(err error) *Error {
if err == nil {
panic("err is nil")
}
parsedError, ok := err.(*Error)
if !ok {
parsedError = &Error{
Err: err,
Type: ErrorTypePrivate,
}
}
c.Errors = append(c.Errors, parsedError)
return parsedError
}
/************************************/
/************* 元数据管理 *************/
/******** METADATA MANAGEMENT *******/
/************************************/
// Set 用于在上下文中创建键值对
// 如果 c.Keys 没有被初始化,他还会对其进行初始化
// Set is used to store a new key/value pair exclusively for this context.
// It also lazy initializes c.Keys if it was not used previously.
func (c *Context) Set(key string, value interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = value
}
// Get 返回给定的 key 对应的 value
// 即: (value, true), 如果给定的值不存在, 则返回 (nil, false)
// Get returns the value for the given key, ie: (value, true).
// If the value does not exists it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
value, exists = c.Keys[key]
return
}
/************************************/
/************** 请求信息 **************/
/************ INPUT DATA ************/
/************************************/
// Param 可以返回 URL param 的值
// 与 gin 不同, 在本框架中, 它是 c.Request.PathParameters[key] 的语法糖
// Param returns the value of the URL param.
// Unlike gin, it is a shortcut for c.Request.PathParameters[key].
// router.GET("/user/:id", func(c *gin.Context) {
// // a GET request to /user/john
// id := c.Param("id") // id == "john"
// })
func (c *Context) Param(key string) string {
return c.Request.PathParameters[key]
}
// ShouldBindJSON 是 c.ShouldBindWith(obj, binding.JSON) 的快捷方式
// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON).
func (c *Context) ShouldBindJSON(obj interface{}) error {
return c.ShouldBindWith(obj, binding.JSON)
}
// ShouldBindWith 使用指定的绑定引擎绑定数据到传递到结构体指针
// ShouldBindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
return b.Bind(c.Request, obj)
}
// ClientIP, after this adaptation, directly returns RequestContext.SourceIP from the SCF API Gateway trigger event
func (c *Context) ClientIP() string {
return c.Request.RequestContext.SourceIP
}
/************************************/
/************* 生成响应体 *************/
/******** RESPONSE RENDERING ********/
/************************************/
// bodyAllowedForStatus 是 http 包中的 http.bodyAllowedForStatus 函数的副本
// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == http.StatusNoContent:
return false
case status == http.StatusNotModified:
return false
}
return true
}
// Status sets the HTTP response code.
func (c *Context) Status(code int) {
// Inlines what gin's c.writermem.WriteHeader(code) would do.
if code > 0 && c.Response.StatusCode != code {
if c.Response.StatusCode != 0 {
debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", c.Response.StatusCode, code)
}
c.Response.StatusCode = code
}
}
// Header is an intelligent shortcut for gin's c.Writer.Header().Set(key, value);
// it writes a header in the response.
// If value == "", this method removes the header for the given key.
// FIXME: consider mirroring gin by going through header getters/setters here.
func (c *Context) Header(key, value string) {
if value == "" {
delete(c.Response.Headers, key)
return
}
c.Response.Headers[key] = value
}
// GetHeader returns the value from the request headers.
func (c *Context) GetHeader(key string) string {
// Follows the net/textproto header convention: keys are stored lower-cased.
if len(c.Request.Headers[strings.ToLower(key)]) == 0 {
return ""
}
return c.Request.Headers[strings.ToLower(key)]
}
// Render writes the response headers and calls render.Render to render data.
func (c *Context) Render(code int, r render.Render) {
c.Status(code)
if !bodyAllowedForStatus(code) {
r.WriteContentType(&c.Response)
return
}
if err := r.Render( | &c.Response); err != nil {
panic(err)
}
}
// JSON serializes the given struct as JSON into the response body.
// It also sets the Content-Type as "application/json".
func | conditional_block |
|
texel.rs | pub(crate) mod constants {
use super::{AsTexel, MaxAligned, Texel};
macro_rules! constant_texel {
($(($name:ident, $type:ty)),*) => {
$(pub const $name: Texel<$type> = Texel(core::marker::PhantomData);
impl AsTexel for $type {
fn texel() -> Texel<Self> {
const _: () = {
assert!(Texel::<$type>::check_invariants());
};
$name
}
}
)*
}
}
constant_texel!(
(I8, i8),
(U8, u8),
(I16, i16),
(U16, u16),
(I32, i32),
(U32, u32),
(F32, f32),
(I64, i64),
(U64, u64),
(F64, f64),
(USIZE, usize),
(ISIZE, isize),
(MAX, MaxAligned)
);
impl<T: AsTexel> AsTexel for [T; 1] {
fn texel() -> Texel<[T; 1]> {
T::texel().array::<1>()
}
}
impl<T: AsTexel> AsTexel for [T; 2] {
fn texel() -> Texel<[T; 2]> {
T::texel().array::<2>()
}
}
impl<T: AsTexel> AsTexel for [T; 3] {
fn texel() -> Texel<[T; 3]> {
T::texel().array::<3>()
}
}
impl<T: AsTexel> AsTexel for [T; 4] {
fn texel() -> Texel<[T; 4]> {
T::texel().array::<4>()
}
}
impl<T: AsTexel> AsTexel for [T; 5] {
fn texel() -> Texel<[T; 5]> {
T::texel().array::<5>()
}
}
impl<T: AsTexel> AsTexel for [T; 6] {
fn | () -> Texel<[T; 6]> {
T::texel().array::<6>()
}
}
impl<T: AsTexel> AsTexel for [T; 7] {
fn texel() -> Texel<[T; 7]> {
T::texel().array::<7>()
}
}
impl<T: AsTexel> AsTexel for [T; 8] {
fn texel() -> Texel<[T; 8]> {
T::texel().array::<8>()
}
}
impl<T: AsTexel> AsTexel for ::core::num::Wrapping<T> {
fn texel() -> Texel<::core::num::Wrapping<T>> {
T::texel().num_wrapping()
}
}
}
#[cfg(target_arch = "x86")]
mod x64 {
use super::{AsTexel, Texel};
use core::arch::x86;
builtin_texel!(x86::__m128);
builtin_texel!(x86::__m128d);
builtin_texel!(x86::__m128i);
builtin_texel!(x86::__m256);
builtin_texel!(x86::__m256d);
builtin_texel!(x86::__m256i);
}
#[cfg(target_arch = "x86_64")]
mod x64_64 {
use super::{AsTexel, Texel};
use core::arch::x86_64;
builtin_texel!(x86_64::__m128);
builtin_texel!(x86_64::__m128d);
builtin_texel!(x86_64::__m128i);
builtin_texel!(x86_64::__m256);
builtin_texel!(x86_64::__m256d);
builtin_texel!(x86_64::__m256i);
}
#[cfg(target_arch = "arm")]
mod arm { /* all types unstable */
}
#[cfg(target_arch = "aarch64")]
mod arm {
use super::{AsTexel, Texel};
use core::arch::aarch64;
builtin_texel!(aarch64::float64x1_t);
builtin_texel!(aarch64::float64x1x2_t);
builtin_texel!(aarch64::float64x1x3_t);
builtin_texel!(aarch64::float64x1x4_t);
builtin_texel!(aarch64::float64x2_t);
builtin_texel!(aarch64::float64x2x2_t);
builtin_texel!(aarch64::float64x2x3_t);
builtin_texel!(aarch64::float64x2x4_t);
}
#[cfg(target_arch = "wasm32")]
mod arm {
use super::{AsTexel, Texel};
use core::arch::wasm32;
builtin_texel!(wasm32::v128);
}
impl<P: bytemuck::Pod> Texel<P> {
/// Try to construct an instance of the marker.
///
/// If successful, you can freely use it to access the image buffers. This requires:
/// - The type must have an alignment of *at most* `MAX_ALIGN`.
/// - The type must *not* be a ZST.
/// - The type must *not* have any Drop-glue (no Drop impl, and no member that is Drop).
pub const fn for_type() -> Option<Self> {
if Texel::<P>::check_invariants() {
Some(Texel(PhantomData))
} else {
None
}
}
}
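// Illustrative sketch (not part of the crate): probing types with `for_type`.
//
//   assert!(Texel::<u8>::for_type().is_some()); // small Pod type: accepted
//   assert!(Texel::<()>::for_type().is_none()); // zero-sized type: rejected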
impl<P, O: bytemuck::TransparentWrapper<P>> IsTransparentWrapper<P, O> {
pub const CONST: Self = IsTransparentWrapper(PhantomData);
}
/// The **only** ways to construct a `buf`, protecting the alignment invariant.
/// Hint: This is an unsized type so there is no safe way of constructing it.
impl buf {
pub const ALIGNMENT: usize = MAX_ALIGN;
/// Wrap bytes in a `buf`.
///
/// The bytes need to be aligned to `ALIGNMENT`.
pub fn from_bytes(bytes: &[u8]) -> Option<&Self> {
if bytes.as_ptr() as usize % Self::ALIGNMENT == 0 {
// SAFETY: this is an almost trivial cast of unsized references. Additionally, we still
// guarantee that this is at least aligned to `MAX_ALIGN`.
Some(unsafe { &*(bytes as *const [u8] as *const Self) })
} else {
None
}
}
/// Wrap bytes in a `buf`.
///
/// The bytes need to be aligned to `ALIGNMENT`.
pub fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self> {
if bytes.as_ptr() as usize % Self::ALIGNMENT == 0 {
// SAFETY: this is an almost trivial cast of unsized references. Additionally, we still
// guarantee that this is at least aligned to `MAX_ALIGN`.
Some(unsafe { &mut *(bytes as *mut [u8] as *mut Self) })
} else {
None
}
}
}
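// Illustrative sketch: only MAX_ALIGN-aligned storage wraps successfully, which
// is why buffers are built out of `MaxAligned` chunks. `bytemuck::cast_slice`
// here is purely for the demonstration.
//
//   let storage = [MaxAligned([0; MAX_ALIGN]); 2];
//   let bytes: &[u8] = bytemuck::cast_slice(&storage);
//   assert!(buf::from_bytes(bytes).is_some());
//   assert!(buf::from_bytes(&bytes[1..]).is_none()); // misaligned view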
impl<P> Texel<P> {
/// Create a witness certifying `P` as a texel without checks.
///
/// # Safety
///
/// The type `P` must __not__:
/// * have any validity invariants, i.e. it mustn't contain any padding.
/// * have any safety invariants. This implies it can be copied.
/// * have an alignment larger than [`MaxAligned`].
/// * be a zero-size type.
///
/// Furthermore, tentatively, the type must not have any drop glue. That is, its members are all
/// simple types without Drop implementations. This requirement exists mainly to avoid code
/// accidentally leaking instances, and ensures that copies created from their byte
/// representation (which is safe according to the other invariants) do not cause unexpected
/// effects.
///
/// Note that the alignment requirement with regards to `MaxAligned` is __architecture
/// dependent__ as the exact bound varies across the `target_arch` feature. Where possible, add
/// static assertions to each call site of this function.
///
/// [`MaxAligned`]: struct.MaxAligned.html
pub const unsafe fn new_unchecked() -> Self {
debug_assert!(Self::check_invariants());
Texel(PhantomData)
}
/// Note this isn't exhaustive. Indeed, we have no way to check for padding.
pub(crate) const fn check_invariants() -> bool {
mem::align_of::<P>() | texel | identifier_name |
texel.rs | to be aligned to `ALIGNMENT`.
pub fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self> {
if bytes.as_ptr() as usize % Self::ALIGNMENT == 0 {
// SAFETY: this is an almost trivial cast of unsized references. Additionally, we still
// guarantee that this is at least aligned to `MAX_ALIGN`.
Some(unsafe { &mut *(bytes as *mut [u8] as *mut Self) })
} else {
None
}
}
}
impl<P> Texel<P> {
/// Create a witness certifying `P` as a texel without checks.
///
/// # Safety
///
/// The type `P` must __not__:
/// * have any validity invariants, i.e. it mustn't contain any padding.
/// * have any safety invariants. This implies it can be copied.
/// * have an alignment larger than [`MaxAligned`].
/// * be a zero-size type.
///
/// Furthermore, tentatively, the type must not have any drop glue. That is, its members are all
/// simple types without Drop implementations. This requirement exists mainly to avoid code
/// accidentally leaking instances, and ensures that copies created from their byte
/// representation (which is safe according to the other invariants) do not cause unexpected
/// effects.
///
/// Note that the alignment requirement with regards to `MaxAligned` is __architecture
/// dependent__ as the exact bound varies across the `target_arch` feature. Where possible, add
/// static assertions to each call site of this function.
///
/// [`MaxAligned`]: struct.MaxAligned.html
pub const unsafe fn new_unchecked() -> Self {
debug_assert!(Self::check_invariants());
Texel(PhantomData)
}
/// Note this isn't exhaustive. Indeed, we have no way to check for padding.
pub(crate) const fn check_invariants() -> bool {
mem::align_of::<P>() <= MAX_ALIGN && mem::size_of::<P>() > 0 && !mem::needs_drop::<P>()
}
/// Proxy of `core::mem::align_of`.
pub const fn align(self) -> usize {
mem::align_of::<P>()
}
/// Proxy of `core::mem::size_of`.
pub const fn size(self) -> usize {
mem::size_of::<P>()
}
/// Publicly visible function to use the guarantee of non-ZST.
pub const fn size_nz(self) -> core::num::NonZeroUsize {
match core::num::NonZeroUsize::new(self.size()) {
None => panic!(""),
Some(num) => num,
}
}
// A number of constructors that are technically unsafe. Note that we could write them as safe
// code here to pad our stats but they are not checked by the type system so it's risky. Better
// explain their safety in the code as comments.
/// Construct a texel for an array of `N` elements.
///
/// # Panics
///
/// This function panics when called with `N` equal to 0.
pub const fn array<const N: usize>(self) -> Texel<[P; N]> {
if N == 0 {
panic!()
}
// Safety:
// * has no validity/safety invariants
// * has the same alignment as P which is not larger than MaxAligned
unsafe { Texel::new_unchecked() }
}
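// Illustrative sketch: deriving an array texel from an element texel, e.g.
// treating four u8 channels as one RGBA pixel.
//
//   let channel = <u8 as AsTexel>::texel();
//   let rgba: Texel<[u8; 4]> = channel.array::<4>();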
/// Construct a texel by wrapping into a transparent wrapper.
///
/// TODO: a constructor for Texel<O> based on proof of transmutation from &mut P to &mut O,
/// based on the standard transmutation RFC. This is more flexible than bytemuck's
/// TransparentWrapper trait.
pub const fn transparent_wrap<O>(self, _: IsTransparentWrapper<P, O>) -> Texel<O> {
// Safety:
// * P and O must have the same invariants, none
// * P and O have the same alignment
unsafe { Texel::new_unchecked() }
}
/// Construct a texel by unwrapping a transparent wrapper.
pub const fn transparent_unwrap<O>(self, _: IsTransparentWrapper<O, P>) -> Texel<O> {
// Safety:
// * P and O must have the same invariants, none
// * P and O have the same alignment
unsafe { Texel::new_unchecked() }
}
/// Construct a texel that contains a number in the standard `Wrapping` type.
pub const fn num_wrapping(self) -> Texel<num::Wrapping<P>> {
// * Texel<P> = Self certifies the byte properties.
// * `core::num::Wrapping` is `repr(transparent)`
unsafe { Texel::new_unchecked() }
}
}
impl<T, const N: usize> Texel<[T; N]> {
/// Construct a texel, from an array of elements.
pub const fn array_element(self) -> Texel<T> {
// Safety:
// We'll see that all properties are implied by _any_ suitable array.
// - The type must have an alignment of *at most* `MAX_ALIGN`. Array and inner type have
// the same alignment.
// - The type must *not* be a ZST. The array would otherwise be a ZST.
// - The type must *not* have any Drop-glue (no Drop impl, and no member that is Drop).
// The array would otherwise have Drop-glue.
unsafe { Texel::new_unchecked() }
}
}
/// Operations that can be performed based on the evidence of Texel.
impl<P> Texel<P> {
/// Copy a texel.
///
/// Note that this does not require `Copy` because that requirement was part of the
/// requirements of constructing this `Texel` witness.
pub fn copy_val(self, val: &P) -> P {
// SAFETY: by the constructor, this type can be copied byte-by-byte.
unsafe { ptr::read(val) }
}
/// Reinterpret a slice of aligned bytes as a slice of the texel.
///
/// Note that the size (in bytes) of the slice will be shortened if the size of `P` is not a
/// divisor of the input slice's size.
pub fn to_slice<'buf>(self, buffer: &'buf [MaxAligned]) -> &'buf [P] {
self.cast_buf(buf::new(buffer))
}
/// Reinterpret a slice of aligned bytes as a mutable slice of the texel.
///
/// Note that the size (in bytes) of the slice will be shortened if the size of `P` is not a
/// divisor of the input slice's size.
pub fn to_mut_slice<'buf>(self, buffer: &'buf mut [MaxAligned]) -> &'buf mut [P] {
self.cast_mut_buf(buf::new_mut(buffer))
}
/// Try to reinterpret a slice of bytes as a slice of the texel.
///
/// This returns `Some` if the buffer is suitably aligned, and `None` otherwise.
pub fn try_to_slice<'buf>(self, bytes: &'buf [u8]) -> Option<&'buf [P]> {
if bytes.as_ptr() as usize % mem::align_of::<P>() == 0 {
// SAFETY:
// - The `pod`-ness is certified by `self`, which makes the bytes a valid
// representation of P.
// - The total size is at most `bytes` by construction.
let len = bytes.len() / mem::size_of::<P>();
Some(unsafe { &*ptr::slice_from_raw_parts(bytes.as_ptr() as *const P, len) })
} else {
None
}
}
/// Try to reinterpret a slice of bytes as a slice of the texel.
///
/// This returns `Some` if the buffer is suitably aligned, and `None` otherwise.
pub fn try_to_slice_mut<'buf>(self, bytes: &'buf mut [u8]) -> Option<&'buf mut [P]> {
if let Some(slice) = self.try_to_slice(bytes) {
// SAFETY:
// - The `pod`-ness is certified by `self`, which makes the bytes a valid
// representation of P. Conversely, it makes any P valid as bytes.
let len = slice.len();
Some(unsafe { &mut *ptr::slice_from_raw_parts_mut(bytes.as_mut_ptr() as *mut P, len) })
} else {
None
}
}
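// Illustrative sketch: the fallible path only succeeds when the byte buffer
// happens to satisfy u32's alignment, hence the `if let`.
//
//   let texel = <u32 as AsTexel>::texel();
//   let mut bytes = [0u8; 8];
//   if let Some(words) = texel.try_to_slice_mut(&mut bytes) {
//       words[0] = 0xdead_beef;
//   }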
/// Reinterpret a slice of texel as memory.
///
/// Note that you can convert a reference to a single value by [`core::slice::from_ref`].
pub fn to_bytes<'buf>(self, texel: &'buf [P]) -> &'buf [u8] {
self.cast_bytes(texel)
}
/// Reinterpret a mutable slice of texel as memory.
///
/// Note that you can convert a reference to a single value by [`core::slice::from_mut`].
pub fn to_mut_bytes<'buf>(self, texel: &'buf mut [P]) -> &'buf mut [u8] {
| self.cast_mut_bytes(texel)
}
| identifier_body |
|
texel.rs | have an alignment of *at most* `MAX_ALIGN`.
/// - The type must *not* be a ZST.
/// - The type must *not* have any Drop-glue (no Drop impl, and no member that is Drop).
pub const fn for_type() -> Option<Self> {
if Texel::<P>::check_invariants() {
Some(Texel(PhantomData))
} else {
None
}
}
}
impl<P, O: bytemuck::TransparentWrapper<P>> IsTransparentWrapper<P, O> {
pub const CONST: Self = IsTransparentWrapper(PhantomData);
}
/// The **only** ways to construct a `buf`, protecting the alignment invariant.
/// Hint: This is an unsized type so there is no safe way of constructing it.
impl buf {
pub const ALIGNMENT: usize = MAX_ALIGN;
/// Wrap bytes in a `buf`.
///
/// The bytes need to be aligned to `ALIGNMENT`.
pub fn from_bytes(bytes: &[u8]) -> Option<&Self> {
if bytes.as_ptr() as usize % Self::ALIGNMENT == 0 {
// SAFETY: this is an almost trivial cast of unsized references. Additionally, we still
// guarantee that this is at least aligned to `MAX_ALIGN`.
Some(unsafe { &*(bytes as *const [u8] as *const Self) })
} else {
None
}
}
/// Wrap bytes in a `buf`.
///
/// The bytes need to be aligned to `ALIGNMENT`.
pub fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self> {
if bytes.as_ptr() as usize % Self::ALIGNMENT == 0 {
// SAFETY: this is an almost trivial cast of unsized references. Additionally, we still
// guarantee that this is at least aligned to `MAX_ALIGN`.
Some(unsafe { &mut *(bytes as *mut [u8] as *mut Self) })
} else {
None
}
}
}
impl<P> Texel<P> {
/// Create a witness certifying `P` as a texel without checks.
///
/// # Safety
///
/// The type `P` must __not__:
/// * have any validity invariants, i.e. it mustn't contain any padding.
/// * have any safety invariants. This implies it can be copied.
/// * have an alignment larger than [`MaxAligned`].
/// * be a zero-size type.
///
/// Furthermore, tentatively, the type must not have any drop glue. That is, its members are all
/// simple types without Drop implementations. This requirement exists mainly to avoid code
/// accidentally leaking instances, and ensures that copies created from their byte
/// representation (which is safe according to the other invariants) do not cause unexpected
/// effects.
///
/// Note that the alignment requirement with regards to `MaxAligned` is __architecture
/// dependent__ as the exact bound varies across the `target_arch` feature. Where possible, add
/// static assertions to each call site of this function.
///
/// [`MaxAligned`]: struct.MaxAligned.html
pub const unsafe fn new_unchecked() -> Self {
debug_assert!(Self::check_invariants());
Texel(PhantomData)
}
/// Note this isn't exhaustive. Indeed, we have no way to check for padding.
pub(crate) const fn check_invariants() -> bool {
mem::align_of::<P>() <= MAX_ALIGN && mem::size_of::<P>() > 0 && !mem::needs_drop::<P>()
}
/// Proxy of `core::mem::align_of`.
pub const fn align(self) -> usize {
mem::align_of::<P>()
}
/// Proxy of `core::mem::size_of`.
pub const fn size(self) -> usize {
mem::size_of::<P>()
}
/// Publicly visible function to use the guarantee of non-ZST.
pub const fn size_nz(self) -> core::num::NonZeroUsize {
match core::num::NonZeroUsize::new(self.size()) {
None => panic!(""),
Some(num) => num,
}
}
// A number of constructors that are technically unsafe. Note that we could write them as safe
// code here to pad our stats but they are not checked by the type system so it's risky. Better
// explain their safety in the code as comments.
/// Construct a texel for an array of `N` elements.
///
/// # Panics
///
/// This function panics when called with `N` equal to 0.
pub const fn array<const N: usize>(self) -> Texel<[P; N]> {
if N == 0 {
panic!()
}
// Safety:
// * has no validity/safety invariants
// * has the same alignment as P which is not larger than MaxAligned
unsafe { Texel::new_unchecked() }
}
/// Construct a texel by wrapping into a transparent wrapper.
///
/// TODO: a constructor for Texel<O> based on proof of transmutation from &mut P to &mut O,
/// based on the standard transmutation RFC. This is more flexible than bytemuck's
/// TransparentWrapper trait.
pub const fn transparent_wrap<O>(self, _: IsTransparentWrapper<P, O>) -> Texel<O> {
// Safety:
// * P and O must have the same invariants, none
// * P and O have the same alignment
unsafe { Texel::new_unchecked() }
}
/// Construct a texel by unwrapping a transparent wrapper.
pub const fn transparent_unwrap<O>(self, _: IsTransparentWrapper<O, P>) -> Texel<O> {
// Safety:
// * P and O must have the same invariants, none
// * P and O have the same alignment
unsafe { Texel::new_unchecked() }
}
/// Construct a texel that contains a number in the standard `Wrapping` type.
pub const fn num_wrapping(self) -> Texel<num::Wrapping<P>> {
// * Texel<P> = Self certifies the byte properties.
// * `core::num::Wrapping` is `repr(transparent)`
unsafe { Texel::new_unchecked() }
}
}
impl<T, const N: usize> Texel<[T; N]> {
/// Construct a texel, from an array of elements.
pub const fn array_element(self) -> Texel<T> {
// Safety:
// We'll see that all properties are implied by _any_ suitable array.
// - The type must have an alignment of *at most* `MAX_ALIGN`. Array and inner type have
// the same alignment.
// - The type must *not* be a ZST. The array would otherwise be a ZST.
// - The type must *not* have any Drop-glue (no Drop impl, and no member that is Drop).
// The array would otherwise have Drop-glue.
unsafe { Texel::new_unchecked() }
}
}
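// Illustrative sketch: `array_element` inverts `array`, recovering the channel
// texel from a pixel texel.
//
//   let rgba = <[u8; 4] as AsTexel>::texel();
//   let channel: Texel<u8> = rgba.array_element();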
/// Operations that can be performed based on the evidence of Texel.
impl<P> Texel<P> {
/// Copy a texel.
///
/// Note that this does not require `Copy` because that requirement was part of the
/// requirements of constructing this `Texel` witness.
pub fn copy_val(self, val: &P) -> P {
// SAFETY: by the constructor, this type can be copied byte-by-byte.
unsafe { ptr::read(val) }
}
/// Reinterpret a slice of aligned bytes as a slice of the texel.
///
/// Note that the size (in bytes) of the slice will be shortened if the size of `P` is not a
/// divisor of the input slice's size.
pub fn to_slice<'buf>(self, buffer: &'buf [MaxAligned]) -> &'buf [P] {
self.cast_buf(buf::new(buffer))
}
/// Reinterpret a slice of aligned bytes as a mutable slice of the texel.
///
/// Note that the size (in bytes) of the slice will be shortened if the size of `P` is not a
/// divisor of the input slice's size.
pub fn to_mut_slice<'buf>(self, buffer: &'buf mut [MaxAligned]) -> &'buf mut [P] {
self.cast_mut_buf(buf::new_mut(buffer))
}
/// Try to reinterpret a slice of bytes as a slice of the texel.
///
/// This returns `Some` if the buffer is suitably aligned, and `None` otherwise.
pub fn try_to_slice<'buf>(self, bytes: &'buf [u8]) -> Option<&'buf [P]> {
if bytes.as_ptr() as usize % mem::align_of::<P>() == 0 {
// SAFETY:
// - The `pod`-ness is certified by `self`, which makes the bytes a valid
// representation of P.
// - The total size is at most `bytes` by construction.
let len = bytes.len() / mem::size_of::<P>();
Some(unsafe { &*ptr::slice_from_raw_parts(bytes.as_ptr() as *const P, len) })
} else {
| None
}
| conditional_block |
|
texel.rs | #![allow(unsafe_code)]
use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
use core::marker::PhantomData;
use core::{fmt, hash, mem, num, ptr, slice};
use crate::buf::buf;
/// Marker struct to denote a texel type.
///
/// Can be constructed only for types that have expected alignment and no byte invariants. It
/// always implements `Copy` and `Clone`, regardless of the underlying type and is zero-sized.
///
/// This is the central encapsulation of unsafety in this crate. It utilizes `bytemuck` for a safe
/// interface but permits other types with an unsafe interface, and offers the cast operations
/// without a bound on the `Pod` trait. Note that `Pod` is a pure marker trait; its properties must
/// hold even if it is not explicitly mentioned. If all constructors (safely or unsafely) ensure
/// that its properties hold we can use `Texel` as a witness type for the bound and subsequently
/// write interfaces to take an instance instead of having a static type bound. This achieves two
/// effects:
/// * Firstly, it makes the interface independent of the chosen transmutation crate. Potentially we
/// will have a method to construct the `Texel` via a `core` trait.
/// * Secondly, it allows creating texels of third-party types for which the bound can not be
/// implemented. Crucially, this includes SIMD representations that would be a burden to support
/// directly. And conversely you can also deal with arbitrary existing texels without a bound in
/// your own interfaces!
pub struct Texel<P: ?Sized>(PhantomData<P>);
/// Marker struct to denote that P is transparently wrapped in O.
///
/// The only way to construct it is by accessing its associated constant which only exists when the
/// bound `bytemuck::TransparentWrapper` holds as required. This encodes a type-level set and is
/// a workaround for such bounds not yet being allowed in `const fn`. Expect this type to be
/// deprecated sooner or later.
pub struct IsTransparentWrapper<P, O>(PhantomData<(P, O)>);
/// Describes a type which can represent a `Texel` and for which this is statically known.
pub trait AsTexel {
/// Get the texel struct for this type.
///
/// The naive implementation of merely unwrapping the result of `Texel::for_type` **panics** on
/// any invalid type. This trait should only be implemented when you know for sure that the
/// type is correct.
fn texel() -> Texel<Self>;
}
macro_rules! def_max_align {
(
$(#[$common_attr:meta])*
$($($arch:literal),* = $num:literal),*
) => {
/// A byte-like-type that is aligned to the required max alignment.
///
/// This type does not contain padding and implements `Pod`. Generally, the alignment and size
/// requirement is kept small to avoid overhead.
$(#[$common_attr])*
$(
#[cfg_attr(
any($(target_arch = $arch),*),
repr(align($num))
)]
)*
pub struct MaxAligned(pub(crate) [u8; MAX_ALIGN]);
$(
#[cfg(
any($(target_arch = $arch),*),
)]
pub(crate) const MAX_ALIGN: usize = $num;
)*
#[cfg(
not(any(
$(any($(target_arch = $arch),*)),*
)),
)]
pub(crate) const MAX_ALIGN: usize = 8;
}
}
def_max_align! {
/// A byte-like-type that is aligned to the required max alignment.
///
/// This type does not contain padding and implements `Pod`. Generally, the alignment and size
/// requirement is kept small to avoid overhead.
#[derive(Clone, Copy)]
#[repr(C)]
"x86", "x86_64" = 32,
"arm" = 16,
"aarch64" = 16,
"wasm32" = 16
}
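// Illustrative sketch: on the architectures listed above the bound is exact;
// unlisted targets fall back to MAX_ALIGN = 8 without a matching repr(align),
// so only the inequality holds universally.
//
//   const _: () = assert!(core::mem::align_of::<MaxAligned>() <= MAX_ALIGN);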
unsafe impl bytemuck::Zeroable for MaxAligned {}
unsafe impl bytemuck::Pod for MaxAligned {}
macro_rules! builtin_texel {
( $name:ty ) => {
impl AsTexel for $name {
fn texel() -> Texel<Self> {
const _: () = {
assert!(Texel::<$name>::check_invariants());
};
unsafe { Texel::new_unchecked() }
}
}
};
}
pub(crate) mod constants {
use super::{AsTexel, MaxAligned, Texel};
macro_rules! constant_texel {
($(($name:ident, $type:ty)),*) => {
$(pub const $name: Texel<$type> = Texel(core::marker::PhantomData);
impl AsTexel for $type {
fn texel() -> Texel<Self> {
const _: () = {
assert!(Texel::<$type>::check_invariants());
};
$name
}
}
)*
}
}
constant_texel!(
(I8, i8),
(U8, u8),
(I16, i16),
(U16, u16),
(I32, i32),
(U32, u32),
(F32, f32),
(I64, i64),
(U64, u64),
(F64, f64),
(USIZE, usize),
(ISIZE, isize),
(MAX, MaxAligned)
);
impl<T: AsTexel> AsTexel for [T; 1] {
fn texel() -> Texel<[T; 1]> {
T::texel().array::<1>()
}
}
impl<T: AsTexel> AsTexel for [T; 2] {
fn texel() -> Texel<[T; 2]> {
T::texel().array::<2>()
}
}
impl<T: AsTexel> AsTexel for [T; 3] {
fn texel() -> Texel<[T; 3]> {
T::texel().array::<3>()
}
}
impl<T: AsTexel> AsTexel for [T; 4] {
fn texel() -> Texel<[T; 4]> {
T::texel().array::<4>()
}
}
impl<T: AsTexel> AsTexel for [T; 5] {
fn texel() -> Texel<[T; 5]> {
T::texel().array::<5>()
}
}
impl<T: AsTexel> AsTexel for [T; 6] {
fn texel() -> Texel<[T; 6]> {
T::texel().array::<6>()
}
}
impl<T: AsTexel> AsTexel for [T; 7] {
fn texel() -> Texel<[T; 7]> {
T::texel().array::<7>()
}
}
impl<T: AsTexel> AsTexel for [T; 8] {
fn texel() -> Texel<[T; 8]> {
T::texel().array::<8>()
}
}
impl<T: AsTexel> AsTexel for ::core::num::Wrapping<T> {
fn texel() -> Texel<::core::num::Wrapping<T>> {
T::texel().num_wrapping()
}
}
}
#[cfg(target_arch = "x86")]
mod x64 {
use super::{AsTexel, Texel};
use core::arch::x86;
builtin_texel!(x86::__m128);
builtin_texel!(x86::__m128d);
builtin_texel!(x86::__m128i);
builtin_texel!(x86::__m256);
builtin_texel!(x86::__m256d);
builtin_texel!(x86::__m256i);
}
#[cfg(target_arch = "x86_64")]
mod x64_64 {
use super::{AsTexel, Texel};
use core::arch::x86_64;
builtin_texel!(x86_64::__m128);
builtin_texel!(x86_64::__m128d);
builtin_texel!(x86_64::__m128i);
builtin_texel!(x86_64::__m256);
builtin_texel!(x86_64::__m256d);
builtin_texel!(x86_64::__m256i);
}
#[cfg(target_arch = "arm")]
mod arm { /* all types unstable */
}
#[cfg(target_arch = "aarch64")]
mod arm {
use super::{AsTexel, Texel};
use core::arch::aarch64;
builtin_texel!(aarch64::float64x1_t);
| random_line_split |
||
lib.rs | ))
}
fn into_path(py: Python, pointer: JSONPointer) -> PyResult<Py<PyList>> {
let path = PyList::empty(py);
for chunk in pointer {
match chunk {
jsonschema::paths::PathChunk::Property(property) => {
path.append(property.into_string())?
}
jsonschema::paths::PathChunk::Index(index) => path.append(index)?,
jsonschema::paths::PathChunk::Keyword(keyword) => path.append(keyword)?,
};
}
Ok(path.into_py(py))
}
fn get_draft(draft: u8) -> PyResult<Draft> {
match draft {
DRAFT4 => Ok(jsonschema::Draft::Draft4),
DRAFT6 => Ok(jsonschema::Draft::Draft6),
DRAFT7 => Ok(jsonschema::Draft::Draft7),
_ => Err(exceptions::PyValueError::new_err(format!(
"Unknown draft: {}",
draft
))),
}
}
fn make_options(
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<jsonschema::CompilationOptions> {
let mut options = jsonschema::JSONSchema::options();
if let Some(raw_draft_version) = draft {
options.with_draft(get_draft(raw_draft_version)?);
}
if let Some(true) = with_meta_schemas {
options.with_meta_schemas();
}
Ok(options)
}
fn iter_on_error(
py: Python,
compiled: &jsonschema::JSONSchema,
instance: &PyAny,
) -> PyResult<ValidationErrorIter> {
let instance = ser::to_value(instance)?;
let mut pyerrors = vec![];
if let Err(errors) = compiled.validate(&instance) {
for error in errors {
pyerrors.push(into_py_err(py, error)?);
}
};
Ok(ValidationErrorIter {
iter: Box::new(pyerrors.into_iter()),
})
}
fn raise_on_error(py: Python, compiled: &jsonschema::JSONSchema, instance: &PyAny) -> PyResult<()> {
let instance = ser::to_value(instance)?;
let result = compiled.validate(&instance);
let error = result
.err()
.map(|mut errors| errors.next().expect("Iterator should not be empty"));
error.map_or_else(|| Ok(()), |err| Err(into_py_err(py, err)?))
}
fn to_error_message(error: &jsonschema::ValidationError) -> String {
let mut message = error.to_string();
message.push('\n');
message.push('\n');
message.push_str("Failed validating");
let push_quoted = |m: &mut String, s: &str| {
m.push('"');
m.push_str(s);
m.push('"');
};
let push_chunk = |m: &mut String, chunk: &jsonschema::paths::PathChunk| {
match chunk {
jsonschema::paths::PathChunk::Property(property) => push_quoted(m, property),
jsonschema::paths::PathChunk::Index(index) => m.push_str(&index.to_string()),
jsonschema::paths::PathChunk::Keyword(keyword) => push_quoted(m, keyword),
};
};
if let Some(last) = error.schema_path.last() {
message.push(' ');
push_chunk(&mut message, last)
}
message.push_str(" in schema");
let mut chunks = error.schema_path.iter().peekable();
while let Some(chunk) = chunks.next() {
// Skip the last element as it is already mentioned in the message
if chunks.peek().is_none() {
break;
}
message.push('[');
push_chunk(&mut message, chunk);
message.push(']');
}
message.push('\n');
message.push('\n');
message.push_str("On instance");
for chunk in &error.instance_path {
message.push('[');
match chunk {
jsonschema::paths::PathChunk::Property(property) => push_quoted(&mut message, property),
jsonschema::paths::PathChunk::Index(index) => message.push_str(&index.to_string()),
// Keywords are not used for instances
jsonschema::paths::PathChunk::Keyword(_) => unreachable!("Internal error"),
};
message.push(']');
}
message.push(':');
message.push_str("\n ");
message.push_str(&error.instance.to_string());
message
}
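// Illustrative sketch of the shape this produces (exact content depends on the
// failing schema and instance):
//
//   3 is less than the minimum of 5
//
//   Failed validating "minimum" in schema
//
//   On instance:
//       3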
/// is_valid(schema, instance, draft=None, with_meta_schemas=False)
///
/// A shortcut for validating the input instance against the schema.
///
/// >>> is_valid({"minimum": 5}, 3)
/// False
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.is_valid`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn is_valid(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<bool> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => {
let instance = ser::to_value(instance)?;
Ok(compiled.is_valid(&instance))
}
Err(error) => Err(into_py_err(py, error)?),
}
}
/// validate(schema, instance, draft=None, with_meta_schemas=False)
///
/// Validate the input instance and raise `ValidationError` in the error case
///
/// >>> validate({"minimum": 5}, 3)
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If the input instance is invalid, only the first error encountered is raised.
/// If your workflow implies validating against the same schema, consider using `JSONSchema.validate`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn validate(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<()> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => raise_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// iter_errors(schema, instance, draft=None, with_meta_schemas=False)
///
/// Iterate the validation errors of the input instance
///
/// >>> next(iter_errors({"minimum": 5}, 3))
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.iter_errors`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn iter_errors(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<ValidationErrorIter> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => iter_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// JSONSchema(schema, draft=None, with_meta_schemas=False)
///
/// JSON Schema compiled into a validation tree.
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> compiled.is_valid(3)
/// False
///
/// By default Draft 7 will be used for compilation.
#[pyclass(module = "jsonschema_rs")]
#[pyo3(text_signature = "(schema, draft=None, with_meta_schemas=False)")]
struct JSONSchema {
schema: jsonschema::JSONSchema,
repr: String,
}
fn get_schema_repr(schema: &serde_json::Value) -> String {
// It could be more efficient, without converting the whole Value to a string
let mut repr = schema.to_string();
if repr.len() > SCHEMA_LENGTH_LIMIT {
repr.truncate(SCHEMA_LENGTH_LIMIT);
repr.push_str("...}");
}
repr
}
#[pymethods]
impl JSONSchema {
#[new]
fn new(
py: Python,
pyschema: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<Self> {
let options = make_options(draft, with_meta_schemas)?;
let raw_schema = ser::to_value(pyschema)?;
match options.compile(&raw_schema) {
Ok(schema) => Ok(JSONSchema {
schema,
repr: get_schema_repr(&raw_schema),
}),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// from_str(string, draft=None, with_meta_schemas=False)
///
/// Create `JSONSchema` from a serialized JSON string.
///
/// >>> compiled = JSONSchema.from_str('{"minimum": 5}')
///
/// Use it if you have your schema as a string and want to utilize Rust JSON parsing.
#[classmethod]
#[pyo3(text_signature = "(string, draft=None, with_meta_schemas=False)")]
fn | from_str | identifier_name |
|
lib.rs | PyRef<Self>) -> PyRef<Self> {
slf
}
fn __next__(mut slf: PyRefMut<Self>) -> Option<PyErr> {
slf.iter.next()
}
}
fn into_py_err(py: Python, error: jsonschema::ValidationError) -> PyResult<PyErr> {
let pyerror_type = PyType::new::<ValidationError>(py);
let message = error.to_string();
let verbose_message = to_error_message(&error);
let schema_path = into_path(py, error.schema_path)?;
let instance_path = into_path(py, error.instance_path)?;
Ok(PyErr::from_type(
pyerror_type,
(message, verbose_message, schema_path, instance_path),
))
}
fn into_path(py: Python, pointer: JSONPointer) -> PyResult<Py<PyList>> {
let path = PyList::empty(py);
for chunk in pointer {
match chunk {
jsonschema::paths::PathChunk::Property(property) => {
path.append(property.into_string())?
}
jsonschema::paths::PathChunk::Index(index) => path.append(index)?,
jsonschema::paths::PathChunk::Keyword(keyword) => path.append(keyword)?,
};
}
Ok(path.into_py(py))
}
fn get_draft(draft: u8) -> PyResult<Draft> {
match draft {
DRAFT4 => Ok(jsonschema::Draft::Draft4),
DRAFT6 => Ok(jsonschema::Draft::Draft6),
DRAFT7 => Ok(jsonschema::Draft::Draft7),
_ => Err(exceptions::PyValueError::new_err(format!(
"Unknown draft: {}",
draft
))),
}
}
fn make_options(
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<jsonschema::CompilationOptions> {
let mut options = jsonschema::JSONSchema::options();
if let Some(raw_draft_version) = draft {
options.with_draft(get_draft(raw_draft_version)?);
}
if let Some(true) = with_meta_schemas {
options.with_meta_schemas();
}
Ok(options)
}
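// Illustrative sketch (internal usage, assuming the module-level DRAFT7
// constant): both knobs are optional and map straight onto the builder.
//
//   let options = make_options(Some(DRAFT7), Some(true))?;
//   let compiled = options.compile(&serde_json::json!({"type": "integer"}));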
fn iter_on_error(
py: Python,
compiled: &jsonschema::JSONSchema,
instance: &PyAny,
) -> PyResult<ValidationErrorIter> {
let instance = ser::to_value(instance)?;
let mut pyerrors = vec![];
if let Err(errors) = compiled.validate(&instance) {
for error in errors {
pyerrors.push(into_py_err(py, error)?);
}
};
Ok(ValidationErrorIter {
iter: Box::new(pyerrors.into_iter()),
})
}
fn raise_on_error(py: Python, compiled: &jsonschema::JSONSchema, instance: &PyAny) -> PyResult<()> {
let instance = ser::to_value(instance)?;
let result = compiled.validate(&instance);
let error = result
.err()
.map(|mut errors| errors.next().expect("Iterator should not be empty"));
error.map_or_else(|| Ok(()), |err| Err(into_py_err(py, err)?))
}
fn to_error_message(error: &jsonschema::ValidationError) -> String {
let mut message = error.to_string();
message.push('\n');
message.push('\n');
message.push_str("Failed validating");
let push_quoted = |m: &mut String, s: &str| {
m.push('"');
m.push_str(s);
m.push('"');
};
let push_chunk = |m: &mut String, chunk: &jsonschema::paths::PathChunk| {
match chunk {
jsonschema::paths::PathChunk::Property(property) => push_quoted(m, property),
jsonschema::paths::PathChunk::Index(index) => m.push_str(&index.to_string()),
jsonschema::paths::PathChunk::Keyword(keyword) => push_quoted(m, keyword),
};
};
if let Some(last) = error.schema_path.last() {
message.push(' ');
push_chunk(&mut message, last)
}
message.push_str(" in schema");
let mut chunks = error.schema_path.iter().peekable();
while let Some(chunk) = chunks.next() {
// Skip the last element as it is already mentioned in the message
if chunks.peek().is_none() {
break;
}
message.push('[');
push_chunk(&mut message, chunk);
message.push(']');
}
message.push('\n');
message.push('\n');
message.push_str("On instance");
for chunk in &error.instance_path {
message.push('[');
match chunk {
jsonschema::paths::PathChunk::Property(property) => push_quoted(&mut message, property), | message.push(']');
}
message.push(':');
message.push_str("\n ");
message.push_str(&error.instance.to_string());
message
}
/// is_valid(schema, instance, draft=None, with_meta_schemas=False)
///
/// A shortcut for validating the input instance against the schema.
///
/// >>> is_valid({"minimum": 5}, 3)
/// False
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.is_valid`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn is_valid(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<bool> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => {
let instance = ser::to_value(instance)?;
Ok(compiled.is_valid(&instance))
}
Err(error) => Err(into_py_err(py, error)?),
}
}
/// validate(schema, instance, draft=None, with_meta_schemas=False)
///
/// Validate the input instance and raise `ValidationError` in the error case
///
/// >>> validate({"minimum": 5}, 3)
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If the input instance is invalid, only the first error encountered is raised.
/// If your workflow implies validating against the same schema, consider using `JSONSchema.validate`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn validate(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<()> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => raise_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// iter_errors(schema, instance, draft=None, with_meta_schemas=False)
///
/// Iterate the validation errors of the input instance
///
/// >>> next(iter_errors({"minimum": 5}, 3))
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.iter_errors`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn iter_errors(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<ValidationErrorIter> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => iter_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// JSONSchema(schema, draft=None, with_meta_schemas=False)
///
/// JSON Schema compiled into a validation tree.
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> compiled.is_valid(3)
/// False
///
/// By default Draft 7 will be used for compilation.
#[pyclass(module = "jsonschema_rs")]
#[pyo3(text_signature = "(schema, draft=None, with_meta_schemas=False)")]
struct JSONSchema {
schema: jsonschema::JSONSchema,
repr: String,
}
fn get_schema_repr(schema: &serde_json::Value) -> String {
// It could be more efficient, without converting the whole Value to a string
let mut repr = schema.to_string();
if repr.len() > SCHEMA_LENGTH_LIMIT {
repr.truncate(SCHEMA_LENGTH_LIMIT);
repr.push_str("...}");
}
repr
}
#[pymethods]
impl JSONSchema {
#[new]
fn new(
py: Python,
pyschema: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<Self> {
let options = make_options(draft, with_meta_schemas)?;
let raw_schema = ser::to_value(pys | jsonschema::paths::PathChunk::Index(index) => message.push_str(&index.to_string()),
// Keywords are not used for instances
jsonschema::paths::PathChunk::Keyword(_) => unreachable!("Internal error"),
}; | random_line_split |
lib.rs | };
};
if let Some(last) = error.schema_path.last() {
message.push(' ');
push_chunk(&mut message, last)
}
message.push_str(" in schema");
let mut chunks = error.schema_path.iter().peekable();
while let Some(chunk) = chunks.next() {
// Skip the last element as it is already mentioned in the message
if chunks.peek().is_none() {
break;
}
message.push('[');
push_chunk(&mut message, chunk);
message.push(']');
}
message.push('\n');
message.push('\n');
message.push_str("On instance");
for chunk in &error.instance_path {
message.push('[');
match chunk {
jsonschema::paths::PathChunk::Property(property) => push_quoted(&mut message, property),
jsonschema::paths::PathChunk::Index(index) => message.push_str(&index.to_string()),
// Keywords are not used for instances
jsonschema::paths::PathChunk::Keyword(_) => unreachable!("Internal error"),
};
message.push(']');
}
message.push(':');
message.push_str("\n ");
message.push_str(&error.instance.to_string());
message
}
/// is_valid(schema, instance, draft=None, with_meta_schemas=False)
///
/// A shortcut for validating the input instance against the schema.
///
/// >>> is_valid({"minimum": 5}, 3)
/// False
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.is_valid`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn is_valid(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<bool> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => {
let instance = ser::to_value(instance)?;
Ok(compiled.is_valid(&instance))
}
Err(error) => Err(into_py_err(py, error)?),
}
}
/// validate(schema, instance, draft=None, with_meta_schemas=False)
///
/// Validate the input instance and raise `ValidationError` in the error case
///
/// >>> validate({"minimum": 5}, 3)
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If the input instance is invalid, only the first error encountered is raised.
/// If your workflow implies validating against the same schema, consider using `JSONSchema.validate`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn validate(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<()> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => raise_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// iter_errors(schema, instance, draft=None, with_meta_schemas=False)
///
/// Iterate the validation errors of the input instance
///
/// >>> next(iter_errors({"minimum": 5}, 3))
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If your workflow implies validating against the same schema, consider using `JSONSchema.iter_errors`
/// instead.
#[pyfunction]
#[pyo3(text_signature = "(schema, instance, draft=None, with_meta_schemas=False)")]
fn iter_errors(
py: Python,
schema: &PyAny,
instance: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<ValidationErrorIter> {
let options = make_options(draft, with_meta_schemas)?;
let schema = ser::to_value(schema)?;
match options.compile(&schema) {
Ok(compiled) => iter_on_error(py, &compiled, instance),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// JSONSchema(schema, draft=None, with_meta_schemas=False)
///
/// JSON Schema compiled into a validation tree.
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> compiled.is_valid(3)
/// False
///
/// By default Draft 7 will be used for compilation.
#[pyclass(module = "jsonschema_rs")]
#[pyo3(text_signature = "(schema, draft=None, with_meta_schemas=False)")]
struct JSONSchema {
schema: jsonschema::JSONSchema,
repr: String,
}
fn get_schema_repr(schema: &serde_json::Value) -> String {
// It could be more efficient, without converting the whole Value to a string
let mut repr = schema.to_string();
if repr.len() > SCHEMA_LENGTH_LIMIT {
repr.truncate(SCHEMA_LENGTH_LIMIT);
repr.push_str("...}");
}
repr
}
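// Illustrative sketch: `big_schema` is a hypothetical value whose serialized
// form exceeds SCHEMA_LENGTH_LIMIT; its repr is cut and suffixed.
//
//   let repr = get_schema_repr(&big_schema);
//   assert!(repr.ends_with("...}"));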
#[pymethods]
impl JSONSchema {
#[new]
fn new(
py: Python,
pyschema: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<Self> {
let options = make_options(draft, with_meta_schemas)?;
let raw_schema = ser::to_value(pyschema)?;
match options.compile(&raw_schema) {
Ok(schema) => Ok(JSONSchema {
schema,
repr: get_schema_repr(&raw_schema),
}),
Err(error) => Err(into_py_err(py, error)?),
}
}
/// from_str(string, draft=None, with_meta_schemas=False)
///
/// Create `JSONSchema` from a serialized JSON string.
///
/// >>> compiled = JSONSchema.from_str('{"minimum": 5}')
///
/// Use it if you have your schema as a string and want to utilize Rust JSON parsing.
#[classmethod]
#[pyo3(text_signature = "(string, draft=None, with_meta_schemas=False)")]
fn from_str(
_: &PyType,
py: Python,
pyschema: &PyAny,
draft: Option<u8>,
with_meta_schemas: Option<bool>,
) -> PyResult<Self> {
let obj_ptr = pyschema.as_ptr();
let object_type = unsafe { pyo3::ffi::Py_TYPE(obj_ptr) };
if unsafe { object_type != types::STR_TYPE } {
let type_name =
unsafe { std::ffi::CStr::from_ptr((*object_type).tp_name).to_string_lossy() };
Err(PyValueError::new_err(format!(
"Expected string, got {}",
type_name
)))
} else {
let mut str_size: pyo3::ffi::Py_ssize_t = 0;
let uni = unsafe { string::read_utf8_from_str(obj_ptr, &mut str_size) };
let slice = unsafe { std::slice::from_raw_parts(uni, str_size as usize) };
let raw_schema = serde_json::from_slice(slice)
.map_err(|error| PyValueError::new_err(format!("Invalid string: {}", error)))?;
let options = make_options(draft, with_meta_schemas)?;
match options.compile(&raw_schema) {
Ok(schema) => Ok(JSONSchema {
schema,
repr: get_schema_repr(&raw_schema),
}),
Err(error) => Err(into_py_err(py, error)?),
}
}
}
/// is_valid(instance)
///
/// Perform fast validation against the compiled schema.
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> compiled.is_valid(3)
/// False
///
/// The output is a boolean value, that indicates whether the instance is valid or not.
#[pyo3(text_signature = "(instance)")]
fn is_valid(&self, instance: &PyAny) -> PyResult<bool> {
let instance = ser::to_value(instance)?;
Ok(self.schema.is_valid(&instance))
}
/// validate(instance)
///
/// Validate the input instance and raise `ValidationError` in the error case
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> compiled.validate(3)
/// ...
/// ValidationError: 3 is less than the minimum of 5
///
/// If the input instance is invalid, only the first error encountered is raised.
#[pyo3(text_signature = "(instance)")]
fn validate(&self, py: Python, instance: &PyAny) -> PyResult<()> {
raise_on_error(py, &self.schema, instance)
}
/// iter_errors(instance)
///
/// Iterate the validation errors of the input instance
///
/// >>> compiled = JSONSchema({"minimum": 5})
/// >>> next(compiled.iter_errors(3))
/// ...
/// ValidationError: 3 is less than the minimum of 5
#[pyo3(text_signature = "(instance)")]
fn iter_errors(&self, py: Python, instance: &PyAny) -> PyResult<ValidationErrorIter> | {
iter_on_error(py, &self.schema, instance)
} | identifier_body |
|
view-profile.component.ts | { UserControllerService } from '../../services/api/user-controller.service';
import { Car } from '../../models/car.model';
import { Link } from '../../models/link.model';
import { GeocodeService } from '../../services/geocode.service';
import { CustomtimePipe} from '../../pipes/customtime.pipe';
/**
* Represents the page that allows users to view (and edit) their profile
*/
@Component({
selector: 'app-view-profile',
templateUrl: './view-profile.component.html',
styleUrls: ['./view-profile.component.css']
})
export class ViewProfileComponent implements OnInit {
/** The User being selected */
currentUser: User;
/** The current role of the logged-on user */
currentRole: Role;
/** The first name of the user (hooked to form item in html) */
firstName: string;
/** The last name of the user (hooked to form item in html) */
lastName: string;
/** The user name of the user (hooked to form item in html) */
username: string;
/** The old password of the user (will be hooked up to form in html) */
oldPassword: string;
/** The new password of the user (hooked to form item in html) */
password: string;
/** The new password of the user, used to confirm User knows the password (hooked to form item in html) */
confirmPassword: string;
/** The address of the user (hooked to form item in html) */
_address: string;
/** The day the User's batch ends*/
batchEnd: any;
contactInfoArray: ContactInfo[] = [];
/** Whether the user can make changes (Currently not used) */
canEdit = false;
/** List of offices held by the user */
officeObjectArray: Office[] = [];
/** Current office being examined */
officeObject: Office;
/** User's active state */
active: string;
existingBio: string;
existingBioStatus: boolean = false;
principal: User;
currentState: string;
/** Holds the list of all users in the system */
users: any[];
/** Holds the list of users filtered with search query */
filteredUsers: any[];
result: boolean;
car: Car;
location : Location;
startTime : Date;
pipe : CustomtimePipe = new CustomtimePipe();
session: boolean;
/**
* Sets up the component with the User Service injected
* @param userService - Allows the component to work with the user service (for updating)
* @param {AuthService} authService - Allows Authentication Services to be utilized
*/
constructor(private userService: UserControllerService,
private authService: AuthService, private zone: NgZone, private locationSerivce: GeocodeService,
private router: Router) {
this.router.routeReuseStrategy.shouldReuseRoute = () => false;
}
/**
* Sets up the form with data about the current user
*/
ngOnInit() |
// Loads the first car. Done this way because the original batch modeled the
// car-user relationship as one-to-many; it should have been one-to-one.
console.log("PRINTING OUT CAR = " + this.principal.cars[0].match(/\d+/)[0]);
this.userService.getCarById(Number(this.principal.cars[0].match(/\d+/)[0])).subscribe( e => {
this.car = e;
console.log("PRINTING OUT E KEVIN = " + JSON.stringify(e));
});
this.sessionCheck();
}
console.log(user);
if (this.principal) {
}
});
this.getOffice();
console.log(this.officeObject);
console.log(this.principal);
}
sessionCheck() {
if (this.principal.id > 0) {
this.session = true;
} else {
this.session = false;
}
}
/**
* Allows the form to be edited
*/
edit() {
document.getElementById('firstName').removeAttribute('disabled');
document.getElementById('lastName').removeAttribute('disabled');
// document.getElementById("email").removeAttribute("disabled");
// document.getElementById("password").removeAttribute("disabled");
// document.getElementById("confirmPassword").removeAttribute("disabled");
document.getElementById('address').removeAttribute('disabled');
//document.getElementById('batchEnd').removeAttribute('disabled');
//document.getElementById('dayStart').removeAttribute('disabled');
document.getElementById('switchRoles').removeAttribute('hidden');
// Had to put this in an if; Page would break if Admin or Trainer clicked edit
// Since for them, this button didn't exist to make visible
if (this.currentRole === Role.Driver || this.currentRole === Role.Rider) {
document.getElementById('switchStates').removeAttribute('hidden');
}
document.getElementById('edit').style.display = 'none';
document.getElementById('submit').style.display = 'inline';
// document.getElementById("batchEnd").setAttribute("type", "date");
// document.getElementById("currentOffice").style.display = "none";
// document.getElementById("selectOffice").style.display = "inline";
//document.getElementById('errorMessage').removeAttribute('hidden');
}
/**
* Updates the user once he/she is content with the updates
*/
submitChanges() {
this.principal.firstName = this.firstName;
this.principal.lastName = this.lastName;
// this.principal.address = this.address2;
// this.principal.startTime = this.startTime(); //Need this, but currently no value
this.authService.changePrincipal(this.principal);
this.userService.update().then();
this.authService.changePrincipal(this.principal);
// debug console.log("routing");
this.router.navigate(['userProfile']);
}
onAddressSelect(address: string) {
this.zone.run(() => (this.principal.location.address = address));
this.populateLocation();
}
// Populate user location by finding the latitude and longitude via the Maps service.
populateLocation() {
this.locationSerivce.getlocation(this.principal.location).subscribe(data => {
console.log(data);
this.principal.location = data;
});
}
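// Illustrative sketch (assumed contract): GeocodeService.getlocation takes the
// partially-filled Location and emits it back with coordinates set; the
// latitude/longitude field names are assumptions for illustration.
//
//   this.locationSerivce.getlocation(this.principal.location)
//     .subscribe(loc => console.log(loc.latitude, loc.longitude));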
/**
* Enables limited ability to modify the User's role in the system
*/
switchRole() {
if (this.principal.role === Role.Driver) {
this.principal.role = Role.Rider;
this.getRole();
} else if (this.principal.role === Role.Rider) {
this.principal.role = Role.Driver;
this.getRole();
} else {
console.log('nope');
}
}
switchState() {
if (this.principal.active === 'ACTIVE') {
this.principal.active = 'INACTIVE';
this.getState();
} else if (this.principal.active === 'INACTIVE') {
this.principal.active = 'ACTIVE';
this.getState();
} else {
console.log('Invalid State');
}
}
/**
* Gets the list of offices from the database
*/
getOffices() {
this.userService.getAllOffices().subscribe(data => {
this.officeObjectArray = data;
});
}
getOffice() {
this.userService.getOfficeByLink(this.principal.office).subscribe(data => {
this.officeObject = data;
});
}
/**
* Sets up the User's current role in the system
*/
getRole() {
this.currentRole = this.principal.role;
}
getState() {
this.currentState = this.principal.active;
}
updatePassword() {
this.userService.updatePassword(this.principal.email, this.oldPassword, this.password).subscribe();
}
updateUserStatus(id: number, active: string) {
if (active !== 'DISABLED') {
this.result = window.confirm('Are you sure you want to disable this account?');
active = 'DISABLED';
} else {
this.result = window.confirm('Are you sure you want to enable this account?');
active = 'ACTIVE';
}
if (this.result) {
this.userService.updateStatus(id, active).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
/** Revert a trainer to a user */
makeRider(id: number) {
this.result = window.confirm('Are you sure you want to make this trainer a rider?');
if (this.result) {
this.userService.updateRole(id, Role.Rider).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeTrainer(id: number) {
this.result = window.confirm('Are you sure you want to make this user a trainer?');
if (this.result) {
this.userService.updateRole(id, Role.Trainer).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
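// The role mutators in this section share one confirm-then-persist shape.
// A possible consolidation (hypothetical helper, not part of the original API;
// assumes updateRole returns a Promise, as the .then() calls above imply):
confirmAndUpdateRole(id: number, role: Role, message: string) {
if (window.confirm(message)) {
this.userService.updateRole(id, role).then(() => location.reload(true));
} else {
alert('No changes will be made');
}
}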
makeAdmin(id: number) {
this.result = window.confirm('Are you sure you | {
this.authService.principal.subscribe(user => {
this.principal = user;
if (this.principal.id > 0) {
this.existingBio = this.principal.bio;
this.firstName = this.principal.firstName;
this.lastName = this.principal.lastName;
this.username = this.principal.email;
this._address = this.principal.location.address;
this.batchEnd = new Date(this.principal.batchEnd).toLocaleDateString();
this.startTime = this.pipe.transform(this.principal.startTime);
console.log(this.startTime);
//this.getOffice();
this.getRole();
this.getState();
this.filteredUsers = this.users; | identifier_body |
view-profile.component.ts | { UserControllerService } from '../../services/api/user-controller.service';
import { Car } from '../../models/car.model';
import { Link } from '../../models/link.model';
import { GeocodeService } from '../../services/geocode.service';
import { CustomtimePipe} from '../../pipes/customtime.pipe';
/**
* Represents the page that allows users to view (and edit) their profile
*/
@Component({
selector: 'app-view-profile',
templateUrl: './view-profile.component.html',
styleUrls: ['./view-profile.component.css']
})
export class ViewProfileComponent implements OnInit {
/** The User being selected */
currentUser: User;
/** The current role of the logged on user in string form */
currentRole: Role;
/** The first name of the user (hooked to form item in html) */
firstName: string;
/** The last name of the user (hooked to form item in html) */
lastName: string;
/** The user name of the user (hooked to form item in html) */
username: string;
/** The old password of the user (will be hooked up to form in html) */
oldPassword: string;
/** The new password of the user (hooked to form item in html) */
password: string;
/** The new password of the user, used to confirm User knows the password (hooked to form item in html) */
confirmPassword: string;
/** The address of the user (hooked to form item in html) */
_address: string;
/** The day the User's batch ends*/
batchEnd: any;
contactInfoArray: ContactInfo[] = [];
/** Whether the user can make changes (Currently not used) */
canEdit = false;
/** List of offices held by the user */
officeObjectArray: Office[] = [];
/** Current office being examined */
officeObject: Office;
/** User's active state */
active: string;
existingBio: string;
existingBioStatus: boolean = false;
principal: User;
currentState: string;
/** Holds the list of all users in the system */
users: any[];
/** Holds the list of users filtered with search query */
filteredUsers: any[];
result: boolean;
car: Car;
location : Location;
startTime : Date;
pipe : CustomtimePipe = new CustomtimePipe();
session: boolean;
/**
* Sets up the component with the User Service injected
* @param userService - Allows the component to work with the user service (for updating)
* @param {AuthService} authService - Allows Authentication Services to be utilized
*/
constructor(private userService: UserControllerService,
private authService: AuthService, private zone: NgZone, private locationSerivce: GeocodeService,
private router: Router) {
this.router.routeReuseStrategy.shouldReuseRoute = () => false;
}
/**
* Sets up the form with data about the current user
*/
ngOnInit() {
this.authService.principal.subscribe(user => {
this.principal = user;
if (this.principal.id > 0) {
this.existingBio = this.principal.bio;
this.firstName = this.principal.firstName;
this.lastName = this.principal.lastName;
this.username = this.principal.email;
this._address = this.principal.location.address;
this.batchEnd = new Date(this.principal.batchEnd).toLocaleDateString();
this.startTime = this.pipe.transform(this.principal.startTime);
console.log(this.startTime);
//this.getOffice();
this.getRole();
this.getState();
this.filteredUsers = this.users;
//Loads the first car. Done this way because the original batch made the car-user relationship one-to-many;
//it should have been one-to-one.
console.log("PRINTING OUT CAR = " + this.principal.cars[0].match(/\d+/)[0]);
this.userService.getCarById(Number(this.principal.cars[0].match(/\d+/)[0])).subscribe( e => {
this.car = e;
console.log("PRINTING OUT E KEVIN = " + JSON.stringify(e));
});
this.sessionCheck();
}
console.log(user);
if (this.principal) {
}
});
this.getOffice();
console.log(this.officeObject);
console.log(this.principal);
}
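// Sketch of the link-id extraction inlined in ngOnInit above (assumes
// HAL-style URLs such as ".../cars/42" stored in principal.cars):
idFromLink(link: string): number {
const match = link.match(/\d+/); // first run of digits in the URL
return match ? Number(match[0]) : NaN; // NaN when the link carries no id
}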
sessionCheck() {
if (this.principal.id > 0) {
this.session = true;
} else {
this.session = false;
}
}
/**
* Allows the form to be edited
*/
edit() {
document.getElementById('firstName').removeAttribute('disabled');
document.getElementById('lastName').removeAttribute('disabled');
// document.getElementById("email").removeAttribute("disabled");
// document.getElementById("password").removeAttribute("disabled");
// document.getElementById("confirmPassword").removeAttribute("disabled");
document.getElementById('address').removeAttribute('disabled');
//document.getElementById('batchEnd').removeAttribute('disabled');
//document.getElementById('dayStart').removeAttribute('disabled');
document.getElementById('switchRoles').removeAttribute('hidden');
// Wrapped in an if: the page would break when an Admin or Trainer clicked edit,
// since this button does not exist for those roles.
if (this.currentRole === Role.Driver || this.currentRole === Role.Rider) {
document.getElementById('switchStates').removeAttribute('hidden');
}
document.getElementById('edit').style.display = 'none';
document.getElementById('submit').style.display = 'inline';
// document.getElementById("batchEnd").setAttribute("type", "date");
// document.getElementById("currentOffice").style.display = "none";
// document.getElementById("selectOffice").style.display = "inline";
//document.getElementById('errorMessage').removeAttribute('hidden');
}
/**
* Updates the user once he/she is content with the updates
*/
submitChanges() {
this.principal.firstName = this.firstName;
this.principal.lastName = this.lastName;
// this.principal.address = this.address2;
// this.principal.startTime = this.startTime(); //Need this, but currently no value
this.authService.changePrincipal(this.principal);
this.userService.update().then();
this.authService.changePrincipal(this.principal);
// debug console.log("routing");
this.router.navigate(['userProfile']);
}
onAddressSelect(address: string) {
this.zone.run(() => (this.principal.location.address = address)); | populateLocation() {
this.locationSerivce.getlocation(this.principal.location).subscribe(data => {
console.log(data);
this.principal.location = data;
});
}
/**
* Enables limited ability to modify the User's role in the system
*/
switchRole() {
if (this.principal.role === Role.Driver) {
this.principal.role = Role.Rider;
this.getRole();
} else if (this.principal.role === Role.Rider) {
this.principal.role = Role.Driver;
this.getRole();
} else {
console.log('Role switching is only available to Drivers and Riders');
}
}
switchState() {
if (this.principal.active === 'ACTIVE') {
this.principal.active = 'INACTIVE';
this.getState();
} else if (this.principal.active === 'INACTIVE') {
this.principal.active = 'ACTIVE';
this.getState();
} else {
console.log('Invalid State');
}
}
/**
* Gets the list of offices from the database
*/
getOffices() {
this.userService.getAllOffices().subscribe(data => {
this.officeObjectArray = data;
});
}
getOffice() {
this.userService.getOfficeByLink(this.principal.office).subscribe(data => {
this.officeObject = data;
});
}
/**
* Sets up the User's current role in the system
*/
getRole() {
this.currentRole = this.principal.role;
}
getState() {
this.currentState = this.principal.active;
}
updatePassword() {
this.userService.updatePassword(this.principal.email, this.oldPassword, this.password).subscribe();
}
updateUserStatus(id: number, active: string) {
if (active !== 'DISABLED') {
this.result = window.confirm('Are you sure you want to disable this account?');
active = 'DISABLED';
} else {
this.result = window.confirm('Are you sure you want to enable this account?');
active = 'ACTIVE';
}
if (this.result) {
this.userService.updateStatus(id, active).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
/** Revert a trainer to a user */
makeRider(id: number) {
this.result = window.confirm('Are you sure you want to make this trainer a rider?');
if (this.result) {
this.userService.updateRole(id, Role.Rider).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeTrainer(id: number) {
this.result = window.confirm('Are you sure you want to make this user a trainer?');
if (this.result) {
this.userService.updateRole(id, Role.Trainer).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeAdmin(id: number) {
this.result = window.confirm('Are you sure you want | this.populateLocation();
}
// Populate the user's location by finding the latitude and longitude via the Maps service. | random_line_split
view-profile.component.ts | { UserControllerService } from '../../services/api/user-controller.service';
import { Car } from '../../models/car.model';
import { Link } from '../../models/link.model';
import { GeocodeService } from '../../services/geocode.service';
import { CustomtimePipe} from '../../pipes/customtime.pipe';
/**
* Represents the page that allows users to view (and edit) their profile
*/
@Component({
selector: 'app-view-profile',
templateUrl: './view-profile.component.html',
styleUrls: ['./view-profile.component.css']
})
export class ViewProfileComponent implements OnInit {
/** The User being selected */
currentUser: User;
/** The current role of the logged on user in string form */
currentRole: Role;
/** The first name of the user (hooked to form item in html) */
firstName: string;
/** The last name of the user (hooked to form item in html) */
lastName: string;
/** The user name of the user (hooked to form item in html) */
username: string;
/** The old password of the user (will be hooked up to form in html) */
oldPassword: string;
/** The new password of the user (hooked to form item in html) */
password: string;
/** The new password of the user, used to confirm User knows the password (hooked to form item in html) */
confirmPassword: string;
/** The address of the user (hooked to form item in html) */
_address: string;
/** The day the User's batch ends*/
batchEnd: any;
contactInfoArray: ContactInfo[] = [];
/** Whether the user can make changes (Currently not used) */
canEdit = false;
/** List of offices held by the user */
officeObjectArray: Office[] = [];
/** Current office being examined */
officeObject: Office;
/** User's active state */
active: string;
existingBio: string;
existingBioStatus: boolean = false;
principal: User;
currentState: string;
/** Holds the list of all users in the system */
users: any[];
/** Holds the list of users filtered with search query */
filteredUsers: any[];
result: boolean;
car: Car;
location : Location;
startTime : Date;
pipe : CustomtimePipe = new CustomtimePipe();
session: boolean;
/**
* Sets up the component with the User Service injected
* @param userService - Allows the component to work with the user service (for updating)
* @param {AuthService} authService - Allows Authentication Services to be utilized
*/
constructor(private userService: UserControllerService,
private authService: AuthService, private zone: NgZone, private locationSerivce: GeocodeService,
private router: Router) {
this.router.routeReuseStrategy.shouldReuseRoute = () => false;
}
/**
* Sets up the form with data about the current user
*/
ngOnInit() {
this.authService.principal.subscribe(user => {
this.principal = user;
if (this.principal.id > 0) {
this.existingBio = this.principal.bio;
this.firstName = this.principal.firstName;
this.lastName = this.principal.lastName;
this.username = this.principal.email;
this._address = this.principal.location.address;
this.batchEnd = new Date(this.principal.batchEnd).toLocaleDateString();
this.startTime = this.pipe.transform(this.principal.startTime);
console.log(this.startTime);
//this.getOffice();
this.getRole();
this.getState();
this.filteredUsers = this.users;
//Loads the first car. Done this way because the original batch made the car-user relationship one-to-many;
//it should have been one-to-one.
console.log("PRINTING OUT CAR = " + this.principal.cars[0].match(/\d+/)[0]);
this.userService.getCarById(Number(this.principal.cars[0].match(/\d+/)[0])).subscribe( e => {
this.car = e;
console.log("PRINTING OUT E KEVIN = " + JSON.stringify(e));
});
this.sessionCheck();
}
console.log(user);
if (this.principal) {
}
});
this.getOffice();
console.log(this.officeObject);
console.log(this.principal);
}
sessionCheck() {
if (this.principal.id > 0) {
this.session = true;
} else {
this.session = false;
}
}
/**
* Allows the form to be edited
*/
edit() {
document.getElementById('firstName').removeAttribute('disabled');
document.getElementById('lastName').removeAttribute('disabled');
// document.getElementById("email").removeAttribute("disabled");
// document.getElementById("password").removeAttribute("disabled");
// document.getElementById("confirmPassword").removeAttribute("disabled");
document.getElementById('address').removeAttribute('disabled');
//document.getElementById('batchEnd').removeAttribute('disabled');
//document.getElementById('dayStart').removeAttribute('disabled');
document.getElementById('switchRoles').removeAttribute('hidden');
// Wrapped in an if: the page would break when an Admin or Trainer clicked edit,
// since this button does not exist for those roles.
if (this.currentRole === Role.Driver || this.currentRole === Role.Rider) {
document.getElementById('switchStates').removeAttribute('hidden');
}
document.getElementById('edit').style.display = 'none';
document.getElementById('submit').style.display = 'inline';
// document.getElementById("batchEnd").setAttribute("type", "date");
// document.getElementById("currentOffice").style.display = "none";
// document.getElementById("selectOffice").style.display = "inline";
//document.getElementById('errorMessage').removeAttribute('hidden');
}
/**
* Updates the user once he/she is content with the updates
*/
submitChanges() {
this.principal.firstName = this.firstName;
this.principal.lastName = this.lastName;
// this.principal.address = this.address2;
// this.principal.startTime = this.startTime(); //Need this, but currently no value
this.authService.changePrincipal(this.principal);
this.userService.update().then();
this.authService.changePrincipal(this.principal);
// debug console.log("routing");
this.router.navigate(['userProfile']);
}
onAddressSelect(address: string) {
this.zone.run(() => (this.principal.location.address = address));
this.populateLocation();
}
// Populate the user's location by finding the latitude and longitude via the Maps service.
populateLocation() {
this.locationSerivce.getlocation(this.principal.location).subscribe(data => {
console.log(data);
this.principal.location = data;
});
}
/**
* Enables limited ability to modify the User's role in the system
*/
switchRole() {
if (this.principal.role === Role.Driver) {
this.principal.role = Role.Rider;
this.getRole();
} else if (this.principal.role === Role.Rider) {
this.principal.role = Role.Driver;
this.getRole();
} else {
console.log('Role switching is only available to Drivers and Riders');
}
}
switchState() {
if (this.principal.active === 'ACTIVE') {
this.principal.active = 'INACTIVE';
this.getState();
} else if (this.principal.active === 'INACTIVE') {
this.principal.active = 'ACTIVE';
this.getState();
} else {
console.log('Invalid State');
}
}
/**
* Gets the list of offices from the database
*/
getOffices() {
this.userService.getAllOffices().subscribe(data => {
this.officeObjectArray = data;
});
}
getOffice() {
this.userService.getOfficeByLink(this.principal.office).subscribe(data => {
this.officeObject = data;
});
}
/**
* Sets up the User's current role in the system
*/
getRole() {
this.currentRole = this.principal.role;
}
getState() {
this.currentState = this.principal.active;
}
updatePassword() {
this.userService.updatePassword(this.principal.email, this.oldPassword, this.password).subscribe();
}
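// A possible guard to run before updatePassword(), using the password fields
// declared above (hypothetical; the original calls the service unconditionally):
passwordsMatch(): boolean {
return !!this.password && this.password === this.confirmPassword;
}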
updateUserStatus(id: number, active: string) {
if (active !== 'DISABLED') {
this.result = window.confirm('Are you sure you want to disable this account?');
active = 'DISABLED';
} else {
this.result = window.confirm('Are you sure you want to enable this account?');
active = 'ACTIVE';
}
if (this.result) {
this.userService.updateStatus(id, active).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
/** Revert a trainer to a user */
| (id: number) {
this.result = window.confirm('Are you sure you want to make this trainer a rider?');
if (this.result) {
this.userService.updateRole(id, Role.Rider).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeTrainer(id: number) {
this.result = window.confirm('Are you sure you want to make this user a trainer?');
if (this.result) {
this.userService.updateRole(id, Role.Trainer).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeAdmin(id: number) {
this.result = window.confirm('Are you sure you want | makeRider | identifier_name |
view-profile.component.ts | ocodeService } from '../../services/geocode.service';
import { CustomtimePipe} from '../../pipes/customtime.pipe';
/**
* Represents the page that allows users to view (and edit) their profile
*/
@Component({
selector: 'app-view-profile',
templateUrl: './view-profile.component.html',
styleUrls: ['./view-profile.component.css']
})
export class ViewProfileComponent implements OnInit {
/** The User being selected */
currentUser: User;
/** The current role of the logged on user in string form */
currentRole: Role;
/** The first name of the user (hooked to form item in html) */
firstName: string;
/** The last name of the user (hooked to form item in html) */
lastName: string;
/** The user name of the user (hooked to form item in html) */
username: string;
/** The old password of the user (will be hooked up to form in html) */
oldPassword: string;
/** The new password of the user (hooked to form item in html) */
password: string;
/** The new password of the user, used to confirm User knows the password (hooked to form item in html) */
confirmPassword: string;
/** The address of the user (hooked to form item in html) */
_address: string;
/** The day the User's batch ends*/
batchEnd: any;
contactInfoArray: ContactInfo[] = [];
/** Whether the user can make changes (Currently not used) */
canEdit = false;
/** List of offices held by the user */
officeObjectArray: Office[] = [];
/** Current office being examined */
officeObject: Office;
/** User's active state */
active: string;
existingBio: string;
existingBioStatus: boolean = false;
principal: User;
currentState: string;
/** Holds the list of all users in the system */
users: any[];
/** Holds the list of users filtered with search query */
filteredUsers: any[];
result: boolean;
car: Car;
location : Location;
startTime : Date;
pipe : CustomtimePipe = new CustomtimePipe();
session: boolean;
/**
* Sets up the component with the User Service injected
* @param userService - Allows the component to work with the user service (for updating)
* @param {AuthService} authService - Allows Authentication Services to be utilized
*/
constructor(private userService: UserControllerService,
private authService: AuthService, private zone: NgZone, private locationSerivce: GeocodeService,
private router: Router) {
this.router.routeReuseStrategy.shouldReuseRoute = () => false;
}
/**
* Sets up the form with data about the current user
*/
ngOnInit() {
this.authService.principal.subscribe(user => {
this.principal = user;
if (this.principal.id > 0) {
this.existingBio = this.principal.bio;
this.firstName = this.principal.firstName;
this.lastName = this.principal.lastName;
this.username = this.principal.email;
this._address = this.principal.location.address;
this.batchEnd = new Date(this.principal.batchEnd).toLocaleDateString();
this.startTime = this.pipe.transform(this.principal.startTime);
console.log(this.startTime);
//this.getOffice();
this.getRole();
this.getState();
this.filteredUsers = this.users;
//Loads the first car. Done this way because the original batch made the car-user relationship one-to-many;
//it should have been one-to-one.
console.log("PRINTING OUT CAR = " + this.principal.cars[0].match(/\d+/)[0]);
this.userService.getCarById(Number(this.principal.cars[0].match(/\d+/)[0])).subscribe( e => {
this.car = e;
console.log("PRINTING OUT E KEVIN = " + JSON.stringify(e));
});
this.sessionCheck();
}
console.log(user);
if (this.principal) {
}
});
this.getOffice();
console.log(this.officeObject);
console.log(this.principal);
}
sessionCheck() {
if (this.principal.id > 0) {
this.session = true;
} else {
this.session = false;
}
}
/**
* Allows the form to be edited
*/
edit() {
document.getElementById('firstName').removeAttribute('disabled');
document.getElementById('lastName').removeAttribute('disabled');
// document.getElementById("email").removeAttribute("disabled");
// document.getElementById("password").removeAttribute("disabled");
// document.getElementById("confirmPassword").removeAttribute("disabled");
document.getElementById('address').removeAttribute('disabled');
//document.getElementById('batchEnd').removeAttribute('disabled');
//document.getElementById('dayStart').removeAttribute('disabled');
document.getElementById('switchRoles').removeAttribute('hidden');
// Wrapped in an if: the page would break when an Admin or Trainer clicked edit,
// since this button does not exist for those roles.
if (this.currentRole === Role.Driver || this.currentRole === Role.Rider) {
document.getElementById('switchStates').removeAttribute('hidden');
}
document.getElementById('edit').style.display = 'none';
document.getElementById('submit').style.display = 'inline';
// document.getElementById("batchEnd").setAttribute("type", "date");
// document.getElementById("currentOffice").style.display = "none";
// document.getElementById("selectOffice").style.display = "inline";
//document.getElementById('errorMessage').removeAttribute('hidden');
}
/**
* Updates the user once he/she is content with the updates
*/
submitChanges() {
this.principal.firstName = this.firstName;
this.principal.lastName = this.lastName;
// this.principal.address = this.address2;
// this.principal.startTime = this.startTime(); //Need this, but currently no value
this.authService.changePrincipal(this.principal);
this.userService.update().then();
this.authService.changePrincipal(this.principal);
// debug console.log("routing");
this.router.navigate(['userProfile']);
}
onAddressSelect(address: string) {
this.zone.run(() => (this.principal.location.address = address));
this.populateLocation();
}
// Populate the user's location by finding the latitude and longitude via the Maps service.
populateLocation() {
this.locationSerivce.getlocation(this.principal.location).subscribe(data => {
console.log(data);
this.principal.location = data;
});
}
/**
* Enables limited ability to modify the User's role in the system
*/
switchRole() {
if (this.principal.role === Role.Driver) {
this.principal.role = Role.Rider;
this.getRole();
} else if (this.principal.role === Role.Rider) {
this.principal.role = Role.Driver;
this.getRole();
} else {
console.log('Role switching is only available to Drivers and Riders');
}
}
switchState() {
if (this.principal.active === 'ACTIVE') {
this.principal.active = 'INACTIVE';
this.getState();
} else if (this.principal.active === 'INACTIVE') {
this.principal.active = 'ACTIVE';
this.getState();
} else {
console.log('Invalid State');
}
}
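// Note: switchRole()/switchState() only mutate the local principal copy;
// nothing reaches the server until submitChanges() runs userService.update(),
// so backing out of an edit leaves the stored profile untouched.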
/**
* Gets the list of offices from the database
*/
getOffices() {
this.userService.getAllOffices().subscribe(data => {
this.officeObjectArray = data;
});
}
getOffice() {
this.userService.getOfficeByLink(this.principal.office).subscribe(data => {
this.officeObject = data;
});
}
/**
* Sets up the User's current role in the system
*/
getRole() {
this.currentRole = this.principal.role;
}
getState() {
this.currentState = this.principal.active;
}
updatePassword() {
this.userService.updatePassword(this.principal.email, this.oldPassword, this.password).subscribe();
}
updateUserStatus(id: number, active: string) {
if (active !== 'DISABLED') {
this.result = window.confirm('Are you sure you want to disable this account?');
active = 'DISABLED';
} else {
this.result = window.confirm('Are you sure you want to enable this account?');
active = 'ACTIVE';
}
if (this.result) {
this.userService.updateStatus(id, active).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
/** Revert a trainer to a user */
makeRider(id: number) {
this.result = window.confirm('Are you sure you want to make this trainer a rider?');
if (this.result) {
this.userService.updateRole(id, Role.Rider).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeTrainer(id: number) {
this.result = window.confirm('Are you sure you want to make this user a trainer?');
if (this.result) {
this.userService.updateRole(id, Role.Trainer).then();
location.reload(true);
} else {
alert('No changes will be made');
}
}
makeAdmin(id: number) {
this.result = window.confirm('Are you sure you want to make this user an admin?');
if (this.result) | {
this.userService.updateRole(id, Role.Admin).then();
location.reload(true);
} | conditional_block |
|
main_2.py |
exp_date BETWEEN '{year}-{month}-01' AND '{year}-{month}-{days_in_month}'
AND exp_type = 'DR'
AND username = '{user}';""")
try:
expense = cursor.fetchone()[0]
if expense is None:
expense = 0
except TypeError:
print("No records found. Setting expense to None")
expense = None
cursor.close()
return (income, expense)
def get_user_details(user: str) -> List[str]:
cursor = conn.cursor()
cursor.execute(f"""
SELECT user_id,username,passwd,email_id,first_name,last_name
FROM users
WHERE username = '{user}';
""")
user_details = cursor.fetchall()[0]
cursor.close()
return user_details
def username_used(user: str) -> bool:
cursor = conn.cursor()
cursor.execute(f"""
SELECT COUNT(username) FROM users
WHERE username = '{user}';
""")
user_count = cursor.fetchone()[0]
print(f"No. of users with username {user} = {user_count}")
if user_count != 0:
return True
else:
return False
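# Hedged sketch: the queries in this module build SQL with f-strings, which is
# injection-prone. A parameterized variant of the check above (same behaviour,
# with the value bound by the driver instead of interpolated):
def username_used_safe(user: str) -> bool:
    cursor = conn.cursor()
    cursor.execute("SELECT COUNT(username) FROM users WHERE username = %s;", (user,))
    used = cursor.fetchone()[0] != 0
    cursor.close()
    return used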
def get_transactions(user: Union[str, int],
n: int = 10000,
start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
asc_or_desc: str = "ASC",
orderer: str = "particulars") -> List[Tuple]:
headings = [
"Particulars",
"Type",
"Amount",
"Date"
]
cursor = conn.cursor()
where_clause_part_1 = f"username = '{user}'" if type(
user) is str else f"user_id = {user}"
where_clause = where_clause_part_1 + f"""
AND
exp_date BETWEEN '{start_date}' AND '{end_date}'
ORDER BY {orderer} {asc_or_desc}
"""
# <------------ Counts number of transactions falling into the requirements and returns them to the slider ----------------> #
query = f"""
SELECT COUNT(*)
FROM transactions
WHERE {where_clause};
"""
cursor.execute(query)
number_of_records = cursor.fetchone()[0]
cursor.reset()
query = f"""
SELECT particulars,exp_type,amount,exp_date
FROM transactions
WHERE {where_clause}
"""
if number_of_records < n:
limit = f" LIMIT {number_of_records};"
else:
limit = f" LIMIT {n};"
cursor.execute(query+limit)
transactions: List[Tuple] = cursor.fetchall()
print(transactions)
trans_table = Table(transactions, headings, key="table", right_click_menu=["Options", [
"Edit", "Delete"]], enable_events=True) if number_of_records != 0 else Table(["No records to display"], headings=[" "*50], key="table")
return transactions, trans_table, number_of_records
# <-------------------- PyPlot -------------------------> #
def get_graph_values(start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
exp_type: str = "All"
):
global graph_active
cursor = conn.cursor()
q_cr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "CR"
ORDER BY exp_date;
"""
q_dr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "DR"
ORDER BY exp_date;
"""
def plot_graphs():
if exp_type == 'Credit':
q = q_cr
elif exp_type == 'Debit':
q = q_dr
elif exp_type == 'All':
q1 = q_cr
q2 = q_dr
x = np.arange(1, days_in_month)
plt.xticks(np.arange(1,days_in_month+1),range(1,days_in_month+1))
if exp_type in ("Credit", "Debit"):
cursor.execute(q)
points = cursor.fetchall()
x = np.array([point[2] for point in points])
y = np.array([point[1] for point in points])
plt.plot(x, y, marker = "o",label=exp_type)
plt.grid(True)
else:
# <------- Credit -------> #
cursor.execute(q1)
points_1 = cursor.fetchall()
x1 = np.array([point[2] for point in points_1]) # Dates
y1 = np.array([point[1] for point in points_1]) # Amount
cursor.reset()
# <------- Debit -------> #
cursor.execute(q2)
points_2 = cursor.fetchall()
x2 = np.array([point[2] for point in points_2])
y2 = np.array([point[1] for point in points_2])
plt.plot(x1, y1, marker="o", label="Credit")
plt.plot(x2, y2, marker="x", label="Debit")
plt.grid(True)
plt.title(f"Report for the month of {month_name}-{year}")
plt.legend()
fig = plt.gcf() # gcf -> get current figure #
fig_x, fig_y, fig_w, fig_h = fig.bbox.bounds
return fig, fig_w, fig_h
# q_all = f"""
# SELECT particulars,amount,DAY(exp_date)
# FROM transactions
# WHERE
# exp_date BETWEEN "{start_date}" AND "{end_date}"
# ORDER BY exp_date;
# """
if not graph_active:
|
else:
# plt.clf()
return plot_graphs()
# graph_active = True
cursor.close()
# <------------------------------- Beginning of Matplotlib helper code -----------------------> #
# <----- Taken from https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Matplotlib.py -----> #
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
def delete_figure_agg(figure_agg):
figure_agg.get_tk_widget().forget()
plt.close('all')
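# Typical embed/redraw cycle with the two helpers above (illustrative):
#   fig, w, h = get_graph_values()
#   figure_agg = draw_figure(window['canvas'].TKCanvas, fig)
#   ...user picks new filters...
#   delete_figure_agg(figure_agg)  # clear the old canvas before drawing again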
# <----------------------- GUI -----------------------------> #
# ________ ___ ___ ___
# |\ ____\|\ \|\ \|\ \
# \ \ \___|\ \ \\\ \ \ \
# \ \ \ __\ \ \\\ \ \ \
# \ \ \|\ \ \ \\\ \ \ \
# \ \_______\ \_______\ \__\
# \|_______|\|_______|\|__|
'''
Why am I using a class to store all my GUI functions?
-> So that I could use locally created values like vals and user_details within other functions
and to prevent me from getting a headache while managing scopes.
No, seriously though, an object really helps when local values would otherwise have to be
juggled as globals, and it keeps the programming enjoyable and painless.
'''
class Xpnsit:
def __init__(self):
self.app_state: bool = True
# <------------------- Misc. Functions (Layouts and Updaters and stuff) --------------------> #
def Add_Trans(self, particulars: str, _type: str, amount: float, date: str):
cursor = conn.cursor()
try:
cursor.execute(f"""
INSERT INTO transactions (
user_id,
username,
particulars,
exp_type,
amount,
exp_date
)
VALUES (
{self.user.user_id},
'{self.user.uname}',
'{particulars}',
'{_type}',
{amount},
"{date}"
);
""")
conn.commit()
Popup("Transaction successfully added.")
self.win.Refresh()
except SQLErrors.ProgrammingError:
PopupError("ERROR: Invalid details.\nRectify and try again.")
cursor.close()
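# Sketch (assuming the same pymysql connection): the INSERT above could bind
# its values rather than interpolate them, e.g.
#   cursor.execute(
#       "INSERT INTO transactions "
#       "(user_id, username, particulars, exp_type, amount, exp_date) "
#       "VALUES (%s, %s, %s, %s, %s, %s);",
#       (self.user.user_id, self.user.uname, particulars, _type, amount, date))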
def Create_Add_Trans_Layout(self):
layout = [
[T("New Transaction", font=("Helvetica", 18))],
[T("NOTE:", font=("Helvetica", 20)), T(
"All fields are required to be filled.")],
[T("Particulars:"), Multiline("Enter details of transaction",
autoscroll=True, key="Particulars")],
[T("Transaction type:"), Combo(["Select", "Credit", "Debit"],
"Select", readonly=True, key="new_type")],
[T("Amount:"), Input(enable_events=True, key="amount")],
[T("Date Of Transaction:"), Input("YYYY-MM-DD or use the button on the right",
key="date"), CalendarButton("Select Date", target="date", format="%Y-%m-%d")],
[Submit()]
]
return layout
def History(self):
history_values, table, no_of_records = get_transactions(
self.user.uname)
self.slider = | return plot_graphs() | conditional_block |
main_2.py | Union[str, int],
n: int = 10000,
start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
asc_or_desc: str = "ASC",
orderer: str = "particulars") -> List[Tuple]:
headings = [
"Particulars",
"Type",
"Amount",
"Date"
]
cursor = conn.cursor()
where_clause_part_1 = f"username = '{user}'" if type(
user) is str else f"user_id = {user}"
where_clause = where_clause_part_1 + f"""
AND
exp_date BETWEEN '{start_date}' AND '{end_date}'
ORDER BY {orderer} {asc_or_desc}
"""
# <------------ Counts number of transactions falling into the requirements and returns them to the slider ----------------> #
query = f"""
SELECT COUNT(*)
FROM transactions
WHERE {where_clause};
"""
cursor.execute(query)
number_of_records = cursor.fetchone()[0]
cursor.reset()
query = f"""
SELECT particulars,exp_type,amount,exp_date
FROM transactions
WHERE {where_clause}
"""
if number_of_records < n:
limit = f" LIMIT {number_of_records};"
else:
limit = f" LIMIT {n};"
cursor.execute(query+limit)
transactions: List[Tuple] = cursor.fetchall()
print(transactions)
trans_table = Table(transactions, headings, key="table", right_click_menu=["Options", [
"Edit", "Delete"]], enable_events=True) if number_of_records != 0 else Table(["No records to display"], headings=[" "*50], key="table")
return transactions, trans_table, number_of_records
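# Illustrative call ("alice" is a placeholder username): the 25 most recent
# transactions, newest first.
#   rows, table, count = get_transactions("alice", n=25,
#                                         asc_or_desc="DESC", orderer="exp_date")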
# <-------------------- PyPlot -------------------------> #
def get_graph_values(start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
exp_type: str = "All"
):
global graph_active
cursor = conn.cursor()
q_cr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "CR"
ORDER BY exp_date;
"""
q_dr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "DR"
ORDER BY exp_date;
"""
def plot_graphs():
if exp_type == 'Credit':
q = q_cr
elif exp_type == 'Debit':
q = q_dr
elif exp_type == 'All':
q1 = q_cr
q2 = q_dr
x = np.arange(1, days_in_month)
plt.xticks(np.arange(1,days_in_month+1),range(1,days_in_month+1))
if exp_type in ("Credit", "Debit"):
cursor.execute(q)
points = cursor.fetchall()
x = np.array([point[2] for point in points])
y = np.array([point[1] for point in points])
plt.plot(x, y, marker = "o",label=exp_type)
plt.grid(True)
else:
# <------- Credit -------> #
cursor.execute(q1)
points_1 = cursor.fetchall()
x1 = np.array([point[2] for point in points_1]) # Dates
y1 = np.array([point[1] for point in points_1]) # Amount
cursor.reset()
# <------- Debit -------> #
cursor.execute(q2)
points_2 = cursor.fetchall()
x2 = np.array([point[2] for point in points_2])
y2 = np.array([point[1] for point in points_2])
plt.plot(x1, y1, marker="o", label="Credit")
plt.plot(x2, y2, marker="x", label="Debit")
plt.grid(True)
plt.title(f"Report for the month of {month_name}-{year}")
plt.legend()
fig = plt.gcf() # gcf -> get current figure #
fig_x, fig_y, fig_w, fig_h = fig.bbox.bounds
return fig, fig_w, fig_h
# q_all = f"""
# SELECT particulars,amount,DAY(exp_date)
# FROM transactions
# WHERE
# exp_date BETWEEN "{start_date}" AND "{end_date}"
# ORDER BY exp_date;
# """
if not graph_active:
return plot_graphs()
else:
# plt.clf()
return plot_graphs()
# graph_active = True
cursor.close()
# <------------------------------- Beginning of Matplotlib helper code -----------------------> #
# <----- Taken from https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Matplotlib.py -----> #
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
def delete_figure_agg(figure_agg):
figure_agg.get_tk_widget().forget()
plt.close('all')
# <----------------------- GUI -----------------------------> #
# ________ ___ ___ ___
# |\ ____\|\ \|\ \|\ \
# \ \ \___|\ \ \\\ \ \ \
# \ \ \ __\ \ \\\ \ \ \
# \ \ \|\ \ \ \\\ \ \ \
# \ \_______\ \_______\ \__\
# \|_______|\|_______|\|__|
'''
Why am I using a class to store all my GUI functions?
-> So that I could use locally created values like vals and user_details within other functions
and to prevent me from getting a headache while managing scopes.
No, seriously though, an object really helps when local values would otherwise have to be
juggled as globals, and it keeps the programming enjoyable and painless.
'''
class Xpnsit:
def __init__(self):
self.app_state: bool = True
# <------------------- Misc. Functions (Layouts and Updaters and stuff) --------------------> #
def Add_Trans(self, particulars: str, _type: str, amount: float, date: str):
cursor = conn.cursor()
try:
cursor.execute(f"""
INSERT INTO transactions (
user_id,
username,
particulars,
exp_type,
amount,
exp_date
)
VALUES (
{self.user.user_id},
'{self.user.uname}',
'{particulars}',
'{_type}',
{amount},
"{date}"
);
""")
conn.commit()
Popup("Transaction successfully added.")
self.win.Refresh()
except SQLErrors.ProgrammingError:
PopupError("ERROR: Invalid details.\nRectify and try again.")
cursor.close()
def Create_Add_Trans_Layout(self):
layout = [
[T("New Transaction", font=("Helvetica", 18))],
[T("NOTE:", font=("Helvetica", 20)), T(
"All fields are required to be filled.")],
[T("Particulars:"), Multiline("Enter details of transaction",
autoscroll=True, key="Particulars")],
[T("Transaction type:"), Combo(["Select", "Credit", "Debit"],
"Select", readonly=True, key="new_type")],
[T("Amount:"), Input(enable_events=True, key="amount")],
[T("Date Of Transaction:"), Input("YYYY-MM-DD or use the button on the right",
key="date"), CalendarButton("Select Date", target="date", format="%Y-%m-%d")],
[Submit()]
]
return layout
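# Sketch of how this tab's Submit event could be consumed (assumes the UI
# labels map to the 'CR'/'DR' codes used by the queries above):
#   if event == 'Submit':
#       code = 'CR' if values['new_type'] == 'Credit' else 'DR'
#       self.Add_Trans(values['Particulars'], code,
#                      float(values['amount']), values['date'])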
def History(self):
history_values, table, no_of_records = get_transactions(
self.user.uname)
self.slider = sg.Slider(
range=(0, no_of_records),
default_value=no_of_records,
orientation='h',
enable_events=True,
key='slider'
)
layout = [
[T("Transaction History", font=("Helvetica", 18))],
[T("All your transactions, in one place. Right click any one to delete or edit it.")],
[T('Number of records to be shown:'), self.slider],
[T("Show records from "),
Input(f"{year}-{month}-1", key="start_date", size=(10, 1)),
CalendarButton("Start date", target="start_date", default_date_m_d_y=(
month, 1, year), button_color=("white", "green"), format="%Y-%m-%d"),
T("to"),
Input(f"{year}-{month}-{day}", key="end_date", size=(10, 1)),
CalendarButton("End date", target="end_date", default_date_m_d_y=(
month, day, year), button_color=("white", "red"), format="%Y-%m-%d")
],
[T("Type:"), Combo(["All", "Credit", "Debit"], | random_line_split |
||
main_2.py |
exp_date BETWEEN '{year}-{month}-01' AND '{year}-{month}-{days_in_month}'
AND exp_type = 'DR'
AND username = '{user}';""")
try:
expense = cursor.fetchone()[0]
if expense is None:
expense = 0
except TypeError:
print("No records found. Setting expense to None")
expense = None
cursor.close()
return (income, expense)
def get_user_details(user: str) -> List[str]:
cursor = conn.cursor()
cursor.execute(f"""
SELECT user_id,username,passwd,email_id,first_name,last_name
FROM users
WHERE username = '{user}';
""")
user_details = cursor.fetchall()[0]
cursor.close()
return user_details
def username_used(user: str) -> bool:
cursor = conn.cursor()
cursor.execute(f"""
SELECT COUNT(username) FROM users
WHERE username = '{user}';
""")
user_count = cursor.fetchone()[0]
print(f"No. of users with username {user} = {user_count}")
if user_count != 0:
return True
else:
return False
def | (user: Union[str, int],
n: int = 10000,
start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
asc_or_desc: str = "ASC",
orderer: str = "particulars") -> List[Tuple]:
headings = [
"Particulars",
"Type",
"Amount",
"Date"
]
cursor = conn.cursor()
where_clause_part_1 = f"username = '{user}'" if type(
user) is str else f"user_id = {user}"
where_clause = where_clause_part_1 + f"""
AND
exp_date BETWEEN '{start_date}' AND '{end_date}'
ORDER BY {orderer} {asc_or_desc}
"""
# <------------ Counts number of transactions falling into the requirements and returns them to the slider ----------------> #
query = f"""
SELECT COUNT(*)
FROM transactions
WHERE {where_clause};
"""
cursor.execute(query)
number_of_records = cursor.fetchone()[0]
cursor.reset()
query = f"""
SELECT particulars,exp_type,amount,exp_date
FROM transactions
WHERE {where_clause}
"""
if number_of_records < n:
limit = f" LIMIT {number_of_records};"
else:
limit = f" LIMIT {n};"
cursor.execute(query+limit)
transactions: List[Tuple] = cursor.fetchall()
print(transactions)
trans_table = Table(transactions, headings, key="table", right_click_menu=["Options", [
"Edit", "Delete"]], enable_events=True) if number_of_records != 0 else Table(["No records to display"], headings=[" "*50], key="table")
return transactions, trans_table, number_of_records
# <-------------------- PyPlot -------------------------> #
def get_graph_values(start_date: str = f"{year}-{month}-1",
end_date: str = f"{year}-{month}-{day}",
exp_type: str = "All"
):
global graph_active
cursor = conn.cursor()
q_cr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "CR"
ORDER BY exp_date;
"""
q_dr = f"""
SELECT particulars,amount,DAY(exp_date)
FROM transactions
WHERE
exp_date BETWEEN "{start_date}" AND "{end_date}"
AND exp_type = "DR"
ORDER BY exp_date;
"""
def plot_graphs():
if exp_type == 'Credit':
q = q_cr
elif exp_type == 'Debit':
q = q_dr
elif exp_type == 'All':
q1 = q_cr
q2 = q_dr
x = np.arange(1, days_in_month)
plt.xticks(np.arange(1,days_in_month+1),range(1,days_in_month+1))
if exp_type in ("Credit", "Debit"):
cursor.execute(q)
points = cursor.fetchall()
x = np.array([point[2] for point in points])
y = np.array([point[1] for point in points])
plt.plot(x, y, marker = "o",label=exp_type)
plt.grid(True)
else:
# <------- Credit -------> #
cursor.execute(q1)
points_1 = cursor.fetchall()
x1 = np.array([point[2] for point in points_1]) # Dates
y1 = np.array([point[1] for point in points_1]) # Amount
cursor.reset()
# <------- Debit -------> #
cursor.execute(q2)
points_2 = cursor.fetchall()
x2 = np.array([point[2] for point in points_2])
y2 = np.array([point[1] for point in points_2])
plt.plot(x1, y1, marker="o", label="Credit")
plt.plot(x2, y2, marker="x", label="Debit")
plt.grid(True)
plt.title(f"Report for the month of {month_name}-{year}")
plt.legend()
fig = plt.gcf() # gcf -> get current figure #
fig_x, fig_y, fig_w, fig_h = fig.bbox.bounds
return fig, fig_w, fig_h
# q_all = f"""
# SELECT particulars,amount,DAY(exp_date)
# FROM transactions
# WHERE
# exp_date BETWEEN "{start_date}" AND "{end_date}"
# ORDER BY exp_date;
# """
if not graph_active:
return plot_graphs()
else:
# plt.clf()
return plot_graphs()
# graph_active = True
cursor.close()
# <------------------------------- Beginning of Matplotlib helper code -----------------------> #
# <----- Taken from https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Matplotlib.py -----> #
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
def delete_figure_agg(figure_agg):
figure_agg.get_tk_widget().forget()
plt.close('all')
# <----------------------- GUI -----------------------------> #
# ________ ___ ___ ___
# |\ ____\|\ \|\ \|\ \
# \ \ \___|\ \ \\\ \ \ \
# \ \ \ __\ \ \\\ \ \ \
# \ \ \|\ \ \ \\\ \ \ \
# \ \_______\ \_______\ \__\
# \|_______|\|_______|\|__|
'''
Why am I using a class to store all my GUI functions?
-> So that I could use locally created values like vals and user_details within other functions
and to prevent me from getting a headache while managing scopes.
No, seriously though, an object really helps when local values would otherwise have to be
juggled as globals, and it keeps the programming enjoyable and painless.
'''
class Xpnsit:
def __init__(self):
self.app_state: bool = True
# <------------------- Misc. Functions (Layouts and Updaters and stuff) --------------------> #
def Add_Trans(self, particulars: str, _type: str, amount: float, date: str):
cursor = conn.cursor()
try:
cursor.execute(f"""
INSERT INTO transactions (
user_id,
username,
particulars,
exp_type,
amount,
exp_date
)
VALUES (
{self.user.user_id},
'{self.user.uname}',
'{particulars}',
'{_type}',
{amount},
"{date}"
);
""")
conn.commit()
Popup("Transaction successfully added.")
self.win.Refresh()
except SQLErrors.ProgrammingError:
PopupError("ERROR: Invalid details.\nRectify and try again.")
cursor.close()
def Create_Add_Trans_Layout(self):
layout = [
[T("New Transaction", font=("Helvetica", 18))],
[T("NOTE:", font=("Helvetica", 20)), T(
"All fields are required to be filled.")],
[T("Particulars:"), Multiline("Enter details of transaction",
autoscroll=True, key="Particulars")],
[T("Transaction type:"), Combo(["Select", "Credit", "Debit"],
"Select", readonly=True, key="new_type")],
[T("Amount:"), Input(enable_events=True, key="amount")],
[T("Date Of Transaction:"), Input("YYYY-MM-DD or use the button on the right",
key="date"), CalendarButton("Select Date", target="date", format="%Y-%m-%d")],
[Submit()]
]
return layout
def History(self):
history_values, table, no_of_records = get_transactions(
self.user.uname)
self.slider = | get_transactions | identifier_name |
main_2.py | \ \
# \ \_______\ \_______\ \__\
# \|_______|\|_______|\|__|
'''
Why am I using a class to store all my GUI functions?
-> So that I could use locally created values like vals and user_details within other functions
and to prevent me from getting a headache while managing scopes.
No, seriously though, making an object really helps while handling local objects as globals, making the programming
enjoyable and painless.
'''
class Xpnsit:
def __init__(self):
self.app_state: bool = True
# <------------------- Misc. Functions (Layouts and Updaters and stuff) --------------------> #
def Add_Trans(self, particulars: str, _type: str, amount: float, date: str):
cursor = conn.cursor()
try:
cursor.execute(f"""
INSERT INTO transactions (
user_id,
username,
particulars,
exp_type,
amount,
exp_date
)
VALUES (
{self.user.user_id},
'{self.user.uname}',
'{particulars}',
'{_type}',
{amount},
"{date}"
);
""")
conn.commit()
Popup("Transaction successfully added.")
self.win.Refresh()
except SQLErrors.ProgrammingError:
PopupError("ERROR: Invalid details.\nRectify and try again.")
cursor.close()
def Create_Add_Trans_Layout(self):
layout = [
[T("New Transaction", font=("Helvetica", 18))],
[T("NOTE:", font=("Helvetica", 20)), T(
"All fields are required to be filled.")],
[T("Particulars:"), Multiline("Enter details of transaction",
autoscroll=True, key="Particulars")],
[T("Transaction type:"), Combo(["Select", "Credit", "Debit"],
"Select", readonly=True, key="new_type")],
[T("Amount:"), Input(enable_events=True, key="amount")],
[T("Date Of Transaction:"), Input("YYYY-MM-DD or use the button on the right",
key="date"), CalendarButton("Select Date", target="date", format="%Y-%m-%d")],
[Submit()]
]
return layout
def History(self):
history_values, table, no_of_records = get_transactions(
self.user.uname)
self.slider = sg.Slider(
range=(0, no_of_records),
default_value=no_of_records,
orientation='h',
enable_events=True,
key='slider'
)
layout = [
[T("Transaction History", font=("Helvetica", 18))],
[T("All your transactions, in one place. Right click any one to delete or edit it.")],
[T('Number of records to be shown:'), self.slider],
[T("Show records from "),
Input(f"{year}-{month}-1", key="start_date", size=(10, 1)),
CalendarButton("Start date", target="start_date", default_date_m_d_y=(
month, 1, year), button_color=("white", "green"), format="%Y-%m-%d"),
T("to"),
Input(f"{year}-{month}-{day}", key="end_date", size=(10, 1)),
CalendarButton("End date", target="end_date", default_date_m_d_y=(
month, day, year), button_color=("white", "red"), format="%Y-%m-%d")
],
[T("Type:"), Combo(["All", "Credit", "Debit"],
default_value="All", key="used_type", readonly=True)],
[T("Sort by:"), Combo(["Name", "Amount", "Date of Transaction"],
default_value="Name", key="sort_by", readonly=True), Combo(["Ascending", "Descending"], default_value="Ascending", key="asc_or_desc", readonly=True)],
[table, Button("Refresh", button_color=(
"white", "orange"), bind_return_key=True, key="refresh")],
]
self.history_active = True
return layout
def update_table(self):
start, end = self.values['start_date'], self.values["end_date"]
aod = 'ASC' if self.values["asc_or_desc"] == "Ascending" else "DESC"
sort = "particulars" if self.values["sort_by"] == "Name" else "amount" if self.values["sort_by"] == "Amount" else "exp_date"
n = self.values["slider"] if self.event == 'slider' else 10000
new_trans, new_table, new_number_of_trans = get_transactions(
self.user.user_id,
int(n),
start,
end,
aod, # a(scending)o(r)d(escending)
sort
)
print(new_trans, new_table, new_number_of_trans)
self.win["table"].Update(new_trans) # Updates table
# Updates max number of records to be possibly displayed
self.win["slider"].Update(range=(0, new_number_of_trans+1))
# Updates the default value of the slider to be the max
self.slider.Update(value=new_number_of_trans)
self.win.Refresh()
def create_graph(self):
fig, w, h = get_graph_values(
self.values['a_start_date'],
self.values['a_end_date'],
self.values["a_type"],
)
self.figure_agg = draw_figure(
self.win['canvas'].TKCanvas, fig)
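# Sketch: a redraw should clear the previous figure first, otherwise canvases
# stack up on the Canvas element, e.g.
#   if getattr(self, 'figure_agg', None):
#       delete_figure_agg(self.figure_agg)
#   self.create_graph()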
# <------------------ Main Screens --------------------> #
def Login(self):
login_active = True
layout = [
[T("Xpnsit", **heading_format)],
[T("Username:"), Input(key="user")],
[T("Password:"), Input(key="pass", password_char='*')],
[Button("Login", bind_return_key=True), Button("Signup")]
]
win = Window("Xpnsit", layout=layout)
while login_active: # <------------ Event Loop -----------------> #
event, values = win.Read()
if event is None:
print("Exiting event loop")
login_active = False
self.app_state = False
win.close()
del win
break
if event == "Login":
success = check_login_info(values["user"], values["pass"])
if success == True:
print("Login Successful.")
self.user_details = get_user_details(values["user"])
self.user = NewUser(*self.user_details)
win.close()
self.Interface()
login_active = False
else:
PopupError(
"ERROR: Username or password incorrect.\nPlease try again.")
if event == "Signup":
self.Signup()
def Signup(self):
signup_active = True
layout = [
[T("Signup for Xpnsit", **heading_format), ],
[T("First Name:"), Input(size=(15, 1), key="f_name"), T(
" "), T("Last Name:"), Input(size=(15, 1), key="l_name")],
[T("Username:", justification='center'),
Input(size=(35, 1), key="user")],
[T("Password:", justification='center'), Input(
size=(35, 1), key="pass", password_char="*")],
[T("Email:", justification='center'), Input(size=(35, 1), key="mail")],  # "mail" is read on Submit below
[T(' '*40), Submit()]
]
signup_win = Window("Xpnsit - Signup", layout=layout)
while signup_active: # <------------ Event Loop -----------------> #
event, values = signup_win.Read()
if event in (None, 'Exit'):
signup_active = False
login_active = True
if event == 'Submit':
self.vals = [values["user"], values["pass"],
values["mail"], values["f_name"], values["l_name"]]
if not username_used(self.vals[0]):
create_account(*self.vals)
# <------------------- Confirmation of Insertion ------------------> #
success = check_login_info(values["user"], values["pass"])
if success == True:
print("Signup Successful.")
Popup(
"Signup Successful!",
"Exit this popup to return to the login page"
)
signup_win.close()
signup_active = False
login_active = True
else:
PopupError("ERROR: Username already in usage",
title="Username already taken")
def Dashboard(self):
| income, expenses = get_income_and_expense(self.user.uname)
if (income, expenses) == (None, None):
dash_layout = [
[T(f"Welcome {self.user.first_name}")],
[T("Looks like you have no transactions!\nGo add one in the Transactions tab.",
justification="center")],
[T("-"*60, text_color="gray")],
]
else:
dash_layout = [
[T(f"Welcome {self.user.first_name}")],
[T(f"Your expenses for {month_name}-{year} are:"),
T(str(expenses), font=("Arial", 20))],
[T(f"Your income for {month_name}-{year} is:"),
T(str(income), font=("Arial", 20))],
[T("-"*80, text_color="gray")],
[T("Net Profit/Loss:", font=("Segoe", 18)),
T(str(income-expenses), font=("Arial", 24))]
] | identifier_body |
|
calSettings.py | size: self.size
pos: self.pos
<HolidayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (128, 0, 128, 0.5)
Rectangle:
pos: self.pos
size: self.size
<HalfdayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (0, 255, 255, 0.25)
Rectangle:
pos: self.pos
size: self.size
""")
class ToggleBtn(ToggleButton):
pass
class HolidayBtn(ToggleButton):
pass
class HalfdayBtn(ToggleButton):
pass
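# Note: these three one-line subclasses exist only so the kv rules in the
# Builder string above can give each button type its own canvas.before colour
# (plain toggle, purple holiday, cyan half-day).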
class CalendarWidgetS(RelativeLayout):
""" Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidgetS, self).__init__(*args, **kwargs)
self.as_popup = as_popup
self.touch_switch = touch_switch
#self.selectedDates = []
self.prepare_data()
self.init_ui()
def init_ui(self):
|
self.right_arrow = Button(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1}, size_hint=(.1, .1))
self.add_widget(self.left_arrow)
self.add_widget(self.right_arrow)
# Title
self.title_label = Label(text=self.title, pos_hint={"top": 1, "center_x": .5}, size_hint=(None, 0.15), halign=("center"))
self.add_widget(self.title_label)
# ScreenManager
self.sm = ScreenManager(pos_hint={"top": .9}, size_hint=(1, .9))
self.add_widget(self.sm)
self.create_month_scr(self.quarter[1], toogle_today=True)
def create_month_scr(self, month, toogle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1})
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))
else: # work days
l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign="center")
grid_layout.add_widget(l)
global holiday, halfday
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 6: # weekends
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
else:
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
for i in range(len(holiday)):
if self.active_date[2] == holiday[i][2]:
if self.active_date[1] == holiday[i][1]:
if day[0] == holiday[i][0]:
self.tbtn.background_color=(128, 0, 128, 1)
for i in range(len(halfday)):
if self.active_date[2] == halfday[i][2]:
if self.active_date[1] == halfday[i][1]:
if day[0] == halfday[i][0]:
self.tbtn.background_color=(0, 255, 255, 0.5)
self.tbtn.bind(on_press=self.get_btn_value)
if toogle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
self.tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
self.tbtn.text = " "
self.tbtn.disabled = True
self.tbtn.background_color = (0, 0, 0, 0.1)
grid_layout.add_widget(self.tbtn)
self.sm.add_widget(scr)
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
# Quarter where current month in the self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
selectedDates.append(selected)
if self.as_popup:
self.parent_popup.dismiss()
#getInfo.openPopup()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
# If it doesn't exist yet, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
# Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
# If it doesn't exist, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
import pymysql
def paintDates():
global holiday, halfday
db = pymysql.connect(host=credentials['address'], user=credentials['username'], password=credentials['password'], database=credentials['db'], autocommit=True, connect_timeout=1)
cur = db.cursor()
cur.execute("SELECT DAY, MONTH, YEAR, DETAIL FROM essl.month_details")
for data in cur.fetchall():
if data[3] == 'HOLIDAY':
holiday.append([data[0], data[1], data[2]])
else:
halfday.append([data[0], data[1], data[2]])
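# Expected shape of essl.month_details, inferred from the query above:
# integer DAY, MONTH, YEAR columns plus a DETAIL flag that is either
# 'HOLIDAY' or anything else (treated here as a half day).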
def setup():
paintDates()
calSettingsLayout = BoxLayout(orientation='vertical')
daySetLayout = BoxLayout(orientation='horizontal', size_hint_y=0.2)
holidayBtn = HolidayBtn(text='HOLIDAY', size_hint_x=0.5, color=(128, 0, 128, 1), bold=True)
daySetLayout.add_widget(holidayBtn)
halfdayBtn = HalfdayBtn(text=' | self.left_arrow = Button(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0}, size_hint=(.1, .1)) | random_line_split |
calSettings.py | size: self.size
pos: self.pos
<HolidayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (128, 0, 128, 0.5)
Rectangle:
pos: self.pos
size: self.size
<HalfdayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (0, 255, 255, 0.25)
Rectangle:
pos: self.pos
size: self.size
""")
class ToggleBtn(ToggleButton):
pass
class HolidayBtn(ToggleButton):
pass
class HalfdayBtn(ToggleButton):
pass
class CalendarWidgetS(RelativeLayout):
| self.add_widget(self.right_arrow)
# Title
self.title_label = Label(text=self.title, pos_hint={"top": 1, "center_x": .5}, size_hint=(None, 0.15), halign=("center"))
self.add_widget(self.title_label)
# ScreenManager
self.sm = ScreenManager(pos_hint={"top": .9}, size_hint=(1, .9))
self.add_widget(self.sm)
self.create_month_scr(self.quarter[1], toogle_today=True)
def create_month_scr(self, month, toogle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1})
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))
else: # work days
l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign="center")
grid_layout.add_widget(l)
global holiday, halfday
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 6: # weekends
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
else:
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
for i in range(len(holiday)):
if self.active_date[2] == holiday[i][2]:
if self.active_date[1] == holiday[i][1]:
if day[0] == holiday[i][0]:
self.tbtn.background_color=(128, 0, 128, 1)
for i in range(len(halfday)):
if self.active_date[2] == halfday[i][2]:
if self.active_date[1] == halfday[i][1]:
if day[0] == halfday[i][0]:
self.tbtn.background_color=(0, 255, 255, 0.5)
self.tbtn.bind(on_press=self.get_btn_value)
if toogle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
self.tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
self.tbtn.text = " "
self.tbtn.disabled = True
self.tbtn.background_color = (0, 0, 0, 0.1)
grid_layout.add_widget(self.tbtn)
self.sm.add_widget(scr)
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
# Quarter that holds the current month at self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
selectedDates.append(selected)
if self.as_popup:
self.parent_popup.dismiss()
#getInfo.openPopup()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
# If it doesn't exist, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
# Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
# If it doesn't exist, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
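# touch.dpos is the delta of this single motion event, so only a quick
# horizontal drag of more than 30 pixels in one event flips the month.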
import pymysql
def paintDates():
global holiday, halfday
db = pymysql.connect(host=credentials['address'], user=credentials['username'], password=credentials['password'], database=credentials['db'], autocommit=True, connect_timeout=1)
cur = db.cursor()
cur.execute("SELECT DAY, MONTH, YEAR, DETAIL FROM essl.month_details")
for data in cur.fetchall():
if data[3] == 'HOLIDAY':
holiday.append([data[0], data[1], data[2]])
else:
halfday.append([data[0], data[1], data[2]])
def setup():
paintDates()
calSettingsLayout = BoxLayout(orientation='vertical')
daySetLayout = BoxLayout(orientation='horizontal', size_hint_y=0.2)
holidayBtn = HolidayBtn(text='HOLIDAY', size_hint_x=0.5, color=(128, 0, 128, 1), bold=True)
daySetLayout.add_widget(holidayBtn)
halfdayBtn = HalfdayBtn(text='HAL | """ Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidgetS, self).__init__(*args, **kwargs)
self.as_popup = as_popup
self.touch_switch = touch_switch
#self.selectedDates = []
self.prepare_data()
self.init_ui()
def init_ui(self):
self.left_arrow = Button(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0}, size_hint=(.1, .1))
self.right_arrow = Button(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1}, size_hint=(.1, .1))
self.add_widget(self.left_arrow) | identifier_body |
calSettings.py | size: self.size
pos: self.pos
<HolidayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (128, 0, 128, 0.5)
Rectangle:
pos: self.pos
size: self.size
<HalfdayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (0, 255, 255, 0.25)
Rectangle:
pos: self.pos
size: self.size
""")
class | (ToggleButton):
pass
class HolidayBtn(ToggleButton):
pass
class HalfdayBtn(ToggleButton):
pass
class CalendarWidgetS(RelativeLayout):
""" Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidgetS, self).__init__(*args, **kwargs)
self.as_popup = as_popup
self.touch_switch = touch_switch
#self.selectedDates = []
self.prepare_data()
self.init_ui()
def init_ui(self):
self.left_arrow = Button(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0}, size_hint=(.1, .1))
self.right_arrow = Button(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1}, size_hint=(.1, .1))
self.add_widget(self.left_arrow)
self.add_widget(self.right_arrow)
# Title
self.title_label = Label(text=self.title, pos_hint={"top": 1, "center_x": .5}, size_hint=(None, 0.15), halign=("center"))
self.add_widget(self.title_label)
# ScreenManager
self.sm = ScreenManager(pos_hint={"top": .9}, size_hint=(1, .9))
self.add_widget(self.sm)
self.create_month_scr(self.quarter[1], toogle_today=True)
def create_month_scr(self, month, toogle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1})
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))
else: # work days
l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign="center")
grid_layout.add_widget(l)
global holiday, halfday
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 6: # weekends
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
else:
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
for i in range(len(holiday)):
if self.active_date[2] == holiday[i][2]:
if self.active_date[1] == holiday[i][1]:
if day[0] == holiday[i][0]:
self.tbtn.background_color=(128, 0, 128, 1)
for i in range(len(halfday)):
if self.active_date[2] == halfday[i][2]:
if self.active_date[1] == halfday[i][1]:
if day[0] == halfday[i][0]:
self.tbtn.background_color=(0, 255, 255, 0.5)
self.tbtn.bind(on_press=self.get_btn_value)
if toogle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
self.tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
self.tbtn.text = " "
self.tbtn.disabled = True
self.tbtn.background_color = (0, 0, 0, 0.1)
grid_layout.add_widget(self.tbtn)
self.sm.add_widget(scr)
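# Screens are keyed "<month>-<year>" (e.g. "march-2015"); go_prev/go_next
# below only rebuild a month when sm.has_screen() misses that key.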
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
# Quarter that holds the current month at self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
selectedDates.append(selected)
if self.as_popup:
self.parent_popup.dismiss()
#getInfo.openPopup()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
# If it doesn't exist, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
# Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
# If it doesn't exist, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
import pymysql
def paintDates():
global holiday, halfday
db = pymysql.connect(host=credentials['address'], user=credentials['username'], password=credentials['password'], database=credentials['db'], autocommit=True, connect_timeout=1)
cur = db.cursor()
cur.execute("SELECT DAY, MONTH, YEAR, DETAIL FROM essl.month_details")
for data in cur.fetchall():
if data[3] == 'HOLIDAY':
holiday.append([data[0], data[1], data[2]])
else:
halfday.append([data[0], data[1], data[2]])
def setup():
paintDates()
calSettingsLayout = BoxLayout(orientation='vertical')
daySetLayout = BoxLayout(orientation='horizontal', size_hint_y=0.2)
holidayBtn = HolidayBtn(text='HOLIDAY', size_hint_x=0.5, color=(128, 0, 128, 1), bold=True)
daySetLayout.add_widget(holidayBtn)
halfdayBtn = HalfdayBtn(text=' | ToggleBtn | identifier_name |
calSettings.py | size: self.size
pos: self.pos
<HolidayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (128, 0, 128, 0.5)
Rectangle:
pos: self.pos
size: self.size
<HalfdayBtn>:
font_name: 'fonts/moon-bold.otf'
canvas.before:
Color:
rgba: (0, 255, 255, 0.25)
Rectangle:
pos: self.pos
size: self.size
""")
class ToggleBtn(ToggleButton):
pass
class HolidayBtn(ToggleButton):
pass
class HalfdayBtn(ToggleButton):
pass
class CalendarWidgetS(RelativeLayout):
""" Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidgetS, self).__init__(*args, **kwargs)
self.as_popup = as_popup
self.touch_switch = touch_switch
#self.selectedDates = []
self.prepare_data()
self.init_ui()
def init_ui(self):
self.left_arrow = Button(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0}, size_hint=(.1, .1))
self.right_arrow = Button(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1}, size_hint=(.1, .1))
self.add_widget(self.left_arrow)
self.add_widget(self.right_arrow)
# Title
self.title_label = Label(text=self.title, pos_hint={"top": 1, "center_x": .5}, size_hint=(None, 0.15), halign=("center"))
self.add_widget(self.title_label)
# ScreenManager
self.sm = ScreenManager(pos_hint={"top": .9}, size_hint=(1, .9))
self.add_widget(self.sm)
self.create_month_scr(self.quarter[1], toogle_today=True)
def create_month_scr(self, month, toogle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1})
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))
else: # work days
l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign="center")
grid_layout.add_widget(l)
global holiday, halfday
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 6: # weekends
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
else:
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
for i in range(len(holiday)):
if self.active_date[2] == holiday[i][2]:
if self.active_date[1] == holiday[i][1]:
if day[0] == holiday[i][0]:
self.tbtn.background_color=(128, 0, 128, 1)
for i in range(len(halfday)):
if self.active_date[2] == halfday[i][2]:
if self.active_date[1] == halfday[i][1]:
if day[0] == halfday[i][0]:
self.tbtn.background_color=(0, 255, 255, 0.5)
self.tbtn.bind(on_press=self.get_btn_value)
if toogle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
self.tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
self.tbtn.text = " "
self.tbtn.disabled = True
self.tbtn.background_color = (0, 0, 0, 0.1)
grid_layout.add_widget(self.tbtn)
self.sm.add_widget(scr)
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
# Quarter that holds the current month at self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
|
if self.as_popup:
self.parent_popup.dismiss()
#getInfo.openPopup()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
# If it doesn't exist, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
# Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
# If it doesn't exist, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.month_names[self.active_date[1] - 1],
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
import pymysql
def paintDates():
global holiday, halfday
db = pymysql.connect(host=credentials['address'], user=credentials['username'], password=credentials['password'], database=credentials['db'], autocommit=True, connect_timeout=1)
cur = db.cursor()
cur.execute("SELECT DAY, MONTH, YEAR, DETAIL FROM essl.month_details")
for data in cur.fetchall():
if data[3] == 'HOLIDAY':
holiday.append([data[0], data[1], data[2]])
else:
halfday.append([data[0], data[1], data[2]])
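# The connection above is never closed. A minimal hardening sketch, assuming
# the same credentials dict and the keyword-argument pymysql API:
# db = pymysql.connect(host=credentials['address'], user=credentials['username'],
#                      password=credentials['password'], database=credentials['db'])
# try:
#     ...  # run the SELECT and fill holiday/halfday as above
# finally:
#     db.close()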
def setup():
paintDates()
calSettingsLayout = BoxLayout(orientation='vertical')
daySetLayout = BoxLayout(orientation='horizontal', size_hint_y=0.2)
holidayBtn = HolidayBtn(text='HOLIDAY', size_hint_x=0.5, color=(128, 0, 128, 1), bold=True)
daySetLayout.add_widget(holidayBtn)
halfdayBtn = HalfdayBtn(text=' | selectedDates.append(selected) | conditional_block |
checktime.go | of the RootCA and ServerName
// for servers that are not signed by a well known certificate
// authority. It will skip the authentication for the server. It
// is not recommended outside of a test environment.
NoValidate bool `yaml:"no_validate"`
// This option turns off encryption entirely
// it is only for testing
NoTLS bool `yaml:"no_tls"`
// Pretend true is run, but don't actually set the time
Pretend bool `yaml:"pretend"`
// This is the PEM encoded SSL client certificate. This is required
// for all https based client connections. It provides the relay identity
// to the server
// ClientCertificate []byte
// ClientCertificateString string `yaml:"client_cert"`
// // This is the PEM encoded SSL client private key. This is required
// // for all https based client connections.
// ClientKey []byte
// ClientKeyString string `yaml:"client_key"`
// // This is the hostname or IP address of the time server
Host string `yaml:"host"`
// This is the port of the time server
Port int `yaml:"port"`
// CheckTimeInterval in seconds
CheckTimeInterval int `yaml:"check_time_interval"`
// If this flag is set, client library logging will be printed
//EnableLogging bool
// number of buffers to hold. Remember, grease lib also holds its own buffers, so this should be minimal
// (optional)
//NumBuffers uint32 `yaml:"num_buffers"`
// MaxBuffers is the max number of the said buffers
// MaxBuffers uint32 `yaml:"max_buffers"`
// // BufferSize is the size of each of these buffers in bytes
// //BufferSize uint32 `yaml:"buffer_size"`
// // SendSizeThreshold is the amount of bytes being held before the
// // worker will start sending
// SendSizeThreshold uint32 `yaml:"send_size_threshold"`
// // SendTimeThreshold is the amount of time in milliseconds before the worker
// // will start sending logs
// SendTimeThreshold uint32 `yaml:"send_time_threshold"`
}
type TimeClient struct {
client *http.Client
tlsconfig tls.Config
host string
port int
url string
statusChannel chan int
// used to shutdown the client only
stopChannel chan struct{}
running bool
locker sync.Mutex
checkTimeInterval time.Duration
pretend bool
// if true, the sender backs off "backoff" time
backingOff bool
backoff time.Duration
}
type ClientError struct {
StatusCode int
Status string
}
func (err *ClientError) Error() string {
return fmt.Sprintf("TIME Client Error: %d - %s", err.StatusCode, err.Status)
}
func newClientError(resp *http.Response) (ret *ClientError) {
ret = new(ClientError)
ret.StatusCode = resp.StatusCode
ret.Status = resp.Status
return
}
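// Typical use, mirroring getTime further down: wrap any non-200 reply, e.g.
//   if resp.StatusCode != 200 { err = newClientError(resp) }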
// StatusChannel returns the status channel which can be used to know if time is set
// if nothing reads the channel, the time will be set anyway, and a simple log message is
// printed out.
func (client *TimeClient) StatusChannel() (ok bool, status chan int) {
client.locker.Lock()
ok = client.running
status = client.statusChannel
client.locker.Unlock()
return
}
// Run starts the client
func (client *TimeClient) Run() {
go client.worker()
}
// Stop the current client's worker
func (client *TimeClient) Stop() {
client.locker.Lock()
if client.running {
close(client.stopChannel)
}
client.locker.Unlock()
}
// NewClient creates a new TimeClient and validates the config
func NewClient(config *ClientConfig) (ok bool, ret *TimeClient, err error) {
ret = new(TimeClient)
ret.statusChannel = make(chan int)
err = ret.Reconfigure(config)
if err == nil {
ok = true
}
return
}
// Reconfigure allows you to reconfigure the client
func (client *TimeClient) Reconfigure(config *ClientConfig) (err error) {
client.pretend = config.Pretend
client.host = config.Host
client.port = config.Port
client.checkTimeInterval = time.Duration(config.CheckTimeInterval) * time.Second
if client.checkTimeInterval < 5*time.Second { // a bare 5 would mean 5ns on a Duration
client.checkTimeInterval = defaultCheckTimeInterval
}
if !config.NoTLS {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// TLSClientConfig: clientTLSConfig,
},
}
if len(config.Host) > 0 {
client.url = "https://" + config.Host + "/api/time"
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
} else {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
if len(config.Host) > 0 {
client.url = "http://" + config.Host
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
}
return
}
// // this is actually from the golang source - but is really new - 1.10 only:
// // TimeToTimespec converts t into a Timespec.
// // On some 32-bit systems the range of valid Timespec values are smaller
// // than that of time.Time values. So if t is out of the valid range of
// // Timespec, it returns a zero Timespec and EINVAL.
// func TimeToTimespec(t time.Time) (Timespec, error) {
// sec := t.Unix()
// nsec := int64(t.Nanosecond())
// ts := setTimespec(sec, nsec)
// // Currently all targets have either int32 or int64 for Timespec.Sec.
// // If there were a new target with floating point type for it, we have
// // to consider the rounding error.
// if int64(ts.Sec) != sec {
// return Timespec{}, EINVAL
// }
// return ts, nil
// }
type timeResponse struct {
Time int64 `json:"time"`
}
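// The server is expected to answer with JSON like {"time": 1514786400000};
// judging by the sanity bound elsewhere in this file, the value is likely
// milliseconds since the Unix epoch.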
func (client *TimeClient) getTime() (err error, errcode int, ret *timeResponse) {
// fetch the current time from the server
var req *http.Request
var resp *http.Response
debugging.DEBUG_OUT("TIME GET %s >>>\n", client.url)
// build the GET request (empty body)
req, err = http.NewRequest("GET", client.url, nil)
// req.Cancel = c
if err == nil {
resp, err = client.client.Do(req)
// close the response body on every path, not only when Do() fails
if resp != nil {
defer resp.Body.Close()
}
debugging.DEBUG_OUT("TIME --> response +%v\n", resp)
if err == nil {
if resp != nil {
if resp.StatusCode != 200 {
debugging.DEBUG_OUT("TIME bad response - creating error object\n")
err = newClientError(resp)
errcode = BadResponse
return
} else {
ret = new(timeResponse)
dec := json.NewDecoder(resp.Body)
if dec != nil {
err = dec.Decode(ret)
if err != nil {
err = errors.New("Bad response")
errcode = BadResponse
ret = nil
}
} else {
err = errors.New("Failed to create decoder")
errcode = BadResponse
}
}
} else {
err = errors.New("No response")
errcode = TimedOut
}
}
} else {
log.MaestroErrorf("Error on GET request: %s\n", err.Error())
debugging.DEBUG_OUT("TIME ERROR: %s\n", err.Error())
err = errors.New("Failed to create request")
errcode = BadResponse
}
return
}
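// Call-site sketch; note the unconventional (err, errcode, result) return
// order used throughout this file:
//   err, code, t := client.getTime()
//   if err != nil { /* inspect code: TimedOut, BadResponse, ... */ }
//   // otherwise t.Time carries the server-reported time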
func (client *TimeClient) sendToStatusChannel(val int) {
select {
case client.statusChannel <- val:
default:
log.MaestroWarn("time status channel is blocking.")
}
}
// periodically asks for the time
func (client *TimeClient) worker() {
client.locker.Lock()
if client.running | {
client.locker.Unlock()
return
} | conditional_block |
|
checktime.go | // TimedOut means there was no response from server
TimedOut = 2
// BadResponse means the response from server was not formatted correctly
BadResponse = 3
// InsaneResponse means the server provided a time value which is crazy
InsaneResponse = 4
// SycallFailed means the syscall failed to work to set the time
SycallFailed = 5
)
// ClientConfig for getting time.
// /time is tricky, b/c if the time is not sane on the system
// SSL validation can break. So, even if we are using SSL, we will get
// time value with validation disabled on SSL.
// We also do some sanity checks to make sure the time value makes sense.
type ClientConfig struct {
// // The RootCA option should be a PEM encoded root ca chain
// // Use this if the server's TLS certificate is not signed
// // by a certificate authority in the default list. If the
// // server is signed by a certificate authority in the default
// // list it can be omitted.
// RootCA []byte // will be converted to byte array
// RootCAString string `yaml:"root_ca"`
// The ServerName is also only required if the root ca chain
// is not in the default list. This option should be omitted
// if RootCA is not specified. It should match the common name
// of the server's certificate.
// ServerName string `yaml:"server_name"`
// This option can be used in place of the RootCA and ServerName
// for servers that are not signed by a well known certificate
// authority. It will skip the authentication for the server. It
// is not recommended outside of a test environment.
NoValidate bool `yaml:"no_validate"`
// This option turns off encryption entirely
// it is only for testing
NoTLS bool `yaml:"no_tls"`
// Pretend true is run, but don't actually set the time
Pretend bool `yaml:"pretend"`
// This is the PEM encoded SSL client certificate. This is required
// for all https based client connections. It provides the relay identity
// to the server
// ClientCertificate []byte
// ClientCertificateString string `yaml:"client_cert"`
// // This is the PEM encoded SSL client private key. This is required
// // for all https based client connections.
// ClientKey []byte
// ClientKeyString string `yaml:"client_key"`
// // This is the hostname or IP address of the time server
Host string `yaml:"host"`
// This is the port of the time server
Port int `yaml:"port"`
// CheckTimeInterval in seconds
CheckTimeInterval int `yaml:"check_time_interval"`
// If this flag is set, client library logging will be printed
//EnableLogging bool
// number of buffers to hold. Remember, grease lib also holds its own buffers, so this should be minimal
// (optional)
//NumBuffers uint32 `yaml:"num_buffers"`
// MaxBuffers is the max number of the said buffers
// MaxBuffers uint32 `yaml:"max_buffers"`
// // BufferSize is the size of each of these buffers in bytes
// //BufferSize uint32 `yaml:"buffer_size"`
// // SendSizeThreshold is the amount of bytes being held before the
// // worker will start sending
// SendSizeThreshold uint32 `yaml:"send_size_threshold"`
// // SendTimeThreshold is the amount of time in milliseconds before the worker
// // will start sending logs
// SendTimeThreshold uint32 `yaml:"send_time_threshold"`
}
type TimeClient struct {
client *http.Client
tlsconfig tls.Config
host string
port int
url string
statusChannel chan int
// used to shutdown the client only
stopChannel chan struct{}
running bool
locker sync.Mutex
checkTimeInterval time.Duration
pretend bool
// if true, the sender backs off "backoff" time
backingOff bool
backoff time.Duration
}
type ClientError struct {
StatusCode int
Status string
}
func (err *ClientError) Error() string {
return fmt.Sprintf("TIME Client Error: %d - %s", err.StatusCode, err.Status)
}
func newClientError(resp *http.Response) (ret *ClientError) {
ret = new(ClientError)
ret.StatusCode = resp.StatusCode
ret.Status = resp.Status
return
}
// StatusChannel returns the status channel which can be used to know if time is set
// if nothing reads the channel, the time will be set anyway, and a simple log message is
// printed out.
func (client *TimeClient) StatusChannel() (ok bool, status chan int) {
client.locker.Lock()
ok = client.running
status = client.statusChannel
client.locker.Unlock()
return
}
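// Consumer sketch (hypothetical caller); the channel is never closed here,
// so a receiver should pair it with its own quit signal:
//   if ok, ch := client.StatusChannel(); ok {
//       go func() { for code := range ch { fmt.Printf("time status: %d\n", code) } }()
//   }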
// Run starts the client
func (client *TimeClient) Run() {
go client.worker()
}
// Stop the current client's worker
func (client *TimeClient) Stop() {
client.locker.Lock()
if client.running {
close(client.stopChannel)
}
client.locker.Unlock()
}
// NewClient creates a new TimeClient and validates the config
func NewClient(config *ClientConfig) (ok bool, ret *TimeClient, err error) {
ret = new(TimeClient)
ret.statusChannel = make(chan int)
err = ret.Reconfigure(config)
if err == nil {
ok = true
}
return
}
// Reconfigure allows you to reconfigure the client
func (client *TimeClient) Reconfigure(config *ClientConfig) (err error) {
client.pretend = config.Pretend
client.host = config.Host
client.port = config.Port
client.checkTimeInterval = time.Duration(config.CheckTimeInterval) * time.Second
if client.checkTimeInterval < 5*time.Second { // a bare 5 would mean 5ns on a Duration
client.checkTimeInterval = defaultCheckTimeInterval
}
if !config.NoTLS {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// TLSClientConfig: clientTLSConfig,
},
}
if len(config.Host) > 0 {
client.url = "https://" + config.Host + "/api/time"
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
} else {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
if len(config.Host) > 0 {
client.url = "http://" + config.Host
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
}
return
}
// // this is actually from the golang source - but is really new - 1.10 only:
// // TimeToTimespec converts t into a Timespec.
// // On some 32-bit systems the range of valid Timespec values are smaller
// // than that of time.Time values. So if t is out of the valid range of
// // Timespec, it returns a zero Timespec and EINVAL.
// func TimeToTimespec(t time.Time) (Timespec, error) {
// sec := t.Unix()
// nsec := int64(t.Nanosecond())
// ts := setTimespec(sec, nsec)
// // Currently all targets have either int32 or int64 for Timespec.Sec.
// // If there were a new target with floating point type for it, we have
// // to consider the rounding error.
// if int64(ts.Sec) != sec {
// return Timespec{}, EINVAL
// }
// return ts, nil
// }
type timeResponse struct {
Time int64 `json:"time"`
}
func (client *TimeClient) getTime() (err error, errcode int, ret *timeResponse) {
// fetch the current time from the server
var req *http.Request
var resp *http.Response
debugging.DEBUG_OUT("TIME GET %s >>>\n", client.url)
// build the GET request (empty body)
req, err = http.NewRequest("GET", client.url, nil)
// req | recentTime = int64(1514786400000)
// SetTimeOk means the time was set correctly
SetTimeOk = 1 | random_line_split |
|
checktime.go | means there was no response from server
TimedOut = 2
// BadResponse means the response from server was not formatted correctly
BadResponse = 3
// InsaneResponse means the server provided a time value which is crazy
InsaneResponse = 4
// SycallFailed means the syscall failed to work to set the time
SycallFailed = 5
)
// ClientConfig for getting time.
// /time is tricky, b/c if the time is not sane on the system
// SSL validation can break. So, even if we are using SSL, we will get
// time value with validation disabled on SSL.
// We also do some sanity checks to make sure the time value makes sense.
type ClientConfig struct {
// // The RootCA option should be a PEM encoded root ca chain
// // Use this if the server's TLS certificate is not signed
// // by a certificate authority in the default list. If the
// // server is signed by a certificate authority in the default
// // list it can be omitted.
// RootCA []byte // will be converted to byte array
// RootCAString string `yaml:"root_ca"`
// The ServerName is also only required if the root ca chain
// is not in the default list. This option should be omitted
// if RootCA is not specified. It should match the common name
// of the server's certificate.
// ServerName string `yaml:"server_name"`
// This option can be used in place of the RootCA and ServerName
// for servers that are not signed by a well known certificate
// authority. It will skip the authentication for the server. It
// is not recommended outside of a test environment.
NoValidate bool `yaml:"no_validate"`
// This option turns off encryption entirely
// it is only for testing
NoTLS bool `yaml:"no_tls"`
// Pretend true is run, but don't actually set the time
Pretend bool `yaml:"pretend"`
// This is the PEM encoded SSL client certificate. This is required
// for all https based client connections. It provides the relay identity
// to the server
// ClientCertificate []byte
// ClientCertificateString string `yaml:"client_cert"`
// // This is the PEM encoded SSL client private key. This is required
// // for all https based client connections.
// ClientKey []byte
// ClientKeyString string `yaml:"client_key"`
// // This is the hostname or IP address of the time server
Host string `yaml:"host"`
// This is the port of the time server
Port int `yaml:"port"`
// CheckTimeInterval in seconds
CheckTimeInterval int `yaml:"check_time_interval"`
// If this flag is set, client library logging will be printed
//EnableLogging bool
// number of buffers to hold. Remember, grease lib also holds its own buffers, so this should be minimal
// (optional)
//NumBuffers uint32 `yaml:"num_buffers"`
// MaxBuffers is the max number of the said buffers
// MaxBuffers uint32 `yaml:"max_buffers"`
// // BufferSize is the size of each of these buffers in bytes
// //BufferSize uint32 `yaml:"buffer_size"`
// // SendSizeThreshold is the amount of bytes being held before the
// // worker will start sending
// SendSizeThreshold uint32 `yaml:"send_size_threshold"`
// // SendTimeThreshold is the amount of time in milliseconds before the worker
// // will start sending logs
// SendTimeThreshold uint32 `yaml:"send_time_threshold"`
}
type TimeClient struct {
client *http.Client
tlsconfig tls.Config
host string
port int
url string
statusChannel chan int
// used to shutdown the client only
stopChannel chan struct{}
running bool
locker sync.Mutex
checkTimeInterval time.Duration
pretend bool
// if true, the sender backs off "backoff" time
backingOff bool
backoff time.Duration
}
type ClientError struct {
StatusCode int
Status string
}
func (err *ClientError) Error() string {
return fmt.Sprintf("TIME Client Error: %d - %s", err.StatusCode, err.Status)
}
func newClientError(resp *http.Response) (ret *ClientError) {
ret = new(ClientError)
ret.StatusCode = resp.StatusCode
ret.Status = resp.Status
return
}
// StatusChannel returns the status channel which can be used to know if time is set
// if nothing reads the channel, the time will be set anyway, and a simple log message is
// printed out.
func (client *TimeClient) StatusChannel() (ok bool, status chan int) {
client.locker.Lock()
ok = client.running
status = client.statusChannel
client.locker.Unlock()
return
}
// Run starts the client
func (client *TimeClient) Run() {
go client.worker()
}
// Stop the current client's worker
func (client *TimeClient) Stop() {
client.locker.Lock()
if client.running {
close(client.stopChannel)
}
client.locker.Unlock()
}
// NewClient creates a new TimeClient and validates the config
func NewClient(config *ClientConfig) (ok bool, ret *TimeClient, err error) {
ret = new(TimeClient)
ret.statusChannel = make(chan int)
err = ret.Reconfigure(config)
if err == nil {
ok = true
}
return
}
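// Construction sketch; the host below is a placeholder value, and
// CheckTimeInterval is in seconds per the config comment above:
//   ok, tc, err := NewClient(&ClientConfig{Host: "time.example.com", CheckTimeInterval: 300})
//   if ok && err == nil {
//       tc.Run()
//   }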
// Reconfigure allows you to reconfigure the client
func (client *TimeClient) | (config *ClientConfig) (err error) {
client.pretend = config.Pretend
client.host = config.Host
client.port = config.Port
client.checkTimeInterval = time.Duration(config.CheckTimeInterval) * time.Second
if client.checkTimeInterval < 5*time.Second { // a bare 5 would mean 5ns on a Duration
client.checkTimeInterval = defaultCheckTimeInterval
}
if !config.NoTLS {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// TLSClientConfig: clientTLSConfig,
},
}
if len(config.Host) > 0 {
client.url = "https://" + config.Host + "/api/time"
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
} else {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
if len(config.Host) > 0 {
client.url = "http://" + config.Host
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
}
return
}
// // this is actually from the golang source - but is really new - 1.10 only:
// // TimeToTimespec converts t into a Timespec.
// // On some 32-bit systems the range of valid Timespec values are smaller
// // than that of time.Time values. So if t is out of the valid range of
// // Timespec, it returns a zero Timespec and EINVAL.
// func TimeToTimespec(t time.Time) (Timespec, error) {
// sec := t.Unix()
// nsec := int64(t.Nanosecond())
// ts := setTimespec(sec, nsec)
// // Currently all targets have either int32 or int64 for Timespec.Sec.
// // If there were a new target with floating point type for it, we have
// // to consider the rounding error.
// if int64(ts.Sec) != sec {
// return Timespec{}, EINVAL
// }
// return ts, nil
// }
type timeResponse struct {
Time int64 `json:"time"`
}
func (client *TimeClient) getTime() (err error, errcode int, ret *timeResponse) {
// fetch the current time from the server
var req *http.Request
var resp *http.Response
debugging.DEBUG_OUT("TIME GET %s >>>\n", client.url)
// build the GET request (empty body)
req, err = http.NewRequest("GET", client.url, nil)
// req.Cancel = c
if err == nil {
resp, err = client.client.Do(req)
// close the response body on every path, not only when Do() fails
if resp != nil {
defer resp.Body.Close()
}
debug | Reconfigure | identifier_name |
checktime.go | means there was no response from server
TimedOut = 2
// BadResponse means the response from server was not formatted correctly
BadResponse = 3
// InsaneResponse means the server provided a time value which is crazy
InsaneResponse = 4
// SycallFailed means the syscall failed to work to set the time
SycallFailed = 5
)
// ClientConfig for getting time.
// /time is tricky, b/c if the time is not sane on the system
// SSL validation can break. So, even if we are using SSL, we will get
// time value with validation disabled on SSL.
// We also do some sanity checks to make sure the time value makes sense.
type ClientConfig struct {
// // The RootCA option should be a PEM encoded root ca chain
// // Use this if the server's TLS certificate is not signed
// // by a certificate authority in the default list. If the
// // server is signed by a certificate authority in the default
// // list it can be omitted.
// RootCA []byte // will be converted to byte array
// RootCAString string `yaml:"root_ca"`
// The ServerName is also only required if the root ca chain
// is not in the default list. This option should be omitted
// if RootCA is not specified. It should match the common name
// of the server's certificate.
// ServerName string `yaml:"server_name"`
// This option can be used in place of the RootCA and ServerName
// for servers that are not signed by a well known certificate
// authority. It will skip the authentication for the server. It
// is not recommended outside of a test environment.
NoValidate bool `yaml:"no_validate"`
// This option turns off encryption entirely
// it is only for testing
NoTLS bool `yaml:"no_tls"`
// Pretend true is run, but don't actually set the time
Pretend bool `yaml:"pretend"`
// This is the PEM encoded SSL client certificate. This is required
// for all https based client connections. It provides the relay identity
// to the server
// ClientCertificate []byte
// ClientCertificateString string `yaml:"client_cert"`
// // This is the PEM encoded SSL client private key. This is required
// // for all https based client connections.
// ClientKey []byte
// ClientKeyString string `yaml:"client_key"`
// // This is the hostname or IP address of the time server
Host string `yaml:"host"`
// This is the port of the time server
Port int `yaml:"port"`
// CheckTimeInterval in seconds
CheckTimeInterval int `yaml:"check_time_interval"`
// If this flag is set, client library logging will be printed
//EnableLogging bool
// number of buffers to hold. Remember, grease lib also holds its own buffers, so this should be minimal
// (optional)
//NumBuffers uint32 `yaml:"num_buffers"`
// MaxBuffers is the max number of the said buffers
// MaxBuffers uint32 `yaml:"max_buffers"`
// // BufferSize is the size of each of these buffers in bytes
// //BufferSize uint32 `yaml:"buffer_size"`
// // SendSizeThreshold is the amount of bytes being held before the
// // worker will start sending
// SendSizeThreshold uint32 `yaml:"send_size_threshold"`
// // SendTimeThreshold is the amount of time in milliseconds before the worker
// // will start sending logs
// SendTimeThreshold uint32 `yaml:"send_time_threshold"`
}
type TimeClient struct {
client *http.Client
tlsconfig tls.Config
host string
port int
url string
statusChannel chan int
// used to shutdown the client only
stopChannel chan struct{}
running bool
locker sync.Mutex
checkTimeInterval time.Duration
pretend bool
// if true, the sender backs off "backoff" time
backingOff bool
backoff time.Duration
}
type ClientError struct {
StatusCode int
Status string
}
func (err *ClientError) Error() string |
func newClientError(resp *http.Response) (ret *ClientError) {
ret = new(ClientError)
ret.StatusCode = resp.StatusCode
ret.Status = resp.Status
return
}
// StatusChannel returns the status channel which can be used to know if time is set
// if nothing reads the channel, the time will be set anyway, and a simple log message is
// printed out.
func (client *TimeClient) StatusChannel() (ok bool, status chan int) {
client.locker.Lock()
ok = client.running
status = client.statusChannel
client.locker.Unlock()
return
}
// Run starts the client
func (client *TimeClient) Run() {
go client.worker()
}
// Stop the current client's worker
func (client *TimeClient) Stop() {
client.locker.Lock()
if client.running {
close(client.stopChannel)
}
client.locker.Unlock()
}
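// Stop only signals shutdown by closing stopChannel; nothing here resets the
// running flag, so that bookkeeping presumably belongs to the worker when it
// observes the close and exits.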
// NewClient creates a new TimeClient and validates the config
func NewClient(config *ClientConfig) (ok bool, ret *TimeClient, err error) {
ret = new(TimeClient)
ret.statusChannel = make(chan int)
err = ret.Reconfigure(config)
if err == nil {
ok = true
}
return
}
// Reconfigure allows you to reconfigure the client
func (client *TimeClient) Reconfigure(config *ClientConfig) (err error) {
client.pretend = config.Pretend
client.host = config.Host
client.port = config.Port
client.checkTimeInterval = time.Duration(config.CheckTimeInterval) * time.Second
if client.checkTimeInterval < 5*time.Second { // a bare 5 would mean 5ns on a Duration
client.checkTimeInterval = defaultCheckTimeInterval
}
if !config.NoTLS {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// TLSClientConfig: clientTLSConfig,
},
}
if len(config.Host) > 0 {
client.url = "https://" + config.Host + "/api/time"
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
} else {
client.client = &http.Client{
Timeout: 35 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
MaxIdleConnsPerHost: 100,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
if len(config.Host) > 0 {
client.url = "http://" + config.Host
} else {
// client.notValidConfig = true
err = errors.New("No Host field specified")
}
}
return
}
// // this is actually from the golang source - but is really new - 1.10 only:
// // TimeToTimespec converts t into a Timespec.
// // On some 32-bit systems the range of valid Timespec values are smaller
// // than that of time.Time values. So if t is out of the valid range of
// // Timespec, it returns a zero Timespec and EINVAL.
// func TimeToTimespec(t time.Time) (Timespec, error) {
// sec := t.Unix()
// nsec := int64(t.Nanosecond())
// ts := setTimespec(sec, nsec)
// // Currently all targets have either int32 or int64 for Timespec.Sec.
// // If there were a new target with floating point type for it, we have
// // to consider the rounding error.
// if int64(ts.Sec) != sec {
// return Timespec{}, EINVAL
// }
// return ts, nil
// }
type timeResponse struct {
Time int64 `json:"time"`
}
func (client *TimeClient) getTime() (err error, errcode int, ret *timeResponse) {
// fetch the current time from the server
var req *http.Request
var resp *http.Response
debugging.DEBUG_OUT("TIME GET %s >>>\n", client.url)
// build the GET request (empty body)
req, err = http.NewRequest("GET", client.url, nil)
// req.Cancel = c
if err == nil {
resp, err = client.client.Do(req)
// close the response body on every path, not only when Do() fails
if resp != nil {
defer resp.Body.Close()
}
| {
return fmt.Sprintf("TIME Client Error: %d - %s", err.StatusCode, err.Status)
} | identifier_body |
x0CompilerUI.py |
if mod == 1:
while window.debug == 0:
time.sleep(0.05)
window.setCodeStatus(run.c, False)
if window.debug == 1: # next step
pass
if window.debug == 2: # step into
pass
if window.debug == 3: # step over: leave step mode and run to the end
mod = 0
window.setDebugEnabled(False)
if window.debug == 4: # step out
run.paramInit()
window.RuntimePad.clear()
window.RuntimePad.textCursor().insertText("")
window.debug = 0
continue
window.debug = 0
# clear the hint on the TAC instruction that was just processed
window.setCodeStatus(run.c, False)
window.setDebugEnabled(False)
window.actionStop.setEnabled(False)
window.output("\n=== the processing is over ===")
'''
Extend QSyntaxHighlighter to highlight x0 source text
'''
class x0Highlighter(QSyntaxHighlighter):
Rules = []
Formats = {}
def __init__(self, parent=None):
super(x0Highlighter, self).__init__(parent)
self.initializeFormats()
BUILDINS = ["and", "not", "int", "char", "bool", "true", "false"]
OPERATORS = ["\+", "-", "\*", "/", "%", "&", "\|", "~", "\^", "\!",
"<", ">", "=", "\.","+="]
KEYWORDS = ["read", "if", "else",
"for", "do", "while", "repeat", "until",
"write", "return", "break", "continue",
"main", "switch", "case"]
FUNCTIONS = ["procedure", "call"]
CONSTANTS = ["False", "True"]
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % keyword for keyword in KEYWORDS])),
"keyword"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % buildin for buildin in BUILDINS])),
"buildin"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % operator for operator in OPERATORS])),
"operator"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % function for function in FUNCTIONS])),
"function"))
x0Highlighter.Rules.append((QRegExp(
r"\b[+-]?[0-9]+[lL]?\b"),
"number"))
x0Highlighter.Rules.append((QRegExp(
r"(/\*(.|\n)*\*/)|(\/\/.*/n)"),
"comment"))
x0Highlighter.Rules.append((QRegExp(
r"\".*\"|'.*'"),
"string"))
x0Highlighter.Rules.append((QRegExp(
r"procedure.*)\("),
"funcName"))
@staticmethod
def initializeFormats():
baseFormat = QTextCharFormat()
baseFormat.setFontFamily("Consolas")
baseFormat.setFontPointSize(12)
for name, fcolor, bcolor in (
("operator", QColor(103,166,228), None),
("keyword", QColor(249,35,112), None),
("buildin", QColor(104,216,235), None),
("normal", Qt.white, None),
("string", QColor(231,219,116), None),
("function", QColor(245,150,32), None),
("funcName", QColor(166,226,44), None),
("number", QColor(167,128,255), None),
("comment", QColor(90,88,85), None)):
format = QTextCharFormat(baseFormat)
format.setForeground(QColor(fcolor))
if bcolor is not None:
format.setBackground(QColor(bcolor))
if name in ("buildin"):
format.setFontWeight(QFont.Bold)
if name == "comment":
format.setFontItalic(True)
x0Highlighter.Formats[name] = format
def highlightBlock(self, text):
NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
textLength = len(text)
prevState = self.previousBlockState()
self.setFormat(0, textLength, x0Highlighter.Formats["normal"])
for regex, format in x0Highlighter.Rules: # run every highlight rule over this block
i = regex.indexIn(text)
while i >= 0:
length = regex.matchedLength()
self.setFormat(i, length, x0Highlighter.Formats[format])
i = regex.indexIn(text, i + length)
if text:
pre = None
for i, c in enumerate(text):
if c == "/" and pre == c:
self.setFormat(i-1, len(text)-i+1, x0Highlighter.Formats["comment"])
break
pre = c
self.setCurrentBlockState(NORMAL)
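# QSyntaxHighlighter calls highlightBlock once per text block (one editor
# line), so the manual "//" scan above gets away with single-line state even
# though TRIPLESINGLE/TRIPLEDOUBLE block states are declared.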
def rehighlight(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QSyntaxHighlighter.rehighlight(self)
QApplication.restoreOverrideCursor()
class x0Compiler(QMainWindow, UI_MainWindow):
inputWrite = False
def __init__(self):
QMainWindow.__init__(self)
UI_MainWindow.__init__(self)
self.setupUi(self)
self.runDlg = None
self.highlighter = x0Highlighter(self.codeTextEdit.document())
self.initUI()
def onclick(self):
self.inputWrite = True
text = self.inputEdit.toPlainText()
f = open("input.txt","w")
f.write(text)
f.close()
self.inputEdit.clear()
print("click")
def errTbInit(self):
'''
This function is used to initialize the errMsgTable
'''
self.errorMsgTable.clear()
self.errorMsgTable.setColumnCount(3)
self.errorMsgTable.setRowCount(1)
self.errorMsgTable.setHorizontalHeaderLabels(['errno', 'line', 'message'])
self.errorMsgTable.verticalHeader().setVisible(False)
self.errorMsgTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.errorMsgTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.errorMsgTable.setColumnWidth(0, 70)
self.errorMsgTable.setColumnWidth(2, 595)
for idx in range(self.errorMsgTable.columnCount()):
headItem = self.errorMsgTable.horizontalHeaderItem(idx)
headItem.setForeground(QColor(0, 0, 0))
def fileInit(self):
self.filetag = False
self.filepath = os.getcwd()
self.filename = ""
self.workPathLabel.setText("")
cleanfiles()
def initUI(self):
self.fileInit()
self.errTbInit()
#self.scroll = QScrollArea()
#self.scroll.setWidgrt(self.)
self.actionNew.triggered.connect(self.newFile)
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
self.actionBuildAndRun.triggered.connect(self.BuildAndRun)
self.actionDebug.triggered.connect(self.DebugMod)
self.linelabel.setText("")
lines=""
for num in range(1,23):
lines=lines+str(num)
if num < 22:
lines=lines+'\n'
num=num+1
self.linelabel.setText(lines)
self.linelabel.setFixedWidth(30)
font = QFont("Consolas",11,QFont.Normal)
QFont.setLetterSpacing(font,QFont.AbsoluteSpacing,0.5)
self.linelabel.setFont(font)
self.outputLabel.setFont(font)
self.tableWidget.setFont(font)
self.label.setFont(font)
self.codeTextEdit.setFont(font)
self.label.setFixedWidth(280)
self.label.setText(" pcode:\n")
self.label.setAlignment(Qt.AlignTop)
# set the watch-table properties:
self.tableWidget.setObjectName("Variable Watches")
self.tableWidget.setColumnCount(6)
for i in range(6):
self.tableWidget.setColumnWidth(i,60)
self.infoTabs.tabBar().setAutoFillBackground(True)
# add the header row:
headers = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(headers[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
self.commitButton.clicked.connect(self.onclick)
self.show()
def setBuildEnabled(self, ok):
self.actionBuildAndRun.setEnabled(ok)
self.actionDebug.setEnabled(ok)
def startBuild(self):
'''
Prepare for a build-and-run or a debug pass
'''
# clear output | break | conditional_block |
|
x0CompilerUI.py |
'''
class x0Highlighter(QSyntaxHighlighter):
Rules = []
Formats = {}
def __init__(self, parent=None):
super(x0Highlighter, self).__init__(parent)
self.initializeFormats()
BUILDINS = ["and", "not", "int", "char", "bool", "true", "false"]
OPERATORS = [r"\+", "-", r"\*", "/", "%", "&", r"\|", "~", r"\^", r"\!",
"<", ">", "=", r"\.", r"\+="]
KEYWORDS = ["read", "if", "else",
"for", "do", "while", "repeat", "until",
"write", "return", "break", "continue",
"main", "switch", "case"]
FUNCTIONS = ["procedure", "call"]
CONSTANTS = ["False", "True"]
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % keyword for keyword in KEYWORDS])),
"keyword"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % buildin for buildin in BUILDINS])),
"buildin"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % operator for operator in OPERATORS])),
"operator"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % function for function in FUNCTIONS])),
"function"))
x0Highlighter.Rules.append((QRegExp(
r"\b[+-]?[0-9]+[lL]?\b"),
"number"))
x0Highlighter.Rules.append((QRegExp(
r"(/\*(.|\n)*\*/)|(//[^\n]*)"),
"comment"))
x0Highlighter.Rules.append((QRegExp(
r"\".*\"|'.*'"),
"string"))
x0Highlighter.Rules.append((QRegExp(
r"procedure.*\("),
"funcName"))
@staticmethod
def initializeFormats():
baseFormat = QTextCharFormat()
baseFormat.setFontFamily("Consolas")
baseFormat.setFontPointSize(12)
for name, fcolor, bcolor in (
("operator", QColor(103,166,228), None),
("keyword", QColor(249,35,112), None),
("buildin", QColor(104,216,235), None),
("normal", Qt.white, None),
("string", QColor(231,219,116), None),
("function", QColor(245,150,32), None),
("funcName", QColor(166,226,44), None),
("number", QColor(167,128,255), None),
("comment", QColor(90,88,85), None)):
format = QTextCharFormat(baseFormat)
format.setForeground(QColor(fcolor))
if bcolor is not None:
format.setBackground(QColor(bcolor))
if name == "buildin":
format.setFontWeight(QFont.Bold)
if name == "comment":
format.setFontItalic(True)
x0Highlighter.Formats[name] = format
def highlightBlock(self, text):
NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
textLength = len(text)
prevState = self.previousBlockState()
self.setFormat(0, textLength, x0Highlighter.Formats["normal"])
for regex, format in x0Highlighter.Rules:  # apply each highlighting rule
i = regex.indexIn(text)
while i >= 0:
length = regex.matchedLength()
self.setFormat(i, length, x0Highlighter.Formats[format])
i = regex.indexIn(text, i + length)
if not text:
pass
else:
stack, pre = [], None
for i, c in enumerate(text):
if c == "/" and pre == c:
self.setFormat(i-1, len(text)-i+1, x0Highlighter.Formats["comment"])
break
pre = c
self.setCurrentBlockState(NORMAL)
def rehighlight(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QSyntaxHighlighter.rehighlight(self)
QApplication.restoreOverrideCursor()
class x0Compiler(QMainWindow, UI_MainWindow):
inputWrite = False
def __init__(self):
QMainWindow.__init__(self)
UI_MainWindow.__init__(self)
self.setupUi(self)
self.runDlg = None
self.highlighter = x0Highlighter(self.codeTextEdit.document())
self.initUI()
def onclick(self):
self.inputWrite = True
text = self.inputEdit.toPlainText()
f = open("input.txt","w")
f.write(text)
f.close()
self.inputEdit.clear()
print("click")
def errTbInit(self):
'''
T | ileInit(self):
self.filetag = False
self.filepath = os.getcwd()
self.filename = ""
self.workPathLabel.setText("")
cleanfiles()
def initUI(self):
self.fileInit()
self.errTbInit()
#self.scroll = QScrollArea()
#self.scroll.setWidgrt(self.)
self.actionNew.triggered.connect(self.newFile)
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
self.actionBuildAndRun.triggered.connect(self.BuildAndRun)
self.actionDebug.triggered.connect(self.DebugMod)
self.linelabel.setText("")
lines=""
for num in range(1,23):
lines=lines+str(num)
if num < 22:
lines=lines+'\n'
self.linelabel.setText(lines)
self.linelabel.setFixedWidth(30)
font = QFont("Consolas",11,QFont.Normal)
QFont.setLetterSpacing(font,QFont.AbsoluteSpacing,0.5)
self.linelabel.setFont(font)
self.outputLabel.setFont(font)
self.tableWidget.setFont(font)
self.label.setFont(font)
self.codeTextEdit.setFont(font)
self.label.setFixedWidth(280)
self.label.setText(" pcode:\n")
self.label.setAlignment(Qt.AlignTop)
# set table widget properties:
self.tableWidget.setObjectName("Variable Watches")
self.tableWidget.setColumnCount(6)
for i in range(6):
self.tableWidget.setColumnWidth(i,60)
self.infoTabs.tabBar().setAutoFillBackground(True)
# add the table header:
headers = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(headers[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
self.commitButton.clicked.connect(self.onclick)
self.show()
def setBuildEnabled(self, ok):
self.actionBuildAndRun.setEnabled(ok)
self.actionDebug.setEnabled(ok)
def startBuild(self):
'''
Preparation for building & running or debugging a program
'''
# clear output label and table contents
self.label.setText("")
self.outputLabel.setText("")
self.tableWidget.clear()
self.tableWidget.setRowCount(0)
# add the table header:
headers = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(headers[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
text = self.codeTextEdit.toPlainText()
if text == "":
text = u" "
# If the current working code file exists, use it directly
curfile = self.filepath+'\\'
if self.filetag == True:
| his function is used to initialize the errMsgTable
'''
self.errorMsgTable.clear()
self.errorMsgTable.setColumnCount(3)
self.errorMsgTable.setRowCount(1)
self.errorMsgTable.setHorizontalHeaderLabels(['errno', 'line', 'message'])
self.errorMsgTable.verticalHeader().setVisible(False)
self.errorMsgTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.errorMsgTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.errorMsgTable.setColumnWidth(0, 70)
self.errorMsgTable.setColumnWidth(2, 595)
for idx in range(self.errorMsgTable.columnCount()):
headItem = self.errorMsgTable.horizontalHeaderItem(idx)
headItem.setForeground(QColor(0, 0, 0))
def f | identifier_body |
x0CompilerUI.py | if isfile(".\\~.tmp"):
os.remove(".\\~.tmp")
if isfile(os.getcwd()+"\\ferr.json"):
os.remove(os.getcwd()+"\\ferr.json")
if isfile(os.getcwd()+"\\fcode.json"):
os.remove(os.getcwd()+"\\fcode.json")
'''
This function does the real work of background interpretation.
It should run in a separate thread so that I/O does not block the UI.
'''
def procRun(codeList, window):
window.interpret = Interpret(codeList)
run = window.interpret
mod = window.mod
window.setStackValues(run.showStack())
while True:
# highlight the TAC instruction currently being executed
window.setCodeStatus(run.p, True)
tag = run.sg_step()
window.setStackValues(run.showStack())
if tag == 1: #input
window.input()
if tag == 2: #output
window.output(run.recv())
if window.stopsgl == 1:
window.setCodeStatus(run.c, False)
break
if run.judge() == False:
break
if mod == 1:
while window.debug == 0:
time.sleep(0.05)
window.setCodeStatus(run.c, False)
if window.debug == 1: # next step
pass
if window.debug == 2: # step into
pass
if window.debug == 3: # over step
mod = 0
window.setDebugEnabled(False)
if window.debug == 4: # step out
run.paramInit()
window.RuntimePad.clear()
window.RuntimePad.textCursor().insertText("")
window.debug = 0
continue
window.debug = 0
# clear the highlight from the TAC instruction that just ran
window.setCodeStatus(run.c, False)
window.setDebugEnabled(False)
window.actionStop.setEnabled(False)
window.output("\n=== the processing is over ===")
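# An illustrative sketch of how the UI thread could drive the debug handshake
# in procRun() above (handler names are assumptions): the worker spins while
# window.debug == 0, and each toolbar action sets the flag that procRun()
# consumes and resets.
#
# def onNextStep(self): self.debug = 1   # execute one more instruction
# def onStepInto(self): self.debug = 2
# def onRunToEnd(self): self.debug = 3   # leaves debug mode (mod = 0)
# def onStepOut(self):  self.debug = 4   # paramInit() and continue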
'''
extends the QSyntaxHighlighter class to highlight x0 source text
'''
class x0Highlighter(QSyntaxHighlighter):
Rules = []
Formats = {}
def __init__(self, parent=None):
super(x0Highlighter, self).__init__(parent)
self.initializeFormats()
BUILDINS = ["and", "not", "int", "char", "bool", "true", "false"]
OPERATORS = [r"\+", "-", r"\*", "/", "%", "&", r"\|", "~", r"\^", r"\!",
"<", ">", "=", r"\.", r"\+="]
KEYWORDS = ["read", "if", "else",
"for", "do", "while", "repeat", "until",
"write", "return", "break", "continue",
"main", "switch", "case"]
FUNCTIONS = ["procedure", "call"]
CONSTANTS = ["False", "True"]
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % keyword for keyword in KEYWORDS])),
"keyword"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % buildin for buildin in BUILDINS])),
"buildin"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % operator for operator in OPERATORS])),
"operator"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % function for function in FUNCTIONS])),
"function"))
x0Highlighter.Rules.append((QRegExp(
r"\b[+-]?[0-9]+[lL]?\b"),
"number"))
x0Highlighter.Rules.append((QRegExp(
r"(/\*(.|\n)*\*/)|(//[^\n]*)"),
"comment"))
x0Highlighter.Rules.append((QRegExp(
r"\".*\"|'.*'"),
"string"))
x0Highlighter.Rules.append((QRegExp(
r"procedure.*\("),
"funcName"))
@staticmethod
def initializeFormats():
baseFormat = QTextCharFormat()
baseFormat.setFontFamily("Consolas")
baseFormat.setFontPointSize(12)
for name, fcolor, bcolor in (
("operator", QColor(103,166,228), None),
("keyword", QColor(249,35,112), None),
("buildin", QColor(104,216,235), None),
("normal", Qt.white, None),
("string", QColor(231,219,116), None),
("function", QColor(245,150,32), None),
("funcName", QColor(166,226,44), None),
("number", QColor(167,128,255), None),
("comment", QColor(90,88,85), None)):
format = QTextCharFormat(baseFormat)
format.setForeground(QColor(fcolor))
if bcolor is not None:
format.setBackground(QColor(bcolor))
if name == "buildin":
format.setFontWeight(QFont.Bold)
if name == "comment":
format.setFontItalic(True)
x0Highlighter.Formats[name] = format
def highlightBlock(self, text):
NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
textLength = len(text)
prevState = self.previousBlockState()
self.setFormat(0, textLength, x0Highlighter.Formats["normal"])
for regex, format in x0Highlighter.Rules:  # apply each highlighting rule
i = regex.indexIn(text)
while i >= 0:
length = regex.matchedLength()
self.setFormat(i, length, x0Highlighter.Formats[format])
i = regex.indexIn(text, i + length)
if not text:
pass
else:
stack, pre = [], None
for i, c in enumerate(text):
if c == "/" and pre == c:
self.setFormat(i-1, len(text)-i+1, x0Highlighter.Formats["comment"])
break
pre = c
self.setCurrentBlockState(NORMAL)
def rehighlight(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QSyntaxHighlighter.rehighlight(self)
QApplication.restoreOverrideCursor()
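# A worked example of the rule-scanning loop in highlightBlock() above, using
# the QRegExp API directly. For text = "int x = 10":
#
# regex = QRegExp(r"\bint\b")
# regex.indexIn("int x = 10")       # -> 0, match found at index 0
# regex.matchedLength()             # -> 3, so setFormat(0, 3, ...) is applied
# regex.indexIn("int x = 10", 3)    # -> -1, no further match, the loop exits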
class x0Compiler(QMainWindow, UI_MainWindow):
inputWrite = False
def __init__(self):
QMainWindow.__init__(self)
UI_MainWindow.__init__(self)
self.setupUi(self)
self.runDlg = None
self.highlighter = x0Highlighter(self.codeTextEdit.document())
self.initUI()
def onclick(self):
self.inputWrite = True
text = self.inputEdit.toPlainText()
f = open("input.txt","w")
f.write(text)
f.close()
self.inputEdit.clear()
print("click")
def errTbInit(self):
'''
This function is used to initialize the errMsgTable
'''
self.errorMsgTable.clear()
self.errorMsgTable.setColumnCount(3)
self.errorMsgTable.setRowCount(1)
self.errorMsgTable.setHorizontalHeaderLabels(['errno', 'line', 'message'])
self.errorMsgTable.verticalHeader().setVisible(False)
self.errorMsgTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.errorMsgTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.errorMsgTable.setColumnWidth(0, 70)
self.errorMsgTable.setColumnWidth(2, 595)
for idx in range(self.errorMsgTable.columnCount()):
headItem = self.errorMsgTable.horizontalHeaderItem(idx)
headItem.setForeground(QColor(0, 0, 0))
def fileInit(self):
self.filetag = False
self.filepath = os.getcwd()
self.filename = ""
self.workPathLabel.setText("")
cleanfiles()
def initUI(self):
self.fileInit()
self.errTbInit()
#self.scroll = QScrollArea()
#self.scroll.setWidgrt(self.)
self.actionNew.triggered.connect(self.newFile)
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
self.actionBuildAndRun.triggered.connect(self.BuildAndRun)
self.actionDebug.triggered.connect(self.DebugMod)
self.linelabel.setText("")
lines=""
for num in range(1,23):
lines=lines+str(num)
if num < 22:
lines=lines+'\n'
self.linelabel.setText(lines)
self.linelabel.setFixedWidth(30)
font = QFont("Consolas",11,QFont.Normal)
QFont.setLetterSpacing(font,QFont.AbsoluteSpacing,0.5)
self.linelabel.setFont(font)
self.outputLabel.setFont(font)
self.tableWidget.setFont(font)
self.label.setFont(font)
self.codeTextEdit.setFont(font)
self.label.setFixedWidth(280)
self.label.setText(" pcode:\ | def cleanfiles():
from os.path import isfile | random_line_split |
|
x0CompilerUI.py | texts
'''
class x0Highlighter(QSyntaxHighlighter):
Rules = []
Formats = {}
def __init__(self, parent=None):
super(x0Highlighter, self).__init__(parent)
self.initializeFormats()
BUILDINS = ["and", "not", "int", "char", "bool", "true", "false"]
OPERATORS = [r"\+", "-", r"\*", "/", "%", "&", r"\|", "~", r"\^", r"\!",
"<", ">", "=", r"\.", r"\+="]
KEYWORDS = ["read", "if", "else",
"for", "do", "while", "repeat", "until",
"write", "return", "break", "continue",
"main", "switch", "case"]
FUNCTIONS = ["procedure", "call"]
CONSTANTS = ["False", "True"]
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % keyword for keyword in KEYWORDS])),
"keyword"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"\b%s\b" % buildin for buildin in BUILDINS])),
"buildin"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % operator for operator in OPERATORS])),
"operator"))
x0Highlighter.Rules.append((QRegExp(
"|".join([r"%s" % function for function in FUNCTIONS])),
"function"))
x0Highlighter.Rules.append((QRegExp(
r"\b[+-]?[0-9]+[lL]?\b"),
"number"))
x0Highlighter.Rules.append((QRegExp(
r"(/\*(.|\n)*\*/)|(//[^\n]*)"),
"comment"))
x0Highlighter.Rules.append((QRegExp(
r"\".*\"|'.*'"),
"string"))
x0Highlighter.Rules.append((QRegExp(
r"procedure.*\("),
"funcName"))
@staticmethod
def initializeFormats():
baseFormat = QTextCharFormat()
baseFormat.setFontFamily("Consolas")
baseFormat.setFontPointSize(12)
for name, fcolor, bcolor in (
("operator", QColor(103,166,228), None),
("keyword", QColor(249,35,112), None),
("buildin", QColor(104,216,235), None),
("normal", Qt.white, None),
("string", QColor(231,219,116), None),
("function", QColor(245,150,32), None),
("funcName", QColor(166,226,44), None),
("number", QColor(167,128,255), None),
("comment", QColor(90,88,85), None)):
format = QTextCharFormat(baseFormat)
format.setForeground(QColor(fcolor))
if bcolor is not None:
format.setBackground(QColor(bcolor))
if name == "buildin":
format.setFontWeight(QFont.Bold)
if name == "comment":
format.setFontItalic(True)
x0Highlighter.Formats[name] = format
def highlightBlock(self, text):
NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
textLength = len(text)
prevState = self.previousBlockState()
self.setFormat(0, textLength, x0Highlighter.Formats["normal"])
for regex, format in x0Highlighter.Rules:  # apply each highlighting rule
i = regex.indexIn(text)
while i >= 0:
length = regex.matchedLength()
self.setFormat(i, length, x0Highlighter.Formats[format])
i = regex.indexIn(text, i + length)
if not text:
pass
else:
stack, pre = [], None
for i, c in enumerate(text):
if c == "/" and pre == c:
self.setFormat(i-1, len(text)-i+1, x0Highlighter.Formats["comment"])
break
pre = c
self.setCurrentBlockState(NORMAL)
def rehighlight(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QSyntaxHighlighter.rehighlight(self)
QApplication.restoreOverrideCursor()
class x0Compiler(QMainWindow, UI_MainWindow):
inputWrite = False
def __init__(self):
QMainWindow.__init__(self)
UI_MainWindow.__init__(self)
self.setupUi(self)
self.runDlg = None
self.highlighter = x0Highlighter(self.codeTextEdit.document())
self.initUI()
def onclick(self):
self.inputWrite = True
text = self.inputEdit.toPlainText()
f = open("input.txt","w")
f.write(text)
f.close()
self.inputEdit.clear()
print("click")
def errTbInit(self):
'''
This function is used to initialize the errMsgTable
'''
self.errorMsgTable.clear()
self.errorMsgTable.setColumnCount(3)
self.errorMsgTable.setRowCount(1)
self.errorMsgTable.setHorizontalHeaderLabels(['errno', 'line', 'message'])
self.errorMsgTable.verticalHeader().setVisible(False)
self.errorMsgTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.errorMsgTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.errorMsgTable.setColumnWidth(0, 70)
self.errorMsgTable.setColumnWidth(2, 595)
for idx in range(self.errorMsgTable.columnCount()):
headItem = self.errorMsgTable.horizontalHeaderItem(idx)
headItem.setForeground(QColor(0, 0, 0))
def fileInit(self):
self.filetag = False
self.filepath = os.getcwd()
self.filename = ""
self.workPathLabel.setText("")
cleanfiles()
def initUI(self):
self.fileInit()
self.errTbInit()
#self.scroll = QScrollArea()
#self.scroll.setWidgrt(self.)
self.actionNew.triggered.connect(self.newFile)
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
self.actionBuildAndRun.triggered.connect(self.BuildAndRun)
self.actionDebug.triggered.connect(self.DebugMod)
self.linelabel.setText("")
lines=""
for num in range(1,23):
lines=lines+str(num)
if num < 22:
lines=lines+'\n'
self.linelabel.setText(lines)
self.linelabel.setFixedWidth(30)
font = QFont("Consolas",11,QFont.Normal)
QFont.setLetterSpacing(font,QFont.AbsoluteSpacing,0.5)
self.linelabel.setFont(font)
self.outputLabel.setFont(font)
self.tableWidget.setFont(font)
self.label.setFont(font)
self.codeTextEdit.setFont(font)
self.label.setFixedWidth(280)
self.label.setText(" pcode:\n")
self.label.setAlignment(Qt.AlignTop)
# set table widget properties:
self.tableWidget.setObjectName("Variable Watches")
self.tableWidget.setColumnCount(6)
for i in range(6):
self.tableWidget.setColumnWidth(i,60)
self.infoTabs.tabBar().setAutoFillBackground(True)
# add the table header:
headers = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(headers[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
self.commitButton.clicked.connect(self.onclick)
self.show()
def setBuildEnabled(self, ok):
self.actionBuildAndRun.setEnabled(ok)
self.actionDebug.setEnabled(ok)
def startBuild(self):
'''
Prepa | build&run or debug a processing
'''
# clear output label and table contents
self.label.setText("")
self.outputLabel.setText("")
self.tableWidget.clear()
self.tableWidget.setRowCount(0)
# add the table header:
headers = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(headers[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
text = self.codeTextEdit.toPlainText()
if text == "":
text = u" "
# If the current working code file exists, use it directly
curfile = self.filepath+'\\'
if self.filetag == True:
| ration for | identifier_name |
form_input.rs | styles::{Palette, Size};
///
/// pub struct FormInputExample {
/// pub link: ComponentLink<Self>,
/// pub value: String,
/// }
///
/// pub enum Msg {
/// Input(String),
/// }
///
/// impl Component for FormInputExample {
/// type Message = Msg; | /// type Properties = ();
/// fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
/// FormInputExample {
/// link,
/// value: "".to_string(),
/// }
/// }
/// fn update(&mut self, msg: Self::Message) -> ShouldRender {
/// match msg {
/// Msg::Input(value) => {
/// self.value = value;
/// }
/// }
/// true
/// }
/// fn change(&mut self, _props: Self::Properties) -> ShouldRender {
/// false
/// }
///
/// fn view(&self) -> Html {
/// html!{
/// <FormInput
/// input_type=InputType::Text
/// input_palette=Palette::Standard
/// input_size=Size::Medium
/// id="form-input-example"
/// oninput_signal = self.link.callback(|e: InputData| Msg::Input(e.value))
/// placeholder="example"
/// underline=false
/// />
/// }
/// }
/// ```
pub struct FormInput {
link: ComponentLink<Self>,
props: Props,
}
/// Different input types are supported. You can find more information [here](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input)
#[derive(Clone, PartialEq)]
pub enum InputType {
Button,
Checkbox,
Color,
Date,
Datetime,
DatetimeLocal,
Email,
Hidden,
Image,
Month,
Number,
Password,
Radio,
Range,
Reset,
Search,
Tel,
Text,
Time,
Url,
Week,
}
#[derive(Clone, PartialEq, Properties)]
pub struct Props {
/// The input type. Default `InputType::Text`
#[prop_or(InputType::Text)]
pub input_type: InputType,
/// The input style according to its purpose. Default `Palette::Standard`
#[prop_or(Palette::Standard)]
pub input_palette: Palette,
/// The size of the input. Default `Size::Medium`
#[prop_or(Size::Medium)]
pub input_size: Size,
/// Signal to emit the event input
#[prop_or(Callback::noop())]
pub oninput_signal: Callback<InputData>,
/// Signal to emit the event blur
#[prop_or(Callback::noop())]
pub onblur_signal: Callback<FocusEvent>,
/// Signal to emit the event keypress
#[prop_or(Callback::noop())]
pub onkeydown_signal: Callback<KeyboardEvent>,
/// Content to appear in the form control when it is empty
#[prop_or_default]
pub placeholder: String,
/// Whether the command or control is checked
#[prop_or_default]
pub checked: bool,
/// General property to get the ref of the component
#[prop_or_default]
pub code_ref: NodeRef,
/// General property to add keys
#[prop_or_default]
pub key: String,
/// General property to add custom class styles
#[prop_or_default]
pub class_name: String,
/// General property to add custom id
#[prop_or_default]
pub id: String,
/// The name of the input
#[prop_or_default]
pub name: String,
/// Alt attribute for the image type
#[prop_or_default]
pub alt: String,
/// Automatically focus the form control when the page is loaded. Default `false`
#[prop_or(false)]
pub autofocus: bool,
/// Hint for the form autofill feature. Default `false`
#[prop_or(false)]
pub autocomplete: bool,
/// Value of the id attribute of the "datalist" of autocomplete options
#[prop_or_default]
pub list: String,
/// Minimum value
#[prop_or_default]
pub min: u16,
/// Maximum value
#[prop_or_default]
pub max: u16,
/// Minimum length (number of characters) of value
#[prop_or_default]
pub minlength: u16,
/// Maximum length (number of characters) of value. Default `1000`
#[prop_or(1000)]
pub maxlength: u16,
/// Pattern the value must match to be valid. Default `"[\\s\\S]*".to_string()`
#[prop_or("[\\s\\S]*".to_string())]
pub pattern: String,
/// The value is not editable. Default `false`
#[prop_or(false)]
pub readonly: bool,
/// A value is required or must be checked for the form to be submittable. Default `false`
#[prop_or(false)]
pub required: bool,
/// Whether the form control is disabled. Default `false`
#[prop_or(false)]
pub disabled: bool,
/// Underline style instead of a box, similar to Material Design. Default `false`
#[prop_or(false)]
pub underline: bool,
/// Incremental values that are valid
#[prop_or_default]
pub step: i16,
/// Error state for validation. Default `false`
#[prop_or(false)]
pub error_state: bool,
/// Show error message when error_state is true
#[prop_or_default]
pub error_message: String,
}
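// A sketch of how the #[prop_or]/#[prop_or_default] defaults above behave in
// Yew's html! macro: any omitted prop takes its declared default, so
//
// html! { <FormInput id="name-input" /> }
//
// is equivalent to passing input_type=InputType::Text,
// input_palette=Palette::Standard, input_size=Size::Medium, maxlength=1000,
// pattern="[\\s\\S]*", and error_state=false, among the other defaults.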
#[derive(Debug)]
pub enum Msg {
Input(InputData),
Blur(FocusEvent),
KeyPressed(KeyboardEvent),
}
impl Component for FormInput {
type Message = Msg;
type Properties = Props;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self { link, props }
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::Input(input_data) => {
self.props.oninput_signal.emit(input_data);
}
Msg::Blur(focus_event) => {
self.props.onblur_signal.emit(focus_event);
}
Msg::KeyPressed(keyboard_event) => {
self.props.onkeydown_signal.emit(keyboard_event);
}
};
true
}
fn change(&mut self, props: Self::Properties) -> ShouldRender {
if self.props != props {
self.props = props;
true
} else {
false
}
}
fn view(&self) -> Html {
html! {
<>
<input
id=self.props.id.clone()
class=classes!(
"form-input",
get_palette(self.props.input_palette.clone()),
get_size(self.props.input_size.clone()),
if self.props.underline { "underline" } else { "" },
self.props.class_name.clone(),
)
key=self.props.key.clone()
ref=self.props.code_ref.clone()
type=get_type(self.props.input_type.clone())
oninput=self.link.callback(Msg::Input)
checked=self.props.checked
onblur=self.link.callback(Msg::Blur)
onkeydown=self.link.callback(Msg::KeyPressed)
name=self.props.name.clone()
required=self.props.required
readonly=self.props.readonly
disabled=self.props.disabled
placeholder=self.props.placeholder.clone()
pattern=self.props.pattern.clone()
min=self.props.min.to_string()
minlength=self.props.minlength.to_string()
max=self.props.max.to_string()
maxlength=self.props.maxlength.to_string()
alt=self.props.alt.clone()
autofocus=self.props.autofocus
autocomplete=self.props.autocomplete.to_string()
step=self.props.step.to_string()
list=self.props.list.clone()
/>
{get_error_message(self.props.error_state, self.props.error_message.clone())}
</>
}
}
}
fn get_type(input_type: InputType) -> String {
match input_type {
InputType::Button => "button".to_string(),
InputType::Checkbox => "checkbox".to_string(),
InputType::Color => "color".to_string(),
InputType::Date => "date".to_string(),
InputType::Datetime => "datetime".to_string(),
InputType::DatetimeLocal => "datetime-local".to_string(),
InputType::Email => "email".to_string(),
InputType::Hidden => "hidden".to_string(),
InputType::Image => "image".to_string(),
InputType::Month => "month".to_string(),
InputType::Number => "number".to_string(),
InputType::Password => "password".to_string(),
InputType::Radio => "radio".to_string(),
InputType::Range => "range".to_string(),
InputType::Reset => "reset".to_string(),
InputType::Search => "search".to_string(),
InputType::Tel => "tel".to_string(),
InputType::Text => "text".to_string(),
InputType::Time => "time".to_string(),
InputType::Url => "url".to_string(),
InputType::Week => "week".to_string(),
}
}
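// Quick illustrative checks of the mapping above; the match is exhaustive, so
// every variant yields the exact HTML `type` attribute string:
//
// assert_eq!(get_type(InputType::DatetimeLocal), "datetime-local");
// assert_eq!(get_type(InputType::Text), "text");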
#[wasm_bindgen_test]
fn should_create_form_input() {
let props = Props {
key: "".to_string(),
code_ref: NodeRef::default(),
id: "form-input-id-test".to_string(),
class_name: "form-input-class-test".to_string(),
input_type: InputType::Text,
oninput_signal: Callback::noop(),
onblur | random_line_split |
|
form_input.rs | ::{Palette, Size};
///
/// pub struct FormInputExample {
/// pub link: ComponentLink<Self>,
/// pub value: String,
/// }
///
/// pub enum Msg {
/// Input(String),
/// }
///
/// impl Component for FormInputExample {
/// type Message = Msg;
/// type Properties = ();
/// fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
/// FormInputExample {
/// link,
/// value: "".to_string(),
/// }
/// }
/// fn update(&mut self, msg: Self::Message) -> ShouldRender {
/// match msg {
/// Msg::Input(value) => {
/// self.value = value;
/// }
/// }
/// true
/// }
/// fn change(&mut self, _props: Self::Properties) -> ShouldRender {
/// false
/// }
///
/// fn view(&self) -> Html {
/// html!{
/// <FormInput
/// input_type=InputType::Text
/// input_palette=Palette::Standard
/// input_size=Size::Medium
/// id="form-input-example"
/// oninput_signal = self.link.callback(|e: InputData| Msg::Input(e.value))
/// placeholder="example"
/// underline=false
/// />
/// }
/// }
/// ```
pub struct FormInput {
link: ComponentLink<Self>,
props: Props,
}
/// Different input types are supported. You can find more information [here](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input)
#[derive(Clone, PartialEq)]
pub enum InputType {
Button,
Checkbox,
Color,
Date,
Datetime,
DatetimeLocal,
Email,
Hidden,
Image,
Month,
Number,
Password,
Radio,
Range,
Reset,
Search,
Tel,
Text,
Time,
Url,
Week,
}
#[derive(Clone, PartialEq, Properties)]
pub struct Props {
/// The input type. Default `InputType::Text`
#[prop_or(InputType::Text)]
pub input_type: InputType,
/// The input style according to its purpose. Default `Palette::Standard`
#[prop_or(Palette::Standard)]
pub input_palette: Palette,
/// The size of the input. Default `Size::Medium`
#[prop_or(Size::Medium)]
pub input_size: Size,
/// Signal to emit the event input
#[prop_or(Callback::noop())]
pub oninput_signal: Callback<InputData>,
/// Signal to emit the event blur
#[prop_or(Callback::noop())]
pub onblur_signal: Callback<FocusEvent>,
/// Signal to emit the event keypress
#[prop_or(Callback::noop())]
pub onkeydown_signal: Callback<KeyboardEvent>,
/// Content to appear in the form control when it is empty
#[prop_or_default]
pub placeholder: String,
/// Whether the command or control is checked
#[prop_or_default]
pub checked: bool,
/// General property to get the ref of the component
#[prop_or_default]
pub code_ref: NodeRef,
/// General property to add keys
#[prop_or_default]
pub key: String,
/// General property to add custom class styles
#[prop_or_default]
pub class_name: String,
/// General property to add custom id
#[prop_or_default]
pub id: String,
/// The name of the input
#[prop_or_default]
pub name: String,
/// Alt attribute for the image type
#[prop_or_default]
pub alt: String,
/// Automatically focus the form control when the page is loaded. Default `false`
#[prop_or(false)]
pub autofocus: bool,
/// Hint for the form autofill feature. Default `false`
#[prop_or(false)]
pub autocomplete: bool,
/// Value of the id attribute of the "datalist" of autocomplete options
#[prop_or_default]
pub list: String,
/// Minimum value
#[prop_or_default]
pub min: u16,
/// Maximum value
#[prop_or_default]
pub max: u16,
/// Minimum length (number of characters) of value
#[prop_or_default]
pub minlength: u16,
/// Maximum length (number of characters) of value. Default `1000`
#[prop_or(1000)]
pub maxlength: u16,
/// Pattern the value must match to be valid. Default `"[\\s\\S]*".to_string()`
#[prop_or("[\\s\\S]*".to_string())]
pub pattern: String,
/// The value is not editable. Default `false`
#[prop_or(false)]
pub readonly: bool,
/// A value is required or must be checked for the form to be submittable. Default `false`
#[prop_or(false)]
pub required: bool,
/// Whether the form control is disabled. Default `false`
#[prop_or(false)]
pub disabled: bool,
/// Underline style instead of a box, similar to Material Design. Default `false`
#[prop_or(false)]
pub underline: bool,
/// Incremental values that are valid
#[prop_or_default]
pub step: i16,
/// Error state for validation. Default `false`
#[prop_or(false)]
pub error_state: bool,
/// Show error message when error_state is true
#[prop_or_default]
pub error_message: String,
}
#[derive(Debug)]
pub enum Msg {
Input(InputData),
Blur(FocusEvent),
KeyPressed(KeyboardEvent),
}
impl Component for FormInput {
type Message = Msg;
type Properties = Props;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self { link, props }
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::Input(input_data) => {
self.props.oninput_signal.emit(input_data);
}
Msg::Blur(focus_event) => {
self.props.onblur_signal.emit(focus_event);
}
Msg::KeyPressed(keyboard_event) => {
self.props.onkeydown_signal.emit(keyboard_event);
}
};
true
}
fn | (&mut self, props: Self::Properties) -> ShouldRender {
if self.props != props {
self.props = props;
true
} else {
false
}
}
fn view(&self) -> Html {
html! {
<>
<input
id=self.props.id.clone()
class=classes!(
"form-input",
get_palette(self.props.input_palette.clone()),
get_size(self.props.input_size.clone()),
if self.props.underline { "underline" } else { "" },
self.props.class_name.clone(),
)
key=self.props.key.clone()
ref=self.props.code_ref.clone()
type=get_type(self.props.input_type.clone())
oninput=self.link.callback(Msg::Input)
checked=self.props.checked
onblur=self.link.callback(Msg::Blur)
onkeydown=self.link.callback(Msg::KeyPressed)
name=self.props.name.clone()
required=self.props.required
readonly=self.props.readonly
disabled=self.props.disabled
placeholder=self.props.placeholder.clone()
pattern=self.props.pattern.clone()
min=self.props.min.to_string()
minlength=self.props.minlength.to_string()
max=self.props.max.to_string()
maxlength=self.props.maxlength.to_string()
alt=self.props.alt.clone()
autofocus=self.props.autofocus
autocomplete=self.props.autocomplete.to_string()
step=self.props.step.to_string()
list=self.props.list.clone()
/>
{get_error_message(self.props.error_state, self.props.error_message.clone())}
</>
}
}
}
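// A short sketch of the optimization in change() above: because Props derives
// PartialEq, a parent re-render that passes identical props compares equal,
// change() returns false, and the DOM diff is skipped entirely.
//
// let rerender = self.props != new_props; // false when nothing changed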
fn get_type(input_type: InputType) -> String {
match input_type {
InputType::Button => "button".to_string(),
InputType::Checkbox => "checkbox".to_string(),
InputType::Color => "color".to_string(),
InputType::Date => "date".to_string(),
InputType::Datetime => "datetime".to_string(),
InputType::DatetimeLocal => "datetime-local".to_string(),
InputType::Email => "email".to_string(),
InputType::Hidden => "hidden".to_string(),
InputType::Image => "image".to_string(),
InputType::Month => "month".to_string(),
InputType::Number => "number".to_string(),
InputType::Password => "password".to_string(),
InputType::Radio => "radio".to_string(),
InputType::Range => "range".to_string(),
InputType::Reset => "reset".to_string(),
InputType::Search => "search".to_string(),
InputType::Tel => "tel".to_string(),
InputType::Text => "text".to_string(),
InputType::Time => "time".to_string(),
InputType::Url => "url".to_string(),
InputType::Week => "week".to_string(),
}
}
#[wasm_bindgen_test]
fn should_create_form_input() {
let props = Props {
key: "".to_string(),
code_ref: NodeRef::default(),
id: "form-input-id-test".to_string(),
class_name: "form-input-class-test".to_string(),
input_type: InputType::Text,
oninput_signal: Callback::noop(),
onblur | change | identifier_name |
form_input.rs | ::{Palette, Size};
///
/// pub struct FormInputExample {
/// pub link: ComponentLink<Self>,
/// pub value: String,
/// }
///
/// pub enum Msg {
/// Input(String),
/// }
///
/// impl Component for FormInputExample {
/// type Message = Msg;
/// type Properties = ();
/// fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
/// FormInputExample {
/// link,
/// value: "".to_string(),
/// }
/// }
/// fn update(&mut self, msg: Self::Message) -> ShouldRender {
/// match msg {
/// Msg::Input(value) => {
/// self.value = value;
/// }
/// }
/// true
/// }
/// fn change(&mut self, _props: Self::Properties) -> ShouldRender {
/// false
/// }
///
/// fn view(&self) -> Html {
/// html!{
/// <FormInput
/// input_type=InputType::Text
/// input_palette=Palette::Standard
/// input_size=Size::Medium
/// id="form-input-example"
/// oninput_signal = self.link.callback(|e: InputData| Msg::Input(e.value))
/// placeholder="example"
/// underline=false
/// />
/// }
/// }
/// ```
pub struct FormInput {
link: ComponentLink<Self>,
props: Props,
}
/// Different input types are supported. You can find more information [here](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input)
#[derive(Clone, PartialEq)]
pub enum InputType {
Button,
Checkbox,
Color,
Date,
Datetime,
DatetimeLocal,
Email,
Hidden,
Image,
Month,
Number,
Password,
Radio,
Range,
Reset,
Search,
Tel,
Text,
Time,
Url,
Week,
}
#[derive(Clone, PartialEq, Properties)]
pub struct Props {
/// The input type. Default `InputType::Text`
#[prop_or(InputType::Text)]
pub input_type: InputType,
/// The input style according to its purpose. Default `Palette::Standard`
#[prop_or(Palette::Standard)]
pub input_palette: Palette,
/// The size of the input. Default `Size::Medium`
#[prop_or(Size::Medium)]
pub input_size: Size,
/// Signal to emit the event input
#[prop_or(Callback::noop())]
pub oninput_signal: Callback<InputData>,
/// Signal to emit the event blur
#[prop_or(Callback::noop())]
pub onblur_signal: Callback<FocusEvent>,
/// Signal to emit the event keypress
#[prop_or(Callback::noop())]
pub onkeydown_signal: Callback<KeyboardEvent>,
/// Content to appear in the form control when it is empty
#[prop_or_default]
pub placeholder: String,
/// Whether the command or control is checked
#[prop_or_default]
pub checked: bool,
/// General property to get the ref of the component
#[prop_or_default]
pub code_ref: NodeRef,
/// General property to add keys
#[prop_or_default]
pub key: String,
/// General property to add custom class styles
#[prop_or_default]
pub class_name: String,
/// General property to add custom id
#[prop_or_default]
pub id: String,
/// The name of the input
#[prop_or_default]
pub name: String,
/// Alt attribute for the image type
#[prop_or_default]
pub alt: String,
/// Automatically focus the form control when the page is loaded. Default `false`
#[prop_or(false)]
pub autofocus: bool,
/// Hint for the form autofill feature. Default `false`
#[prop_or(false)]
pub autocomplete: bool,
/// Value of the id attribute of the "datalist" of autocomplete options
#[prop_or_default]
pub list: String,
/// Minimum value
#[prop_or_default]
pub min: u16,
/// Maximum value
#[prop_or_default]
pub max: u16,
/// Minimum length (number of characters) of value
#[prop_or_default]
pub minlength: u16,
/// Maximum length (number of characters) of value. Default `1000`
#[prop_or(1000)]
pub maxlength: u16,
/// Pattern the value must match to be valid. Default `"[\\s\\S]*".to_string()`
#[prop_or("[\\s\\S]*".to_string())]
pub pattern: String,
/// The value is not editable. Default `false`
#[prop_or(false)]
pub readonly: bool,
/// A value is required or must be checked for the form to be submittable. Default `false`
#[prop_or(false)]
pub required: bool,
/// Whether the form control is disabled. Default `false`
#[prop_or(false)]
pub disabled: bool,
/// Underline style instead of a box, similar to Material Design. Default `false`
#[prop_or(false)]
pub underline: bool,
/// Incremental values that are valid
#[prop_or_default]
pub step: i16,
/// Error state for validation. Default `false`
#[prop_or(false)]
pub error_state: bool,
/// Show error message when error_state is true
#[prop_or_default]
pub error_message: String,
}
#[derive(Debug)]
pub enum Msg {
Input(InputData),
Blur(FocusEvent),
KeyPressed(KeyboardEvent),
}
impl Component for FormInput {
type Message = Msg;
type Properties = Props;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self { link, props }
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::Input(input_data) => {
self.props.oninput_signal.emit(input_data);
}
Msg::Blur(focus_event) => {
self.props.onblur_signal.emit(focus_event);
}
Msg::KeyPressed(keyboard_event) => {
self.props.onkeydown_signal.emit(keyboard_event);
}
};
true
}
fn change(&mut self, props: Self::Properties) -> ShouldRender {
if self.props != props {
self.props = props;
true
} else {
false
}
}
fn view(&self) -> Html {
html! {
<>
<input
id=self.props.id.clone()
class=classes!(
"form-input",
get_palette(self.props.input_palette.clone()),
get_size(self.props.input_size.clone()),
if self.props.underline { "underline" } else { "" },
self.props.class_name.clone(),
)
key=self.props.key.clone()
ref=self.props.code_ref.clone()
type=get_type(self.props.input_type.clone())
oninput=self.link.callback(Msg::Input)
checked=self.props.checked
onblur=self.link.callback(Msg::Blur)
onkeydown=self.link.callback(Msg::KeyPressed)
name=self.props.name.clone()
required=self.props.required
readonly=self.props.readonly
disabled=self.props.disabled
placeholder=self.props.placeholder.clone()
pattern=self.props.pattern.clone()
min=self.props.min.to_string()
minlength=self.props.minlength.to_string()
max=self.props.max.to_string()
maxlength=self.props.maxlength.to_string()
alt=self.props.alt.clone()
autofocus=self.props.autofocus
autocomplete=self.props.autocomplete.to_string()
step=self.props.step.to_string()
list=self.props.list.clone()
/>
{get_error_message(self.props.error_state, self.props.error_message.clone())}
</>
}
}
}
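// A hypothetical parent-side sketch of the Msg forwarding in update() above:
// DOM events are relayed to the parent through the Callback props (the
// ParentMsg variants here are assumptions, not from this file).
//
// html! {
//     <FormInput
//         onblur_signal=self.link.callback(ParentMsg::FieldBlurred)
//         onkeydown_signal=self.link.callback(ParentMsg::KeyDown)
//     />
// }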
fn get_type(input_type: InputType) -> String | InputType::Time => "time".to_string(),
InputType::Url => "url".to_string(),
InputType::Week => "week".to_string(),
}
}
#[wasm_bindgen_test]
fn should_create_form_input() {
let props = Props {
key: "".to_string(),
code_ref: NodeRef::default(),
id: "form-input-id-test".to_string(),
class_name: "form-input-class-test".to_string(),
input_type: InputType::Text,
oninput_signal: Callback::noop(),
onblur | {
match input_type {
InputType::Button => "button".to_string(),
InputType::Checkbox => "checkbox".to_string(),
InputType::Color => "color".to_string(),
InputType::Date => "date".to_string(),
InputType::Datetime => "datetime".to_string(),
InputType::DatetimeLocal => "datetime-local".to_string(),
InputType::Email => "email".to_string(),
InputType::Hidden => "hidden".to_string(),
InputType::Image => "image".to_string(),
InputType::Month => "month".to_string(),
InputType::Number => "number".to_string(),
InputType::Password => "password".to_string(),
InputType::Radio => "radio".to_string(),
InputType::Range => "range".to_string(),
InputType::Reset => "reset".to_string(),
InputType::Search => "search".to_string(),
InputType::Tel => "tel".to_string(),
InputType::Text => "text".to_string(), | identifier_body |
main.rs | total_bytes_transferred: usize,
/// The number of times the Bytes Per Second has been measured.
total_measures: usize,
/// Accumulation of all of the Bytes Per Second measures.
total_bps: f64,
/// The Bytes Per Second during the last measure.
last_bps: f64,
/// The number of bytes transferred during the last measure.
last_bytes_transferred: usize,
}
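// bytes_per_second() is called by the measuring loop below but defined
// elsewhere in the file; a minimal sketch under that assumption:
//
// fn bytes_per_second(bytes: usize, elapsed: std::time::Duration) -> f64 {
//     bytes as f64 / elapsed.as_secs_f64()
// }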
#[inline]
fn exit_err() -> ! {
std::process::exit(1);
}
fn main() {
let matches = App::new("Throughput")
.version("1.1")
.author("Adolph C.")
.about("Measures the throughput of stdin or a socket.")
.arg(Arg::with_name("address")
.short("l")
.long("addr")
.value_name("IP Address")
.help("IP address to listen on. Defaults to 127.0.0.1. Must specify port.")
.takes_value(true))
.arg(Arg::with_name("buffer_size")
.short("b")
.long("bufsize")
.value_name("BYTES")
.help("The size of the buffer used to read from the stream in bytes. Defaults to 4096.")
.takes_value(true))
.arg(Arg::with_name("iterations")
.short("i")
.long("iterations")
.help("The number of times the buffer should be filled before a measure is taken. Defaults to 1.")
.takes_value(true))
.arg(Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT_NUMBER")
.help("Port to listen on. Must be specified if address is given.")
.takes_value(true))
.arg(Arg::with_name("pass")
.long("pass")
.help("If present, throughput will print to stderr and pass input to stdout.")
.takes_value(false))
.after_help("If a port/address is not specified, throughput will read from stdin.")
.get_matches();
let passthrough = matches.is_present("pass");
let buffer_size: usize;
let iterations: usize;
if let Some(buf_size_str) = matches.value_of("buffer_size") {
if let Ok(bsize) = buf_size_str.parse() {
buffer_size = bsize;
} else {
print_err!("Buffer size must be a valid number.");
exit_err();
}
} else {
buffer_size = DEFAULT_BUFFER_SIZE;
}
if let Some(iterations_str) = matches.value_of("iterations") {
if let Ok(it) = iterations_str.parse() {
iterations = it;
} else {
print_err!("Iterations must be a valid number.");
exit_err();
}
} else {
iterations = DEFAULT_ITERATION_COUNT;
}
let address_present = matches.is_present("address");
let port_present = matches.is_present("port");
if address_present || port_present {
if !port_present {
print_err!("A port must be specified alongside an address.");
exit_err();
} else {
let address = matches.value_of("address").unwrap_or(DEFAULT_ADDRESS);
let port = matches.value_of("port").expect("Expected port arg to have value.");
if let Ok(parsed_port) = port.parse() {
measure_tcp_stream(address, parsed_port, buffer_size, iterations, passthrough);
} else {
print_err!("Port must be a valid number from 0 to 65535");
exit_err();
}
}
} else {
measure_stdin(buffer_size, iterations, passthrough);
}
}
fn measure_tcp_stream(address: &str, port: u16, buffer_size: usize, iterations: usize, passthrough: bool) {
let parsed_addr: IpAddr = match address.parse() {
Ok(parsed) => parsed,
Err(_) => {
print_err!("Bad IP address {}", address);
exit_err();
}
};
let socket_addr = SocketAddr::new(parsed_addr, port);
match TcpListener::bind(socket_addr) {
Ok(listener) => {
println!("Listening at {}", socket_addr);
match listener.accept() {
Ok((stream, incoming_addr)) => {
println!("Reading incoming data from {}", incoming_addr);
println!();
measure_reader(stream, buffer_size, iterations, passthrough);
},
Err(err) => {
print_err!("There was an error accepting a connection.");
print_err!("ERROR: {}", err);
exit_err();
}
}
},
Err(err) => {
print_err!("There was an error connecting to {}", socket_addr);
print_err!("ERROR: {}", err);
exit_err();
}
};
}
fn measure_stdin(buffer_size: usize, iterations: usize, passthrough: bool) {
let input = stdin();
measure_reader(input.lock(), buffer_size, iterations, passthrough);
}
fn measure_reader<R: Read>(mut reader: R, buffer_size: usize, iterations: usize, passthrough: bool) {
let output = stdout();
let mut locked_output = output.lock();
let err_out = stderr();
let mut locked_error = err_out.lock();
let mut buffer = Vec::with_capacity(buffer_size);
buffer.resize(buffer_size, 0);
let mut last_measured = Instant::now();
let mut transfer_info = TransferInfo::default();
loop {
let mut end_loop = false;
for _ in 0..iterations {
match reader.read(&mut buffer) {
Ok(bytes_read) => {
transfer_info.last_bytes_transferred += bytes_read;
transfer_info.total_bytes_transferred += bytes_read;
if bytes_read == 0 {
end_loop = true;
break;
} else if passthrough {
if let Err(err) = locked_output.write_all(&buffer[0..bytes_read]) {
print_err_into!(locked_error, "Error while writing buffer into stdout: {}", err);
exit_err();
}
}
}
Err(err) => {
print_err_into!(locked_error, "Error while reading into buffer: {}", err);
}
}
}
let measure_end = Instant::now();
let duration = measure_end.duration_since(last_measured);
if duration.as_secs() > 0 || end_loop {
transfer_info.last_bps = bytes_per_second(transfer_info.last_bytes_transferred, duration);
transfer_info.total_measures += 1;
transfer_info.total_bps += transfer_info.last_bps;
let _print_result = if passthrough {
print_info(&mut locked_error, &mut transfer_info)
} else {
print_info(&mut locked_output, &mut transfer_info)
};
match _print_result {
Ok(_) => {},
Err(err) => {
print_err_into!(locked_error, "Error while printing output: {}", err);
exit_err();
}
}
last_measured = measure_end;
transfer_info.last_bps = 0.0;
transfer_info.last_bytes_transferred = 0;
}
if end_loop { return; }
}
}
fn print_info<W: Write>(output: &mut W, transfer_info: &mut TransferInfo) -> Result<(), std::io::Error> {
if transfer_info.total_measures > 1 { term_move_up(output, 3)?; }
let (mem_total_transfer, unit_total_transfer) = byte_to_mem_units(transfer_info.total_bytes_transferred as f64);
print_fixed_width(output, "Data Transferred:", 24);
write!(output, "{:.3} {} ({} cycles)",
mem_total_transfer, unit_total_transfer, transfer_info.total_measures)?;
term_clear_line(output)?;
let (mem_single, unit_single) = byte_to_mem_units(transfer_info.last_bps);
print_fixed_width(output, "Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_single, unit_single)?;
term_clear_line(output)?;
let avg_bps = transfer_info.total_bps / transfer_info.total_measures as f64;
let (mem_avg, unit_avg) = byte_to_mem_units(avg_bps);
print_fixed_width(output, "Average Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_avg, unit_avg)?;
term_clear_line(output)?;
Ok(())
}
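// byte_to_mem_units() used above is defined elsewhere in the file; a plausible
// sketch (an assumption, not the actual implementation):
//
// fn byte_to_mem_units(bytes: f64) -> (f64, &'static str) {
//     const UNITS: [&str; 4] = ["B", "KB", "MB", "GB"];
//     let (mut value, mut unit) = (bytes, 0);
//     while value >= 1024.0 && unit < UNITS.len() - 1 {
//         value /= 1024.0;
//         unit += 1;
//     }
//     (value, UNITS[unit])
// }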
fn print_fixed_width<W: Write>(output: &mut W, text: &str, columns: usize) {
if let Err(err) = output.write(text.as_bytes()) {
panic!("[print_fixed_width] Error while writing to stream: {}", err);
}
if text.len() < columns {
let remaining = columns - text.len();
let pad = [b' '];
for _ in 0..remaining {
if let Err(err) = output.write(&pad) {
panic!("[print_fixed_width] Error while padding output: {}", err);
}
}
}
}
/// Clears to the end of the current line.
#[inline]
fn term_clear_line<W: Write>(output: &mut W) -> Result<(), std::io::Error> {
writeln!(output, "\x1b[K")?;
Ok(())
}
/// Moves the cursor up one line.
#[inline]
fn term_move_up<W: Write>(output: &mut W, lines: usize) -> Result<(), std::io::Error> | {
write!(output, "\x1b[{}A", lines)?;
Ok(())
} | identifier_body |
|
main.rs | io::Write;
if let Err(e) = writeln!($err_write, $fmt, $($arg)*) {
panic!("Error while writing to stderr: {}", e);
}
});
}
macro_rules! print_err {
($fmt:expr) => ({
use std::io::{stderr, Write};
if let Err(e) = writeln!(stderr(), $fmt) {
panic!("Error while writing to stderr: {}", e);
}
});
($fmt:expr, $($arg:tt)*) => ({
use std::io::{stderr, Write};
if let Err(e) = writeln!(stderr(), $fmt, $($arg)*) {
panic!("Error while writing to stderr: {}", e);
}
});
}
#[derive(Default)]
struct TransferInfo {
/// The total number of bytes transferred.
total_bytes_transferred: usize,
/// The number of times the Bytes Per Second has been measured.
total_measures: usize,
/// Accumulation of all of the Bytes Per Second measures.
total_bps: f64,
/// The Bytes Per Second during the last measure.
last_bps: f64,
/// The number of bytes transferred during the last measure.
last_bytes_transferred: usize,
}
#[inline]
fn exit_err() -> ! {
std::process::exit(1);
}
fn main() {
let matches = App::new("Throughput")
.version("1.1")
.author("Adolph C.")
.about("Measures the throughput of stdin or a socket.")
.arg(Arg::with_name("address")
.short("l")
.long("addr")
.value_name("IP Address")
.help("IP address to listen on. Defaults to 127.0.0.1. Must specify port.")
.takes_value(true))
.arg(Arg::with_name("buffer_size")
.short("b")
.long("bufsize")
.value_name("BYTES")
.help("The size of the buffer used to read from the stream in bytes. Defaults to 4096.")
.takes_value(true))
.arg(Arg::with_name("iterations")
.short("i")
.long("iterations")
.help("The number of times the buffer should be filled before a measure is taken. Defaults to 1.")
.takes_value(true))
.arg(Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT_NUMBER")
.help("Port to listen on. Must be specified if address is given.")
.takes_value(true))
.arg(Arg::with_name("pass")
.long("pass")
.help("If present, throughput will print to stderr and pass input to stdout.")
.takes_value(false))
.after_help("If a port/address is not specified, throughput will read from stdin.")
.get_matches();
let passthrough = matches.is_present("pass");
let buffer_size: usize;
let iterations: usize;
if let Some(buf_size_str) = matches.value_of("buffer_size") {
if let Ok(bsize) = buf_size_str.parse() {
buffer_size = bsize;
} else {
print_err!("Buffer size must be a valid number.");
exit_err();
}
} else {
buffer_size = DEFAULT_BUFFER_SIZE;
}
if let Some(iterations_str) = matches.value_of("iterations") {
if let Ok(it) = iterations_str.parse() {
iterations = it;
} else {
print_err!("Iterations must be a valid number.");
exit_err();
}
} else {
iterations = DEFAULT_ITERATION_COUNT;
}
let address_present = matches.is_present("address");
let port_present = matches.is_present("port");
if address_present || port_present {
if !port_present {
print_err!("A port must be specified alongside an address.");
exit_err();
} else {
let address = matches.value_of("address").unwrap_or(DEFAULT_ADDRESS);
let port = matches.value_of("port").expect("Expected port arg to have value.");
if let Ok(parsed_port) = port.parse() {
measure_tcp_stream(address, parsed_port, buffer_size, iterations, passthrough);
} else {
print_err!("Port must be a valid number from 0 to 65535");
exit_err();
} | } else {
measure_stdin(buffer_size, iterations, passthrough);
}
}
fn measure_tcp_stream(address: &str, port: u16, buffer_size: usize, iterations: usize, passthrough: bool) {
let parsed_addr: IpAddr = match address.parse() {
Ok(parsed) => parsed,
Err(_) => {
print_err!("Bad IP address {}", address);
exit_err();
}
};
let socket_addr = SocketAddr::new(parsed_addr, port);
match TcpListener::bind(socket_addr) {
Ok(listener) => {
println!("Listening at {}", socket_addr);
match listener.accept() {
Ok((stream, incoming_addr)) => {
println!("Reading incoming data from {}", incoming_addr);
println!();
measure_reader(stream, buffer_size, iterations, passthrough);
},
Err(err) => {
print_err!("There was an error accepting a connection.");
print_err!("ERROR: {}", err);
exit_err();
}
}
},
Err(err) => {
print_err!("There was an error connecting to {}", socket_addr);
print_err!("ERROR: {}", err);
exit_err();
}
};
}
fn measure_stdin(buffer_size: usize, iterations: usize, passthrough: bool) {
let input = stdin();
measure_reader(input.lock(), buffer_size, iterations, passthrough);
}
fn measure_reader<R: Read>(mut reader: R, buffer_size: usize, iterations: usize, passthrough: bool) {
let output = stdout();
let mut locked_output = output.lock();
let err_out = stderr();
let mut locked_error = err_out.lock();
let mut buffer = Vec::with_capacity(buffer_size);
buffer.resize(buffer_size, 0);
let mut last_measured = Instant::now();
let mut transfer_info = TransferInfo::default();
loop {
let mut end_loop = false;
for _ in 0..iterations {
match reader.read(&mut buffer) {
Ok(bytes_read) => {
transfer_info.last_bytes_transferred += bytes_read;
transfer_info.total_bytes_transferred += bytes_read;
if bytes_read == 0 {
end_loop = true;
break;
} else if passthrough {
if let Err(err) = locked_output.write_all(&buffer[0..bytes_read]) {
print_err_into!(locked_error, "Error while writing buffer into stdout: {}", err);
exit_err();
}
}
}
Err(err) => {
print_err_into!(locked_error, "Error while reading into buffer: {}", err);
}
}
}
let measure_end = Instant::now();
let duration = measure_end.duration_since(last_measured);
if duration.as_secs() > 0 || end_loop {
transfer_info.last_bps = bytes_per_second(transfer_info.last_bytes_transferred, duration);
transfer_info.total_measures += 1;
transfer_info.total_bps += transfer_info.last_bps;
let _print_result = if passthrough {
print_info(&mut locked_error, &mut transfer_info)
} else {
print_info(&mut locked_output, &mut transfer_info)
};
match _print_result {
Ok(_) => {},
Err(err) => {
print_err_into!(locked_error, "Error while printing output: {}", err);
exit_err();
}
}
last_measured = measure_end;
transfer_info.last_bps = 0.0;
transfer_info.last_bytes_transferred = 0;
}
if end_loop { return; }
}
}
fn print_info<W: Write>(output: &mut W, transfer_info: &mut TransferInfo) -> Result<(), std::io::Error> {
if transfer_info.total_measures > 1 { term_move_up(output, 3)?; }
let (mem_total_transfer, unit_total_transfer) = byte_to_mem_units(transfer_info.total_bytes_transferred as f64);
print_fixed_width(output, "Data Transferred:", 24);
write!(output, "{:.3} {} ({} cycles)",
mem_total_transfer, unit_total_transfer, transfer_info.total_measures)?;
term_clear_line(output)?;
let (mem_single, unit_single) = byte_to_mem_units(transfer_info.last_bps);
print_fixed_width(output, "Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_single, unit_single)?;
term_clear_line(output)?;
let avg_bps = transfer_info.total_bps / transfer_info.total_measures as f64;
let (mem_avg, unit_avg) = byte_to_mem_units(avg_bps);
print_fixed_width(output, "Average Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_avg, unit_avg)?;
term_clear_line(output)?;
Ok(())
}
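// A worked example of the running average above (illustrative numbers only):
// after measures of 10 MB/s, 20 MB/s and 30 MB/s, total_bps = 60e6 and
// total_measures = 3, so avg_bps = 60e6 / 3.0 = 20 MB/s.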
fn print_fixed_width<W: Write>(output: &mut W, text: &str, columns: usize) {
if let Err(err) = output.write(text.as_bytes()) {
panic!("[print_fixed_width] Error while writing to stream: {}", err);
}
if text.len() < columns {
let | } | random_line_split |
main.rs | usize,
/// The number of times the Bytes Per Second has been measured.
total_measures: usize,
/// Accumulation of all of the Bytes Per Second measures.
total_bps: f64,
/// The Bytes Per Second during the last measure.
last_bps: f64,
/// The number of bytes transferred during the last measure.
last_bytes_transferred: usize,
}
#[inline]
fn exit_err() -> ! {
std::process::exit(1);
}
fn main() {
let matches = App::new("Throughput")
.version("1.1")
.author("Adolph C.")
.about("Measures the throughput of stdin or a socket.")
.arg(Arg::with_name("address")
.short("l")
.long("addr")
.value_name("IP Address")
.help("IP address to listen on. Defaults to 127.0.0.1. Must specify port.")
.takes_value(true))
.arg(Arg::with_name("buffer_size")
.short("b")
.long("bufsize")
.value_name("BYTES")
.help("The size of the buffer used to read from the stream in bytes. Defaults to 4096.")
.takes_value(true))
.arg(Arg::with_name("iterations")
.short("i")
.long("iterations")
.help("The number of times the buffer should be filled before a measure is taken. Defaults to 1.")
.takes_value(true))
.arg(Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT_NUMBER")
.help("Port to listen on. Must be specified if address is given.")
.takes_value(true))
.arg(Arg::with_name("pass")
.long("pass")
.help("If present, throughput will print to stderr and pass input to stdout.")
.takes_value(false))
.after_help("If a port/address is not specified, throughput will read from stdin.")
.get_matches();
let passthrough = matches.is_present("pass");
let buffer_size: usize;
let iterations: usize;
if let Some(buf_size_str) = matches.value_of("buffer_size") {
if let Ok(bsize) = buf_size_str.parse() {
buffer_size = bsize;
} else {
print_err!("Buffer size must be a valid number.");
exit_err();
}
} else {
buffer_size = DEFAULT_BUFFER_SIZE;
}
if let Some(iterations_str) = matches.value_of("iterations") {
if let Ok(it) = iterations_str.parse() {
iterations = it;
} else {
print_err!("Iterations must be a valid number.");
exit_err();
}
} else {
iterations = DEFAULT_ITERATION_COUNT;
}
let address_present = matches.is_present("address");
let port_present = matches.is_present("port");
if address_present || port_present {
if !port_present {
print_err!("A port must be speicified alongside a address.");
exit_err();
} else {
let address = matches.value_of("address").unwrap_or(DEFAULT_ADDRESS);
let port = matches.value_of("port").expect("Expected port arg to have value.");
if let Ok(parsed_port) = port.parse() {
measure_tcp_stream(address, parsed_port, buffer_size, iterations, passthrough);
} else {
print_err!("Port must be a valid number from 0 to 65535");
exit_err();
}
}
} else {
measure_stdin(buffer_size, iterations, passthrough);
}
}
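// Example invocations, sketched from the clap argument definitions above; the
// binary name "throughput" is an assumption based on the App name and help text.
//
//   cat big.file | throughput --bufsize 8192 --iterations 4
//   throughput --addr 127.0.0.1 --port 9000 --pass > received.bin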
fn measure_tcp_stream(address: &str, port: u16, buffer_size: usize, iterations: usize, passthrough: bool) {
let parsed_addr: IpAddr = match address.parse() {
Ok(parsed) => parsed,
Err(_) => {
print_err!("Bad IP address {}", address);
exit_err();
}
};
let socket_addr = SocketAddr::new(parsed_addr, port);
match TcpListener::bind(socket_addr) {
Ok(listener) => {
println!("Listening at {}", socket_addr);
match listener.accept() {
Ok((stream, incoming_addr)) => {
println!("Reading incoming data from {}", incoming_addr);
println!();
measure_reader(stream, buffer_size, iterations, passthrough);
},
Err(err) => {
print_err!("There was an error accepting a connection.");
print_err!("ERROR: {}", err);
exit_err();
}
}
},
Err(err) => {
print_err!("There was an error connecting to {}", socket_addr);
print_err!("ERROR: {}", err);
exit_err();
}
};
}
fn measure_stdin(buffer_size: usize, iterations: usize, passthrough: bool) {
let input = stdin();
measure_reader(input.lock(), buffer_size, iterations, passthrough);
}
fn measure_reader<R: Read>(mut reader: R, buffer_size: usize, iterations: usize, passthrough: bool) {
let output = stdout();
let mut locked_output = output.lock();
let err_out = stderr();
let mut locked_error = err_out.lock();
// A zero-initialized read buffer of the requested size.
let mut buffer = vec![0u8; buffer_size];
let mut last_measured = Instant::now();
let mut transfer_info = TransferInfo::default();
loop {
let mut end_loop = false;
for _ in 0..iterations {
match reader.read(&mut buffer) {
Ok(bytes_read) => {
transfer_info.last_bytes_transferred += bytes_read;
transfer_info.total_bytes_transferred += bytes_read;
if bytes_read == 0 {
end_loop = true;
break;
} else if passthrough {
if let Err(err) = locked_output.write_all(&buffer[0..bytes_read]) {
print_err_into!(locked_error, "Error while writing buffer into stdout: {}", err);
exit_err();
}
}
}
// Retry interrupted reads; treat any other read error as fatal so a
// persistent failure cannot spin forever printing the same error.
Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => {}
Err(err) => {
    print_err_into!(locked_error, "Error while reading into buffer: {}", err);
    exit_err();
}
}
}
let measure_end = Instant::now();
let duration = measure_end.duration_since(last_measured);
if duration.as_secs() > 0 || end_loop {
transfer_info.last_bps = bytes_per_second(transfer_info.last_bytes_transferred, duration);
transfer_info.total_measures += 1;
transfer_info.total_bps += transfer_info.last_bps;
let print_result = if passthrough {
    print_info(&mut locked_error, &mut transfer_info)
} else {
    print_info(&mut locked_output, &mut transfer_info)
};
match print_result {
Ok(_) => {},
Err(err) => {
print_err_into!(locked_error, "Error while printing output: {}", err);
exit_err();
}
}
last_measured = measure_end;
transfer_info.last_bps = 0.0;
transfer_info.last_bytes_transferred = 0;
}
if end_loop { return; }
}
}
fn print_info<W: Write>(output: &mut W, transfer_info: &mut TransferInfo) -> Result<(), std::io::Error> {
if transfer_info.total_measures > 1 { term_move_up(output, 3)?; }
let (mem_total_transfer, unit_total_transfer) = byte_to_mem_units(transfer_info.total_bytes_transferred as f64);
print_fixed_width(output, "Data Transferred:", 24);
write!(output, "{:.3} {} ({} cycles)",
mem_total_transfer, unit_total_transfer, transfer_info.total_measures)?;
term_clear_line(output)?;
let (mem_single, unit_single) = byte_to_mem_units(transfer_info.last_bps);
print_fixed_width(output, "Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_single, unit_single)?;
term_clear_line(output)?;
let avg_bps = transfer_info.total_bps / transfer_info.total_measures as f64;
let (mem_avg, unit_avg) = byte_to_mem_units(avg_bps);
print_fixed_width(output, "Average Transfer Speed:", 24);
write!(output, "{:.3} {}/sec", mem_avg, unit_avg)?;
term_clear_line(output)?;
Ok(())
}
fn print_fixed_width<W: Write>(output: &mut W, text: &str, columns: usize) {
    // write_all is used instead of write so partial writes cannot truncate output.
    if let Err(err) = output.write_all(text.as_bytes()) {
        panic!("[print_fixed_width] Error while writing to stream: {}", err);
    }
    if text.len() < columns {
        let remaining = columns - text.len();
        let pad = [b' '];
        for _ in 0..remaining {
            if let Err(err) = output.write_all(&pad) {
                panic!("[print_fixed_width] Error while padding output: {}", err);
            }
        }
    }
}
/// Clears to the end of the current line, then moves output to the next line.
#[inline]
fn term_clear_line<W: Write>(output: &mut W) -> Result<(), std::io::Error> {
writeln!(output, "\x1b[K")?;
Ok(())
}
/// Moves the cursor up one line.
#[inline]
fn term_move_up<W: Write>(output: &mut W, lines: usize) -> Result<(), std::io::Error> {
write!(output, "\x1b[{}A", lines)?;
Ok(())
}
/// Computes the Bytes Per Second for a byte count observed over `duration`.
fn bytes_per_second(bytes: usize, duration: Duration) -> f64 {
    let secs = duration.as_secs_f64();
    if secs == 0.0 { 0.0 } else { bytes as f64 / secs }
}
/// Converts a raw byte count into a (value, unit) pair for display.
/// Reconstructed from its call sites; the 1024-based unit labels are an assumption.
fn byte_to_mem_units(bytes: f64) -> (f64, &'static str) {
    const UNITS: [&str; 4] = ["Bytes", "KB", "MB", "GB"];
    let mut value = bytes;
    let mut unit_idx = 0;
    while value >= 1024.0 && unit_idx < UNITS.len() - 1 {
        value /= 1024.0;
        unit_idx += 1;
    }
    (value, UNITS[unit_idx])
}
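// A minimal test sketch for the two reconstructed helpers above. The 1024-based
// scaling and the "KB"/"MB" unit labels are assumptions made for this
// reconstruction, not behavior confirmed by the original source.
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[test]
    fn scales_kilobytes() {
        let (value, unit) = byte_to_mem_units(2048.0);
        assert!((value - 2.0).abs() < f64::EPSILON);
        assert_eq!(unit, "KB");
    }

    #[test]
    fn zero_duration_yields_zero_bps() {
        assert_eq!(bytes_per_second(4096, Duration::from_secs(0)), 0.0);
    }
}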