| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
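The rows that follow are easier to read with the schema in mind: each record is a source file split into `prefix`, `middle`, and `suffix`, with `fim_type` labelling what kind of span was masked out (for example `identifier_name` or `conditional_block`). As a minimal sketch of how such a record could be reassembled, assuming a hypothetical dataset path (the real name is not given here):

```python
# Minimal sketch: reassembling a fill-in-the-middle (FIM) record.
# "user/fim-dataset" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/fim-dataset", split="train")  # hypothetical path

def reassemble(record):
    """Rebuild the original source text by joining the three spans."""
    return record["prefix"] + record["middle"] + record["suffix"]

example = ds[0]
print(example["file_name"], example["fim_type"])
print(reassemble(example)[:200])  # first 200 characters of the restored file
```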
client_enum_component_type.go |
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
}
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func | (key int) (ComponentType, string, error) {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case | ComponentTypesE | identifier_name |
client_enum_component_type.go |
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys |
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func ComponentTypesE(key int) (ComponentType, string, error) {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := `
Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
| {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
} | conditional_block |
client_enum_component_type.go |
ComponentsCharacterActivitiesKey = 204
ComponentsCharacterEquipmentKey = 205
ComponentsItemInstancesKey = 300
ComponentsItemObjectivesKey = 301
ComponentsItemPerksKey = 302
ComponentsItemRenderDataKey = 303
ComponentsItemStatsKey = 304
ComponentsItemSocketsKey = 305
ComponentsItemTalentGridsKey = 306
ComponentsItemCommonDataKey = 307
ComponentsItemPlugStatesKey = 308
ComponentsVendorsKey = 400
ComponentsVendorCategoriesKey = 401
ComponentsVendorSalesKey = 402
ComponentsKiosksKey = 500
ComponentsCurrencyLookupsKey = 600
ComponentsPresentationNodesKey = 700
ComponentsCollectiblesKey = 800
ComponentsRecordsKey = 900
ComponentsTransitoryKey = 1000
)
type ComponentType string
func ComponentTypesManyS(keys ...int) (out []string) {
for _, e := range ComponentTypesMany(keys...) {
out = append(out, string(e))
}
return out
}
func ComponentTypesMany(keys ...int) (out []ComponentType) {
for _, key := range keys {
eout, _, _ := ComponentTypesE(key)
out = append(out, eout)
}
return out
}
func ComponentTypes(key int) ComponentType {
out, _, _ := ComponentTypesE(key)
return out
}
func ComponentTypesE(key int) (ComponentType, string, error) | Asking for this will get you the profile-level inventories, such as your Vault buckets
(yeah, the Vault is really inventory buckets located on your Profile)
`
return "ProfileInventories", description, nil
case ComponentsProfileCurrenciesKey:
description := `
This will get you a summary of items on your Profile that we consider to be "currencies", such as Glimmer.
I mean, if there's Glimmer in Destiny 2. I didn't say there was Glimmer.
`
return "ProfileCurrencies", description, nil
case ComponentsProfileProgressionKey:
description := `
This will get you any progression-related information that exists on a Profile-wide level, across all characters.
`
return "ProfileProgression", description, nil
case ComponentsPlatformSilverKey:
description := `
This will get you information about the silver that this profile has on every platform on which it plays.
You may only request this component for the logged in user's Profile, and will not receive it if you request it for another Profile.
`
return "PlatformSilver", description, nil
case ComponentsCharactersKey:
description := `
This will get you summary info about each of the characters in the profile.
`
return "Characters", description, nil
case ComponentsCharacterInventoriesKey:
description := `
This will get you information about any non-equipped items on the character or character(s) in question,
if you're allowed to see it.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their non-equipped items in Bungie.Net settings to actually get results.
`
return "CharacterInventories", description, nil
case ComponentsCharacterProgressionsKey:
description := `
This will get you information about the progression (faction, experience, etc... "levels") relevant to each character.
You have to either be authenticated as that user, or that user must allow anonymous viewing of their progression info in Bungie.Net settings to actually get results.
`
return "CharacterProgressions", description, nil
case CharacterRenderDataKey:
description := `
This will get you just enough information to be able to render the character in 3D if you have written a 3D rendering library for Destiny Characters, or "borrowed" ours.
It's okay, I won't tell anyone if you're using it.
I'm no snitch. (actually, we don't care if you use it - go to town)
`
return "RenderData", description, nil
case ComponentsCharacterActivitiesKey:
description := `
This will return info about activities that a user can see and gating on it, if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
Note that the data returned by this can be unfortunately problematic and relatively unreliable in some cases.
We'll eventually work on making it more consistently reliable.
`
return "CharacterActivities", description, nil
case ComponentsCharacterEquipmentKey:
description := `
This will return info about the equipped items on the character(s). Everyone can see this.
`
return "CharacterEquipment", description, nil
case ComponentsItemInstancesKey:
description := `
This will return basic info about instanced items - whether they can be equipped, their tracked status, and some info commonly needed in many places (current damage type, primary stat value, etc)
`
return "ItemInstances", description, nil
case ComponentsItemObjectivesKey:
description := `
Items can have Objectives (DestinyObjectiveDefinition) bound to them.
If they do, this will return info for items that have such bound objectives.
`
return "ItemObjectives", description, nil
case ComponentsItemPerksKey:
description := `
Items can have perks (DestinyPerkDefinition).
If they do, this will return info for what perks are active on items.
`
return "ItemPerks", description, nil
case ComponentsItemRenderDataKey:
description := `
If you just want to render the weapon, this is just enough info to do that rendering.
`
return "ItemRenderData", description, nil
case ComponentsItemStatsKey:
description := `
Items can have stats, like rate of fire. Asking for this component will return requested item's stats if they have stats.
`
return "ItemStats", description, nil
case ComponentsItemSocketsKey:
description := `
Items can have sockets, where plugs can be inserted.
Asking for this component will return all info relevant to the sockets on items that have them.
`
return "ItemSockets", description, nil
case ComponentsItemTalentGridsKey:
description := `
Items can have talent grids, though that matters a lot less frequently than it used to.
Asking for this component will return all relevant info about activated Nodes and Steps on this talent grid, like the good ol' days.
`
return "ItemTalentGrids", description, nil
case ComponentsItemCommonDataKey:
description := `
Items that *aren't* instanced still have important information you need to know:
- how much of it you have,;
- the itemHash so you can look up their DestinyInventoryItemDefinition,;
- whether they're locked,;
- etc...
Both instanced and non-instanced items will have these properties.
You will get this automatically with Inventory components - you only need to pass this when calling GetItem on a specific item.
`
return "ItemCommonData", description, nil
case ComponentsItemPlugStatesKey:
description := `
Items that are "Plugs" can be inserted into sockets.
This returns statuses about those plugs and why they can/can't be inserted.
I hear you giggling, there's nothing funny about inserting plugs. Get your head out of the gutter and pay attention!
`
return "ItemPlugStates", description, nil
case ComponentsVendorsKey:
description := `
When obtaining vendor information, this will return summary information about the Vendor or Vendors being returned.
`
return "Vendors", description, nil
case ComponentsVendorCategoriesKey:
description := `
When obtaining vendor information, this will return information about the categories of items provided by the Vendor.
`
return "VendorCategories", description, nil
case ComponentsVendorSalesKey:
description := `
When obtaining vendor information, this will return the information about items being sold by the Vendor.
`
return "VendorSales", description, nil
case ComponentsKiosksKey:
description := `
Asking for this component will return you the account's Kiosk statuses: that is, what items have been filled out/acquired.
But only if you are the currently authenticated user or the user has elected to allow anonymous viewing of its progression info.
`
return "Kiosks", description, nil
case ComponentsCurrencyLookupsKey:
description := `
A "shortcut" component that will give you all of the item hashes/quantities of items that the requested character can use to determine if an action (purchasing, socket insertion) has the required currency.
(recall that all currencies are just items, and that some vendor purchases require items that you might not traditionally consider to be a "currency", like plugs/mods!)
`
return "CurrencyLookups", description, nil
case | {
switch key {
case ComponentsNoneKey:
return "None", "", nil
case ComponentsProfilesKey:
description := `
Profiles is the most basic component, only relevant when calling GetProfile.
This returns basic information about the profile, which is almost nothing:
- a list of characterIds,
- some information about the last time you logged in, and;
- that most sobering statistic: how long you've played.
`
return "Profiles", description, nil
case ComponentsVendorReceiptsKey:
description := `
Only applicable for GetProfile, this will return information about receipts for refundable vendor items.
`
return "VendorReceipts", description, nil
case ComponentsProfileInventoriesKey:
description := ` | identifier_body |
mysql.rs | If set, connected to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
|
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL | {
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
} | identifier_body |
mysql.rs | fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
_ => panic!(err),
}
}
#[tokio::test]
async fn | test_null_constraint_violation | identifier_name |
|
mysql.rs | use url::Url;
use crate::{
ast::{ParameterizedValue, Query},
connector::{metrics, queryable::*, ResultSet, DBIO},
error::{Error, ErrorKind},
visitor::{self, Visitor},
};
/// A connector interface for the MySQL database.
#[derive(Debug)]
pub struct Mysql {
pub(crate) pool: my::Pool,
pub(crate) url: MysqlUrl,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
/// Wraps a connection url and exposes the parsing logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the database connected. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, connected to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results | use percent_encoding::percent_decode;
use std::{borrow::Cow, future::Future, path::Path, time::Duration};
use tokio::time::timeout; | random_line_split |
|
Bijlage_D.py | = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
pr | # Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4, | int("Unregistered class: %s " % cls.bl_label)
| identifier_body |
Bijlage_D.py | 100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
!changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, gamma, beta : float
The angles between the sides.
angle_in_degrees : bool
True if alpha, beta and gamma are expressed in degrees.
Returns
-------
r : array_like
The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
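# Each (i, j) pair below picks two of the eight corner spheres created above (in binary i,j,k order); together the pairs give the twelve edges of the unit-cell box.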
for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
# select all line and corners
for i in cell_corners:
i.select_set(action="SELECT")
for i in cell_edges:
i.select_set(action="SELECT")
# set corner in origin as active and join meshes as one object
bpy.context.view_layer.objects.active = cell_corners[0]
bpy.ops.object.join()
print("Cell box drawn")
def drawLine(self,ac,tc):
dx = tc[0] - ac[0]
dy = tc[1] - ac[1]
dz = tc[2] - ac[2]
dist = np.sqrt(dx**2 + dy**2 + dz**2)
bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
activeObject = bpy.context.active_object
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
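# Orient the cylinder along the corner-to-corner vector: theta is the tilt away from the z-axis, phi the rotation about it.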
phi = math.atan2(dy, dx)
theta = math.acos(dz/dist)
bpy.context.object.rotation_euler[1] = theta
bpy.context.object.rotation_euler[2] = phi
return activeObject
def drawBonds(self):
cnt = 0
bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
bpy.context.object.name = 'bez'
for atom in self.atoms:
for target in self.atoms:
if atom != target:
if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
continue
if(atom.sym == 'H' and target.sym == 'H'):
continue
if calcDistance(self.ftoc,atom,target) <= bond_distance:
self.makeBond(atom,target)
cnt += 1
print("Atom bonds drawn:",cnt)
# This function hooks the bond to the atoms
def makeBond(self,atom,target):
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
o1 = bpy.data.objects[atom.elid]
o2 = bpy.data.objects[target.elid]
bond = self.hookCurve(o1,o2, bpy.context.scene)
bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
activeObject = bpy.context.active_object # Set active object to variable
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
if 'OBJECT'!=bpy.context.mode:
bpy.ops.object.mode_set(mode='OBJECT')
def ho | okCurve(s | identifier_name |
|
Bijlage_D.py | = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
Note: the formula was changed to match the one found at: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, beta, gamma : float
The angles between the sides, in degrees (converted internally with np.deg2rad).
Returns
-------
r : array_like
The 3x3 transformation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0)
r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
fo | # draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4 | r k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
| conditional_block |
Bijlage_D.py | _distance = context.scene.bond_distance
global draw_lattice
draw_lattice = context.scene.draw_lattice
global atom_name
atom_name = context.scene.atom_name
global print_data
print_data = context.scene.print_data
global draw_style
global atom_color
draw_style = context.scene.style_selection_mode
if(draw_style=="STICK"):
draw_bonds = True
atom_color = False
else:
atom_color = True
global draw_quality
draw_quality = context.scene.quality_selection_mode
global add_camera
add_camera = context.scene.add_camera
drawCrystal(file_path)
return {'FINISHED'}
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
name="Draw bonds",
description="Draw bonds between elements"
)
bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
name="Bond distance",
description="Set max distance for bonds to occur",
default=2,
min=0.0,
max=10.0,
precision=2
)
bpy.types.Scene.atom_name = bpy.props.BoolProperty(
name="Atom names",
description="Display the name of atoms"
)
bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
name="Draw lattice",
description="Draw unit cell outline"
)
bpy.types.Scene.print_data = bpy.props.BoolProperty(
name="Print data",
description="Print crystal data in terminal"
)
# Dropdown menu for drawing style
selection_style = [
("SPACE FILLING", "SPACE FILLING", "", 1),
("BALL AND STICK", "BALL AND STICK", "", 2),
("STICK", "STICK", "", 3),
]
bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
items=selection_style,
name="Style"
)
# Dropdown menu for drawing quality
selection_qual = [
("MIN", "MIN", "", 1),
("LOW", "LOW", "", 2),
("MED", "MED", "", 3),
("HIGH", "HIGH", "", 4),
("MAX", "MAX", "", 5)
]
bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
items=selection_qual,
name="Quality",
default="MED"
)
bpy.types.Scene.add_camera = bpy.props.BoolProperty(
name="Place camera",
description="Place a camera and light to make rendering possible"
)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
bl_idname = "CDTB_Panel"
bl_label = "CDTB_Panel"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
bl_category = "CDTB"
def draw(self,context):
scn = context.scene
layout = self.layout
layout.label(text = 'Input file',icon_value=112)
'''
for i in range(100):
layout.label(text = str(i),icon_value =i)
'''
box = layout.box()
row = box.row()
splitrow = row.split(factor=0.075)
left_col = splitrow.column()
right_col = splitrow.column()
left_col.operator('error.scan_file',icon_value=108,text="")
right_col.label(text=file_path.rsplit('\\', 2)[-1])
layout.label(text = 'Settings',icon_value =117)
box = layout.box()
box.prop(scn,'draw_bonds')
box.prop(scn,'bond_distance')
box.prop(scn,'draw_lattice')
box.prop(scn, 'atom_name')
box.prop(scn,'print_data')
box.prop(scn, 'style_selection_mode')
box.prop(scn, 'quality_selection_mode')
box.prop(scn, 'add_camera')
layout.separator()
splitrow = layout.split(factor=0.3)
col = splitrow.column()
col.operator('object.cdtb_operator',text="Draw Crystal")
col = splitrow.column()
col.label(text=user_feedback)
layout.separator()
@classmethod
def register(cls):
print("Registered class: %s " % cls.bl_label)
@classmethod
def unregister(cls):
print("Unregistered class: %s " % cls.bl_label)
def register():
bpy.utils.register_class(Operator)
bpy.utils.register_class(ScanFileOperator)
bpy.utils.register_class(Panel)
def unregister():
bpy.utils.unregister_class(Operator)
bpy.utils.unregister_class(Panel)
bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
def __init__(self,F,cb):
self.start = time.time()
print("Draw timer started")
self.name = F
self.cell = Cell(cb)
self.atoms = readEl(cb)
self.pos = readPos(cb)
c = self.cell
self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
def printout(self):
print(self.name)
print()
self.cell.printout()
print()
for element in self.pos:
element.printout()
print()
for element in self.atoms:
element.printout()
print()
print("Fractional to cartesian matrix:")
print(self.ftoc)
def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
"""
Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
Note: the formula was changed to match the one found at: https://en.wikipedia.org/wiki/Fractional_coordinates
Return the transformation matrix that converts fractional coordinates to
cartesian coordinates.
Parameters
----------
a, b, c : float
The lengths of the edges.
alpha, beta, gamma : float
The angles between the sides, in degrees (converted internally with np.deg2rad).
Returns
-------
r : array_like
The 3x3 transformation matrix. ``V_cart = np.dot(r, V_frac)``.
"""
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosb = np.cos(beta)
sinb = np.sin(beta)
cosg = np.cos(gamma)
sing = np.sin(gamma)
volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
volume = a*b*c*np.sqrt(volume)
r = np.zeros((3, 3))
r[0, 0] = float(a)
r[0, 1] = float(b * cosg)
r[0, 2] = float(c * cosb)
r[1, 0] = float(0)
r[1, 1] = float(b * sing)
r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
r[2, 0] = float(0) | r[2, 1] = float(0)
r[2, 2] = float(volume / (a*b*sing))
return r
def drawCrystal(self):
if draw_lattice:
self.drawCell()
print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
self.drawAtoms()
print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
if(draw_bonds):
self.drawBonds()
print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
def drawAtoms(self):
for a in self.atoms:
a.drawObj(self.ftoc)
print("Atoms drawn:",len(self.atoms))
def drawCell(self):
cell_corners=[]
cell_edges=[]
# calculate and draw corners
for i in range(2):
for j in range(2):
for k in range(2):
bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
activeObject = bpy.context.active_object # Set active object to variable
cell_corners.append(activeObject)
mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
activeObject.data.materials.append(mat) # add the material to the object
bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
# draw lines
for i,j in zip([0,0,0,1,1,2,2,3,4,4, | random_line_split |
|
gopro.py | timestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def | (self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in self.clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GOPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
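# NMEA 0183 checksum: XOR of every character between '$' and '*', appended as two uppercase hex digits.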
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls in between start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
| __init__ | identifier_name |
gopro.py | timestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in self.clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GOPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
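# NMEA 0183 checksum: XOR of every character between '$' and '*', appended as two uppercase hex digits.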
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
| clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls in between start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips | identifier_body |
|
gopro.py | None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws']
ii.twa = h['twa']
ii.tws = h['tws']
ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in self.clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GOPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
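# NMEA 0183 checksum: XOR of every character between '$' and '*', appended as two uppercase hex digits.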
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
| in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
else: # The time interval spans to subsequent clips
clips.append({
'name': clip['name'],
'in_time': in_time, | conditional_block |
|
gopro.py | timestamp(time_stamp)) if time_stamp is not None else None
def to_timestamp(start_utc):
return start_utc.timestamp() if start_utc is not None else None
class GoProCacheEncoder(JSONEncoder):
# Override the default method
def default(self, obj):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
elif isinstance(obj, Location):
if obj.elevation is None:
return {'lat': obj.latitude, 'lon': obj.longitude}
else:
return {'lat': obj.latitude, 'lon': obj.longitude, 'alt': obj.elevation}
elif isinstance(obj, RawInstrData):
return obj.to_dict()
raise TypeError("Type %s not serializable" % type(obj))
def decode_gopro_cache(d):
for k in d:
if k == 'start_utc' or k == 'stop_utc':
if d[k] is None:
d[k] = None
elif 'utc' in k:
d[k] = datetime.fromisoformat(d[k])
elif 'instr_data' in k:
ii_list = []
for h in d[k]:
ii = RawInstrData()
ii.utc = h['utc']
ii.lat = h['lat']
ii.lon = h['lon']
ii.sog = h['sog']
ii.cog = h['cog']
ii.awa = h['awa']
ii.aws = h['aws'] | ii.sow = h['sow']
ii.hdg = h['hdg']
ii.n2k_epoch = h['n2k_epoch']
ii_list.append(ii)
d[k] = ii_list
return d
class GoPro:
def __init__(self, sd_card_dir, work_dir):
cache_dir = work_dir + os.sep + 'gopro'
os.makedirs(cache_dir, exist_ok=True)
self.instr_data = []
# Build the list of clips
clips = []
print(f'Looking for GOPRO clips in {sd_card_dir} ...')
for root, dirs_list, files_list in os.walk(sd_card_dir):
for file_name in files_list:
if os.path.splitext(file_name)[-1] == ".MP4":
file_name_path = os.path.join(root, file_name)
if file_name[0] != '.':
clips.append({'name': file_name_path})
# Time stamp the clips
self.clips = [] # Only clips with valid UTC
for clip in clips:
clip_name = clip['name']
clip_cache_name = cache_dir + os.sep + os.path.basename(clip_name) + '.json'
clip_nmea_name = cache_dir + os.sep + os.path.basename(clip_name) + '.nmea'
clip['clip_nmea_name'] = clip_nmea_name
if os.path.isfile(clip_cache_name):
print(f'Reading GOPRO clip info from cache {clip_cache_name}')
with open(clip_cache_name, 'r') as f:
cache = json.load(f, object_hook=decode_gopro_cache)
start_utc = from_timestamp(cache['start_utc'])
stop_utc = from_timestamp(cache['stop_utc'])
instr_data = cache['instr_data']
else:
print(f'Scanning {clip_name}')
[start_utc, stop_utc, instr_data] = self.extract_sensor_data(clip_name, clip_nmea_name)
self.instr_data.append(instr_data)
cache = {
'start_utc': to_timestamp(start_utc),
'stop_utc': to_timestamp(stop_utc),
'instr_data': instr_data
}
with open(clip_cache_name, 'w') as f:
json.dump(cache, f, indent=4, cls=GoProCacheEncoder)
if start_utc is not None:
clip['start_utc'] = start_utc
clip['stop_utc'] = stop_utc
clip['instr_data'] = instr_data
self.clips.append(clip)
print(f'{clip["name"]} {start_utc} {stop_utc}')
else:
print(f'Warning: Clip {clip["name"]} contains no valid UTC')
# Sort clips by UTC start time
self.clips.sort(key=lambda x: x['start_utc'])
# Determine overall start and finish times
self.start_time_utc = None
self.finish_time_utc = None
if len(self.clips) > 0:
self.start_time_utc = self.clips[0]['start_utc']
self.finish_time_utc = self.clips[-1]['stop_utc']
# Create one NMEA file once clips are sorted
gopro_nmea_file = cache_dir + os.sep + 'gopro.nmea'
print(f'Creating GOPRO NMEA file {gopro_nmea_file}')
with open(gopro_nmea_file, 'w') as nmea_file:
for clip in self.clips:
self.instr_data += clip['instr_data']
with open(clip['clip_nmea_name'], 'r') as clip_nmea:
for line in clip_nmea:
nmea_file.write(line)
print(f'Done with GOPRO processing')
@staticmethod
def extract_sensor_data(mp4_name, clip_nmea_name):
instr_data = []
cmd = [GOPRO_GPMF_BIN, mp4_name]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
start_utc = None
stop_utc = None
timezone = pytz.timezone("UTC")
if result.returncode == 0:
print(f'Creating NMEA file {clip_nmea_name}')
with open(clip_nmea_name, 'w') as nmea_file:
lines = result.stdout.decode('utf-8').split('\n')
reader = csv.DictReader(lines)
for row in reader:
utc = timezone.localize(datetime.fromisoformat(row['utc']))
if row['fix_valid'] == 'True':
signed_lat = float(row['lat'])
lat_sign = 'N' if signed_lat > 0 else 'S'
lat = abs(signed_lat)
lat_min = (lat - int(lat)) * 60
signed_lon = float(row['lon'])
lon_sign = 'E' if signed_lon > 0 else 'W'
lon = abs(signed_lon)
lon_min = (lon - int(lon)) * 60
sog = float(row['sog_ms']) * 3600. / 1852.
if 0 <= lat <= 90 and 0 <= lon <= 180:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'A,{int(lat):02d}{lat_min:08.5f},{lat_sign},'\
f'{int(lon):03d}{lon_min:08.5f},{lon_sign},'\
f'{sog:.1f},,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
ii = RawInstrData(0, utc, signed_lat, signed_lon, sog)
instr_data.append(ii)
else:
print('GOPRO GPMF bug')
if start_utc is None:
t_ms = int(float(row['t']) * 1000)
start_utc = utc - timedelta(milliseconds=t_ms)
stop_utc = utc
else:
rmc = f'$GPRMC,{utc.hour:02d}{utc.minute:02d}{utc.second:02d}.{int(utc.microsecond / 1000):03d},' \
f'V,,,'\
f',,'\
f',,{utc.day:02d}{utc.month:02d}{utc.year % 100:02d},'
body = rmc[1:] # string between $ and *
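# NMEA 0183 checksum: XOR of every character between '$' and '*', appended as two uppercase hex digits.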
cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in body])
nmea = f'{rmc}*{cc:02X}\r\n'
nmea_file.write(nmea)
return start_utc, stop_utc, instr_data
def get_clips_for_time_interval(self, start_utc, stop_utc):
# Find the clip containing the start of interval
clips = []
for start_idx in range(len(self.clips)):
clip = self.clips[start_idx]
if clip['start_utc'] <= start_utc <= clip['stop_utc']:
in_time = (start_utc - clip['start_utc']).seconds
# Now find the clip containing the stop time
for stop_idx in range(start_idx, len(self.clips)):
clip = self.clips[stop_idx]
if stop_utc <= clip['stop_utc']: # Last clip found
# Check for the corner case when the stop_utc falls inbetween start_utc of the previous clip
# and start_utc of this one
if stop_utc < clip['start_utc']:
return clips
out_time = (stop_utc - clip['start_utc']).seconds
clips.append({
'name': clip['name'],
'in_time': in_time,
'out_time': out_time,
})
return clips
| ii.twa = h['twa']
ii.tws = h['tws'] | random_line_split |
Octagoat.py | for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
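# Stomp check: the player lands on top of another goat, which is knocked back to its side of the cage and scores 10 points.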
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
dir4 = "left"
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode setup; also reused by the Hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def pause(event):
global paused
if paused == 0:
pausetxt = Label(canvas, text="Game paused\
\nReturn?\nThe game will pause for 3 seconds when\
you press p again", font="terminal 15", bg="green")
pausetxt.place(x=width/3, y=100)
paused += 1
window.after(10000, lambda: pausetxt.destroy())
elif paused == 1:
time.sleep(3)
paused = 0
moveenemy()
move()
moveenemy2()
# Bosskey
def bosskey(event):
| global boss, width
canvas.move(boss, 0, width) | identifier_body |
|
Octagoat.py | width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
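# Stomp check: the player lands on top of another goat, which is knocked back to its side of the cage and scores 10 points.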
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
|
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode setup; also reused by the Hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, | dir4 = "left" | conditional_block |
Octagoat.py |
# The goat pixel drawings and the cage were produced
# using PixilArt online tool and modified using GIMP.
from tkinter import Tk, PhotoImage, Button
from tkinter import Menu, messagebox, Canvas, Label
from PIL import Image, ImageTk
import time
# Window dimensions.
def setWindowDimensions(w, h):
window.title("Octagoat")
# title of window
ws = window.winfo_screenwidth()
# computer's screen width used for window dimensions
hs = window.winfo_screenheight()
# computer's screen height used for window dimensions
window.geometry(f"{ws}x{hs}")
# window size
return window
# Main menu with buttons and background picture.
def main_menu():
global window, start, start2, start3, over
global start4, start5, bg1, canvas, boss, start6
if(over==True):
Restart.destroy()
exitb.destroy()
menub.destroy()
if(over==False):
canvas = Canvas(window, bg="black", width=width, height=height)
over = False
background = canvas.create_image(0, 0, anchor='nw', image=bg1)
start = Button(window, text="Play Normal mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: normal())
start.place(x=400, y=50)
start2 = Button(window, text="Play Hard mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: hard())
start2.place(x=750, y=50)
start3 = Button(window, text="Play Mating\nseason mode", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: mating())
start3.place(x=1100, y=50)
start4 = Button(window, text="Leaderboard", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20))
start4.place(x=1450, y=50)
start5 = Button(window, text="Exit", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: window.destroy())
start5.place(x=50, y=200)
start6 = Button(window, text="Tutorial", background="black",
foreground="white", activebackground="green", width="17", height="3",
font=('terminal', 20), command=lambda: tutorial())
start6.place(x=50, y=50)
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
# Tutorial explaining different game elements and how to play.
def tutorial():
messagebox.showinfo("Welcome to Octagoat",
"The name comes from Mixed Martial arts (MMA) cages, which are octagons,\
put goats in an octagon and you have an octagoat.")
messagebox.showinfo("Welcome to Octagoat",
"Your character is a goat fighting other goats,\
the goats are running from\
one side of the cage to the other trying to ram your goat,\
and your goat is trying to stomp the other goats (in Super Mario fashion)")
messagebox.showinfo("Welcome to Octagoat",
" Stomp another goat once, and it will count as a win.\
but get rammed from another goat twice and you lose.\
You can stay on the ramp if you are too scared")
messagebox.showinfo("Welcome to Octagoat",
"You can use the arrow keys to move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
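# Stomp check: the player lands on top of another goat, which is knocked back to its side of the cage and scores 10 points.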
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
|
# The goat background picture was taken on the
# 23rd of November 2019 at Snowdon, Wales by the author. | random_line_split |
|
Octagoat.py | move your goat from\
side to side or jump. To get on the ramp jump to the top\
from underneath it. To get off the ramp simply walk off the sides")
messagebox.showinfo("Welcome to Octagoat",
"Press B if someone passes. C stands for cheat and \
D stands for Diaz.\nWHEN PLAYING MATING \
SEASON TRY TO GET ON THE RAMP AS SOON AS YOU SPAWN")
def rightKey(event):
global direction
direction = "right"
move()
def leftKey(event):
global direction
direction = "left"
move()
def upKey(event):
global direction, sprite
direction = "up"
move()
# Movement function for player
def move():
# This function includes movement, and collision with the ramp
global direction, ramp, score, sprite, speed, paused
if paused == 0:
if direction == "left":
canvas.move(sprite, -speed, 0)
elif direction == "right":
canvas.move(sprite, speed, 0)
elif direction == "up":
canvas.move(sprite, 0, -150)
window.after(350, lambda: canvas.move(sprite, 0, 150))
a = canvas.bbox(sprite)
c = canvas.bbox(box)
# Ramp collision
if a[1] in range((c[3]-100), c[3]) and(a[0] in range
(int(c[0]), int(c[2])) or a[2] in range(int(c[0]), int(c[2]))):
canvas.move(sprite, 0, -420)
ramp = True
if a[0] not in range(int(c[0]), int(c[2]-50.0)) and(
a[2] not in range(int(c[0]+50.0), int(c[2])) and ramp):
window.after(10, lambda: canvas.move(sprite, 0, 120))
window.after(250, lambda: canvas.move(sprite, 0, 150))
window.after(450, lambda: canvas.move(sprite, 0, 150))
ramp = False
def checkCollision(g, frame):
global width, dir, hit, score, scoreText
a = canvas.bbox(sprite)
if a[3] in range((g[3]-150), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
if frame == enemy or frame == enemytr:
canvas.move(frame, (width-g[0]), 0)
else:
canvas.move(frame, (-g[0]), 0)
score += 10
txt = "Goats knocked out:" + str(score)
canvas.itemconfigure(scoreText, text=txt)
canvas.move(sprite, 0, -25)
window.after(250, lambda: canvas.move(sprite, 0, 25))
# Enemy collision (Losing condition)
if a[1] in range((g[1]), (g[3])) and(a[0] in range
((g[0]), (g[2]-75)) or a[2] in range((g[0]+75), (g[2]))):
canvas.move(sprite, 0, -250)
window.after(250, lambda: canvas.move(sprite, 0, 250))
hit += 1
canvas.delete(health1)
# Enemy hit player twice (game over).
if hit == 2:
canvas.move(sprite, 0, -height)
canvas.delete(health2)
messagebox.showinfo("Game over",
"Natural selection got you.\nSurvival of the fittest.\n")
gameover()
return
# Enemy movement function (only for normal mode)
# Update: now also controls mating season second goat on right side
def moveenemy():
global width, dir, hit, score, scoreText, paused, dir3
mov1 = 12
mov2 = 8
if paused == 0:
g = canvas.bbox(enemy)
if(diff == "mating"):
g3 = canvas.bbox(enemytr)
if g3[0] < 100:
dir3 = "right"
if g3[2] > (width-100):
dir3 = "left"
if dir3 == "left":
mov2 = -mov2
if g[0] < 100:
dir = "right"
if g[2] > (width-100):
dir = "left"
if dir == "left":
mov1 = -mov1
canvas.move(enemy, mov1, 0)
if(diff == "mating"):
canvas.move(enemytr, mov2, 0)
if(over == False):
if(diff == "mating"):
window.after(7, moveenemy)
else:
window.after(10, moveenemy)
checkCollision(g, enemy)
if(diff == "mating"):
checkCollision(g3, enemytr)
# Second enemy movement function in Hard mode.
# Update: now also controls mating season second goat on left side
def moveenemy2():
global width, dir2, hit, score, scoreText, paused, dir4
mov1 = 12
mov2 = 8
if paused == 0:
g2 = canvas.bbox(enemy2)
if(diff == "mating"):
g4 = canvas.bbox(enemytl)
if g4[0] < 100:
dir4 = "left"
if g4[2] > (width-100):
dir4 = "right"
if dir4 == "right":
mov2 = -mov2
if g2[0] < 100:
dir2 = "left"
if g2[2] > (width-100):
dir2 = "right"
if dir2 == "right":
mov1 = -mov1
canvas.move(enemy2, mov1, 0)
if(diff == "mating"):
canvas.move(enemytl, mov2, 0)
if (over == False):
if(diff == "mating"):
window.after(7, moveenemy2)
else:
window.after(10, moveenemy2)
checkCollision(g2, enemy2)
if(diff == "mating"):
checkCollision(g4, enemytl)
# Normal mode function which is called for hard and Mating season modes.
def normal():
global score, sprite, enemy, health2, health1, box, scoreText
global start, start2, start3, diff, boss, start4, start5, start6
start.destroy()
start2.destroy()
start3.destroy()
start4.destroy()
start5.destroy()
start6.destroy()
diff = "normal"
bg = canvas.create_image(0, 0, anchor='nw', image=mint)
sprite = canvas.create_image((width/2), (height-350),
anchor='nw', image=img)
enemy = canvas.create_image((width-250), (height-350),
anchor='nw', image=img2)
health1 = canvas.create_image(220, 0, anchor='ne', image=snoop)
health2 = canvas.create_image(100, 0, anchor='ne', image=snoop)
box = canvas.create_rectangle((width/2-190), (height-610),
(width/2+120), (height-560), fill="brown")
scoreText = canvas.create_text(width/2, 10, fill="black",
font="terminal 28", text=txt)
if diff == "normal":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy()
return
# Hard Mode adds an extra goat.
def hard():
global enemy2, diff, boss
normal()
diff = "hard"
enemy2 = canvas.create_image(0, (height-350), anchor='nw', image=img)
if diff == "hard":
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
moveenemy2()
return
# Mating season adds 2 extra fast goats.
def mating():
global enemytr, enemytl, diff, boss
normal()
hard()
diff = "mating"
enemytr = canvas.create_image((width-250), (height-350), # tr=top right
anchor='nw', image=img2)
enemytl = canvas.create_image(0, (height-350), anchor='nw', image=img) # tl=top left
boss = canvas.create_image(0, -width, anchor='nw', image=bosspic)
over = False
return
# Pause function
def | pause | identifier_name |
|
main.py | , must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 2):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no duplicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Delhi', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args parameter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
|
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes every time the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
| sum += i | conditional_block |
main.py | range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 2):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no duplicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Delhi', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args parameter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
sum += i
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes every time the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
text = "this is my written text\nThis is a newline"
with open('test.txt', 'w') as file: # open in write mode
file.write(text) # overwrites the previous contents; use 'a' instead of 'w' to append
#-----copying a file, use 'import shutil'-----
def copyFile():
shutil.copyfile('test.txt', 'copy.txt') # src,dest
#-----moving a file, use 'import os'
def moveFile():
source = "test.txt"
destination = "C:\\Users\\benrafalski\\Downloads\\test.txt"
try:
if os.path.exists(destination):
print("this file is already there")
else:
os.replace(source, destination)
print(source+ " was moved")
except FileNotFoundError:
print(source + " was not found")
#-----deleting a file, use 'import os'-----
def deleteFile():
try:
os.remove("copy.txt")
except FileNotFoundError:
print("that file was not found")
#-----classes-----
def createClass():
class Car:
# class variable
wheels = 4
# self is the same as 'this' keyword, __init__ is the constructor
def __init__(self, make, model, year, color):
# instance variables
self.make = make
self.model = model
self.year = year
self.color = color
def drive(self):
print("this car is driving")
def stop(self):
print("this car is stopped")
new_car = Car("toyota", "tacoma", 2002, "silver")
print(new_car.make, new_car.model, new_car.year, new_car.color, new_car.wheels)
new_car.drive()
new_car.stop()
#-----Inheritance-----
def inheritance():
# parent class
class Organism:
alive = True
# child class of organism
class Animal(Organism):
def eat(self):
print("this animal is eating")
def sleep(self):
print("this animal is sleeping")
# child classes of animal
class Rabbit(Animal):
def hop(self):
print("this rabbit is hopping")
class Fish(Animal):
def swim(self):
print("this fish is swimming")
class Hawk(Animal):
def | fly | identifier_name |
|
main.py |
#-----variables-----
def variables():
name = "Ben Rafalski" # can use single or double quotes for a string in python
age = 20
height = 250.5
alive = True # booleans start with a capital letter
print("hello " + name + " who is: " + str(age) + " years old and " + str(250.5) + "cm tall, is ben alive? " + str(alive))
#-----multiple assignment, assign multiple variables in one line-----
#name, age, alive = "ben", 20, True
#print("name: " + name + ", age: " + str(age) + ", isAlive?: " + str(alive))
#-----string methods-----
#name = "ben"
#print(len(name)) # string length
#print(name.find("b")) # returns the index of the character selected
#print(name.capitalize()) # capitalizes the first letter
#print(name.upper()) #turns string all uppercase
#print(name.lower()) # turns string all lowercase
#print(name.isdigit()) # returns true if it is a number
#print(name.isalpha()) # returns true if all characters in the string are letters
#print(name.count("e")) # returns the count of selected characters
#print(name.replace("e", "a")) # replaces first argument with the second one
#print(name*3) # prints a string multiple times
#-----Type Casting-----
#x, y, z = 1, 2.2, "3" # int, float, string
#print(int(y)) # drops the decimal
#print(float(z)) # adds a .0 to the end
#print(str(x)*3)
#-----user input-----
#name = input("what is your name?: ") # always returns a string, must cast if you need to do math operations
#age = float(input("what is your age?: "))
#age += 1
#print("your name is: " + name + ", next year you will be: " + str(age))
#-----math functions, must use "import math" to start-----
#pi = 3.14
#x, y, z = 1, 2, 3
#print(round(pi)) # rounds the number
#print(math.ceil(pi)) # rounds the number up
#print(math.floor(pi)) # rounds the number down
#print(abs(-pi)) # returns the absolute value of the number
#print(pow(pi, 2)) # takes the first argument to the power of the second argument
#print(math.sqrt(pi)) # returns the square root of the number
#print(max(x,y,z)) # returns the max number from a set of numbers
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 2):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no duplicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Delhi', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args parameter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def | print("hello world") # how to print
| identifier_body |
|
main.py |
#print(min(x,y,z)) # returns the min number from a set of numbers
#-----string slicing, creating a substring from a string-----
#name = "Ben Rafalski"
#first_name = name[0:3:1] # indexing[start(inclusive):stop(exclusive):step]
#last_name = name[4:12:1]
#reverse_name = name[::-1] # returns the string in reverse, index [0:end:-1]
#print(first_name + last_name)
#-----If statements-----
#age = int(input("how old are you?: "))
#if age >= 65:
# print("you are a senior")
#elif age >= 18:
# print("you are an adult")
#else:
# print("you are a child")
#-----logical operators (and, or, not)-----
#temp = float(input("what is the tempurature outside?: "))
#if not(temp <= 0 and temp >= 30):
# print("the tempuratue is good today")
#elif temp < 0 or temp > 30:
# print("the tempurature is bad today")
#-----while loops-----
#name = ""
#while len(name) == 0:
# name = input("enter your name: ")
#print("hello " + name)
#-----for loops-----
#for i in range(1,11,1): #executed 10 times 'range(inclusive, exclusive, step)'
# print(i)
#-----nested loops-----
#rows = int(input("how many rows?: "))
#columns = int(input("how many columns?: "))
#symbol = input("enter a symbol to use: ")
#for i in range(rows):
# for j in range(columns):
# print(symbol, end = "") # prevents a newline
# print() # newline
#-----loop control statements, break (terminate loop), continue (skip to next iteration), pass (does nothing)-----
#while True:
# name = input("enter your name: ")
# if name != "":
# break
#phone_number = "123-456-7890"
#for i in phone_number:
# if i == "-":
# continue
# print(i, end = "")
#for i in range(1,21):
# if i == 13:
# pass # does nothing, place holder for the word 'nothing'
# else:
# print(i)
#-----lists, like arrays-----
#food = ["pizza", "cookies", "hamburger", "hot dog"]
#food.append("ice cream") # adds the element to the end of the list
#food.remove("hot dog") # removes the selected element from the list
#food.pop() # removes the last element from the list
#food.insert(0,"cake") # inserts the second argument as an element into the index of the first argument in the list
#food.sort() # sorts the list alphabetically
#food.clear() # clears all items from the list
#-----2D lists, list of lists-----
#drinks = ["coffee", "soda", "tea"]
#dinner = ["pizza", "hamburger", "hotdog"]
#dessert = ["pie", "ice cream", "cake"]
#food = [drinks, dinner, dessert]
#for i in range(3):
# for j in range(3):
# print(food[i][j], end="")
# if(j != 2):
# print(", ", end="")
# print()
#-----tuples, collections which are ordered and unchangeable, useful for related data-----
#student = ("ben", 21, "male")
#student.count("ben") # returns how many times a value appears in a tuple
#student.index("male") # returns the index of the selected element in the tuple
#-----sets, collection that is unordered and unindexed, has no duplicates-----
#utensils = {"fork", "spoon", "knife"}
#dishes = {"bowl", "plate", "cup", "knife"}
#utensils.add("napkin") # adds a specified element to the set
#utensils.remove("fork") # removes a specified element from the set
#utensils.clear() # clears all elements from a set
#utensils.update(dishes) # adds the elements of one set to another
#dinner_table = utensils.union(dishes) # takes the union of 2 sets
#for x in dinner_table:
# print(x)
#utensils.difference(dishes) # what does utensils have that dishes does not?
#-----dictionaries = changeable, unordered collection of key:value pairs-----
#capitols = {'USA':'Washington DC', 'India':'New Delhi', 'China':'Beijing', 'Russia':'Moscow'}
#print(capitols['Russia']) # use the key instead of the index of the element
#print(capitols.get('Germany')) # safer way of accessing the elements
#print(capitols.keys()) # prints all the keys
#print(capitols.values()) # prints only the values
#print(capitols.items()) # prints the keys and the values together
#capitols.update({'Germany':'Berlin'}) # adds/changes an element in the dictionary
#capitols.pop('USA') # removes the key value pair from the dictionary
#capitols.clear() # clears the dictionary
#-----index operator [], used in strings, lists, and tuples-----
#name = "ben rafalski"
#first_name = name[:3].upper() # [start:end]
#last_name = name[4:].upper() # [start:end]
#last_character = name[-1] # access element in reverse
#print(first_name+last_name)
#-----functions-----
#def func(name): #use the def keyword
# print("this is " + name + "'s first function")
#input = input("what is your name?: ")
#func(input)
#-----function that returns something-----
#def multiply(factor, multiplier):
# multiplicand = factor * multiplier
# return multiplicand
#number_1 = int(input("enter a number: "))
#number_2 = int(input("enter another number: "))
#multiplying = multiply(number_1, number_2)
#print(multiplying)
#-----keyword arguments, preceded by an identifier when passing into a func-----
#def hello(first, middle, last):
# print("hello "+first+" "+middle+" "+last)
#hello(last = "rafalski", first = "benjamin", middle = "charles")
#-----*args parameter, packs all arguments into a tuple-----
def argsAndKwargsParameters():
def add(*args):
sum = 0
for i in args:
sum += i
return sum
print(add(5,5,5,5,5,5,5))
#-----**kwargs, packs all arguments into a dictionary-----
def hello(**kwargs):
print("hello", end = " ")
for key,value in kwargs.items():
print(value, end=" ")
hello(first="ben", middle = "charles", last = "rafalski")
#-----format method, helps display output-----
def formatMethod():
animal = "cow"
item = "moon"
pi = 3.1415
print("the {0:^10} jumped over the {1:^10}".format(animal, item)) # uses placeholders {}
print("the number pi is {:.2f}".format(pi))
#-----random module, use 'import random'-----
def randomModule():
x = random.randint(1,6)
y = random.random()
myList = ["rock", "paper", "scissors"]
z = random.choice(myList)
print(x, y, z)
cards = [1,2,3,4,5,6,7,8,9,"J", "Q", "K", "A"]
random.shuffle(cards)
print(cards)
#-----exception handling-----
def exceptionHandling():
try:
numerator = int(input("enter a number to divide: "))
denominator = int(input("enter a number to divide by: "))
result = numerator / denominator
except ZeroDivisionError:
print("you cannot divide by zero")
except ValueError:
print("enter only numbers please")
except Exception: # catches all exceptions
print("something went wrong")
else:
print(result)
finally: # executes every time the try/except statement is executed
print("this will always execute")
#-----file detection, use 'import os'-----
def detectFile():
path = "C:\\Users\\benrafalski\\Documents\\CSE365\\Assignment3\\README.txt"
if os.path.exists(path):
print("that location exists")
if os.path.isfile(path):
print("that is a file")
elif os.path.isdir(path):
print("that is a folder")
else:
print("that location does not exist")
#-----reading a file-----
def readFile():
try:
with open('test.txt') as file:
print(file.read()) # closes file automatically after opening
except FileNotFoundError:
print("that file was not found")
#-----writing a file-----
def writeFile():
| text = "this is my written text\nThis is a newline"
| random_line_split |
|
client.go | vr, name)
}
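// watchReactionFunc backs the fake clientset's watch reactor: for every WatchAction it
// creates a dedicated watcher, replays the currently tracked objects to it as initial
// Added events, and registers it so that subsequent tracker events are forwarded.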
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
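// dispatch fans an event out to every registered watcher, each on its own goroutine.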
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
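// watcher pairs a FakeWatcher with the WatchAction that created it so that events can be
// filtered against the action's watch restrictions before being forwarded.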
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
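// handles reports whether this watcher should forward the given event: the event's resource
// must match the watch action, and the object must satisfy the action's resourceVersion,
// field selector and label selector restrictions.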
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains the Kubernetes/Machine resources whose responses can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that is used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
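// Example (sketch): a test in this package might build a response that fails the first two
// calls with a short delay before each return; the variable names below are illustrative,
// not part of the package API.
//
//	failTwice := CreateFakeResponse(2, "simulated API failure", 10*time.Millisecond)
//	getMachineFails := ResourceActions{Machine: Actions{Get: failTwice}}
//
// Once the counter is exhausted, the faked action responds normally again.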
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for all calls from the client
func (o *fakingOptions) SetFailAllFakeResponse(response *FakeResponse) {
o.failAll = response
}
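// Wiring sketch for the two setters above, assuming `tracker` is the *FakeObjectTracker under
// test (only reachable from tests in this package, since the options are unexported) and
// `getMachineFails` is a ResourceActions value such as the one sketched earlier:
//
//	tracker.fakingOptions.SetFailAtFakeResourceActions(&getMachineFails)
//	// or, to fail every call made through the tracker:
//	failAll := CreateFakeResponse(1, "boom", 0)
//	tracker.fakingOptions.SetFailAllFakeResponse(&failAll)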
// NewMachineClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewMachineClientSet(objects ...runtime.Object) (*fakeuntyped.Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = fakeuntyped.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()),
}
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &fakeuntyped.Clientset{}
cs.Fake.AddReactor("*", "*", k8stesting.ObjectReaction(o))
cs.Fake.AddWatchReactor("*", o.watchReactionFunc)
return cs, o
}
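// Usage sketch (the machine objects m1 and m2 are assumed to be built elsewhere in the test):
//
//	cs, tracker := NewMachineClientSet(m1, m2)
//	go func() { _ = tracker.Start() }()
//	defer tracker.Stop()
//
// The returned clientset is then handed to the code under test in place of a real machine clientset.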
// FakeObjectTrackers is a struct containing all the controller fake object trackers
type FakeObjectTrackers struct {
ControlMachine, TargetCore *FakeObjectTracker
}
// NewFakeObjectTrackers initializes the fake object trackers
func NewFakeObjectTrackers(controlMachine, targetCore *FakeObjectTracker) *FakeObjectTrackers {
fakeObjectTrackers := &FakeObjectTrackers{
ControlMachine: controlMachine,
TargetCore: targetCore,
}
return fakeObjectTrackers
}
// Start starts all object trackers as goroutines
func (o *FakeObjectTrackers) Start() {
go func() {
err := o.ControlMachine.Start()
if err != nil {
klog.Errorf("failed to start machine object tracker, Err: %v", err)
}
}()
go func() {
err := o.TargetCore.Start()
if err != nil {
klog.Errorf("failed to start target core object tracker, Err: %v", err)
}
}()
}
// Stop stops all object trackers
func (o *FakeObjectTrackers) Stop() {
o.ControlMachine.Stop()
o.TargetCore.Stop()
}
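// Typical wiring in a controller test (a sketch; the object slices are assumed):
//
//	_, controlTracker := NewMachineClientSet(machineObjects...)
//	_, coreTracker := NewCoreClientSet(nodeObjects...)
//	trackers := NewFakeObjectTrackers(controlTracker, coreTracker)
//	trackers.Start()
//	defer trackers.Stop()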
// NewCoreClientSet returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewCoreClientSet(objects ...runtime.Object) (*Clientset, *FakeObjectTracker) {
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
_ = k8sfake.AddToScheme(scheme)
o := &FakeObjectTracker{
FakeWatcher: watch.NewFake(),
delegatee: k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder()), | } | random_line_split |
|
client.go | }
}
return t.delegatee.Get(gvr, ns, name)
}
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives a delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains the Kubernetes/Machine resources whose responses can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that is used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned | {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil {
return nil, err | identifier_body |
|
client.go | if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil {
return nil, err
}
}
return t.delegatee.Get(gvr, ns, name)
}
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives a delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) | (event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains the Kubernetes/Machine resources whose responses can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that is used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
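// Illustrative sketch (not part of the original file): how a test could combine
// CreateFakeResponse with ResourceActions so that the next two node Get calls
// fail with a short delay. The `tracker` value and the way its options are
// reached are assumptions about the surrounding test harness.
//
//	failAt := &ResourceActions{
//		Node: Actions{
//			Get: CreateFakeResponse(2, "simulated node get failure", 100*time.Millisecond),
//		},
//	}
//	tracker.fakingOptions.SetFailAtFakeResourceActions(failAt)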
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message for | dispatch | identifier_name |
client.go | if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Get.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Get.RunFakeInvocations()
} else if gvr.Resource == "machinesets" {
err = t.fakingOptions.failAt.MachineSet.Get.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Get.RunFakeInvocations()
}
if err != nil |
}
return t.delegatee.Get(gvr, ns, name)
}
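// Behaviour sketch (assumed, for illustration only): once a positive Get counter
// has been configured for the "nodes" resource (see the sketch earlier in this
// file), the first calls surface the injected error while the counter drains,
// and later calls fall through to the delegatee tracker.
//
//	_, err := tracker.Get(nodesGVR, "", "node-1") // -> "simulated node get failure"
//	_, err = tracker.Get(nodesGVR, "", "node-1")  // -> "simulated node get failure"
//	_, err = tracker.Get(nodesGVR, "", "node-1")  // -> served by t.delegatee
//
// nodesGVR is a hypothetical schema.GroupVersionResource for core/v1 nodes.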
// Create receives a create event with the object. Not needed for CA.
func (t *FakeObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
return nil
}
// Update receives an update event with the object
func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
var err error
if t.fakingOptions.failAll != nil {
err = t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return err
}
}
if t.fakingOptions.failAt != nil {
if gvr.Resource == "nodes" {
err = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()
} else if gvr.Resource == "machines" {
err = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()
} else if gvr.Resource == "machinedeployments" {
err = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()
}
if err != nil {
return err
}
}
err = t.delegatee.Update(gvr, obj, ns)
if err != nil {
return err
}
if t.FakeWatcher == nil {
return errors.New("error sending event on a tracker with no watch support")
}
if t.IsStopped() {
return errors.New("error sending event on a stopped tracker")
}
t.FakeWatcher.Modify(obj)
return nil
}
// List receives a list event with the object
func (t *FakeObjectTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.List(gvr, gvk, ns)
}
// Delete receives a delete event with the object. Not needed for CA.
func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
return nil
}
// Watch receives a watch event with the object
func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {
if t.fakingOptions.failAll != nil {
err := t.fakingOptions.failAll.RunFakeInvocations()
if err != nil {
return nil, err
}
}
return t.delegatee.Watch(gvr, name)
}
func (t *FakeObjectTracker) watchReactionFunc(action k8stesting.Action) (bool, watch.Interface, error) {
if t.FakeWatcher == nil {
return false, nil, errors.New("cannot watch on a tracker with no watch support")
}
switch a := action.(type) {
case k8stesting.WatchAction:
w := &watcher{
FakeWatcher: watch.NewFake(),
action: a,
}
go func() {
err := w.dispatchInitialObjects(a, t)
if err != nil {
klog.Errorf("error dispatching initial objects, Err: %v", err)
}
}()
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.watchers = append(t.watchers, w)
return true, w, nil
default:
return false, nil, fmt.Errorf("expected WatchAction but got %v", action)
}
}
// Start begins tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Start() error {
if t.FakeWatcher == nil {
return errors.New("tracker has no watch support")
}
for event := range t.ResultChan() {
event := event.DeepCopy() // passing a deep copy to avoid race.
t.dispatch(event)
}
return nil
}
func (t *FakeObjectTracker) dispatch(event *watch.Event) {
for _, w := range t.watchers {
go w.dispatch(event)
}
}
// Stop terminates tracking of a FakeObjectTracker
func (t *FakeObjectTracker) Stop() {
if t.FakeWatcher == nil {
panic(errors.New("tracker has no watch support"))
}
t.trackerMutex.Lock()
defer t.trackerMutex.Unlock()
t.FakeWatcher.Stop()
for _, w := range t.watchers {
w.Stop()
}
}
type watcher struct {
*watch.FakeWatcher
action k8stesting.WatchAction
updateMutex sync.Mutex
}
func (w *watcher) Stop() {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
w.FakeWatcher.Stop()
}
func (w *watcher) handles(event *watch.Event) bool {
if w.IsStopped() {
return false
}
t, err := meta.TypeAccessor(event.Object)
if err != nil {
return false
}
gvr, _ := meta.UnsafeGuessKindToResource(schema.FromAPIVersionAndKind(t.GetAPIVersion(), t.GetKind()))
if !(&k8stesting.SimpleWatchReactor{Resource: gvr.Resource}).Handles(w.action) {
return false
}
o, err := meta.Accessor(event.Object)
if err != nil {
return false
}
info := w.action.GetWatchRestrictions()
rv, fs, ls := info.ResourceVersion, info.Fields, info.Labels
if rv != "" && o.GetResourceVersion() != rv {
return false
}
if fs != nil && !fs.Matches(fields.Set{
"metadata.name": o.GetName(),
"metadata.namespace": o.GetNamespace(),
}) {
return false
}
if ls != nil && !ls.Matches(labels.Set(o.GetLabels())) {
return false
}
return true
}
func (w *watcher) dispatch(event *watch.Event) {
w.updateMutex.Lock()
defer w.updateMutex.Unlock()
if !w.handles(event) {
return
}
w.Action(event.Type, event.Object)
}
func (w *watcher) dispatchInitialObjects(action k8stesting.WatchAction, t k8stesting.ObjectTracker) error {
listObj, err := t.List(action.GetResource(), action.GetResource().GroupVersion().WithKind(action.GetResource().Resource), action.GetNamespace())
if err != nil {
return err
}
itemsPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
items := itemsPtr.([]runtime.Object)
for _, o := range items {
w.dispatch(&watch.Event{
Type: watch.Added,
Object: o,
})
}
return nil
}
// ResourceActions contains the Kubernetes/Machine resources whose responses can be faked
type ResourceActions struct {
Node Actions
Machine Actions
MachineSet Actions
MachineDeployment Actions
}
// Actions contains the actions whose response can be faked
type Actions struct {
Get FakeResponse
Update FakeResponse
}
// FakeResponse is the custom error response configuration that is used for responding to client calls
type FakeResponse struct {
counter int
errorMsg string
responseDelay time.Duration
}
// fakingOptions are options that can be set while trying to fake object tracker returns
type fakingOptions struct {
// Fail at different resource action
failAt *ResourceActions
// Fail every action
failAll *FakeResponse
}
// CreateFakeResponse creates a fake response for an action
func CreateFakeResponse(counter int, errorMsg string, responseDelay time.Duration) FakeResponse {
return FakeResponse{
counter: counter,
errorMsg: errorMsg,
responseDelay: responseDelay,
}
}
// DecrementCounter reduces the counter for the particular action response by 1
func (o *FakeResponse) DecrementCounter() {
o.counter--
}
// RunFakeInvocations runs any custom fake configurations/methods before invoking standard ObjectTrackers
func (o *FakeResponse) RunFakeInvocations() error {
if !o.IsFakingEnabled() {
return nil
}
// decrement the counter
o.DecrementCounter()
// Delay while returning call
if o.responseDelay != 0 {
time.Sleep(o.responseDelay)
}
// If error message has been set
if o.errorMsg != "" {
return errors.New(o.errorMsg)
}
return nil
}
// IsFakingEnabled will return true if counter is positive for the fake response
func (o *FakeResponse) IsFakingEnabled() bool {
return o.counter > 0
}
// SetFailAtFakeResourceActions sets up the errorMessage to be returned on specific calls
func (o *fakingOptions) SetFailAtFakeResourceActions(resourceActions *ResourceActions) {
o.failAt = resourceActions
}
// SetFailAllFakeResponse sets the error message | {
return nil, err
} | conditional_block |
xf_numba.py | chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
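# Illustrative usage sketch (not from the original source). Each row of `angs`
# is (2*theta, eta[, omega]) in radians and the result is one unit G-vector per
# row; the numbers below are made up.
#
#   angs = np.array([[0.2, 0.1, 0.0],
#                    [0.3, -0.4, 0.5]])
#   gvecs = angles_to_gvec(angs, chi=0.05)   # -> array of shape (2, 3)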
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
out[:] = a[:]
return out
@numba.njit
def _unit_vector_multi(a, out=None):
out = out if out is not None else np.empty_like(a)
n, dim = a.shape
for i in range(n):
#_unit_vector_single(a[i], out=out[i])
sqr_norm = a[i, 0] * a[i, 0]
for j in range(1, dim):
sqr_norm += a[i, j]*a[i, j]
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[i,:] = a[i,:] * recip_norm
else:
out[i,:] = a[i,:]
return out
@xf_api
def row_norm(vec_in):
"""
return row-wise norms for a list of vectors
"""
# TODO: leave this to a PRECONDITION in the xf_api?
if vec_in.ndim == 1:
out = _row_norm(np.atleast_2d(vec_in))[0]
elif vec_in.ndim == 2:
out = _row_norm(vec_in)
else:
raise ValueError(
"incorrect shape: arg must be 1-d or 2-d, yours is %d"
% (len(vec_in.shape)))
return out
@xf_api
def unit_vector(vec_in):
| """
normalize array of column vectors (hstacked, axis = 0)
"""
if vec_in.ndim == 1:
out = _unit_vector_single(vec_in)
elif vec_in.ndim == 2:
out = _unit_vector_multi(vec_in)
else:
raise ValueError(
"incorrect arg shape; must be 1-d or 2-d, yours is %d-d"
% (vec_in.ndim)
)
return out | identifier_body |
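# Quick usage sketch for row_norm/unit_vector (illustrative only, values made up):
#
#   v = np.array([[3.0, 4.0, 0.0],
#                 [1.0, 0.0, 0.0]])
#   row_norm(v)      # -> array([5., 1.])
#   unit_vector(v)   # -> rows rescaled to unit length; near-zero rows are returned unchanged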
|
xf_numba.py | (fn):
out = numba.jit(fn)
out.__signature__ = get_signature(fn)
return out
@numba.njit
def _angles_to_gvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])],
[np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])],
[np.sin(0.5*angs[:, 0])]])
although much faster
"""
count, dim = angs.shape
out = out if out is not None else np.empty((count, 3), dtype=angs.dtype)
for i in range(count):
ca0 = np.cos(0.5*angs[i, 0])
sa0 = np.sin(0.5*angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = ca0 * ca1
out[i, 1] = ca0 * sa1
out[i, 2] = sa0
return out
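# Sanity-check sketch (not part of the original module): the loop above should
# match the vectorized form quoted in the docstring. Something along these lines
# could be used in a test:
#
#   angs = np.random.uniform(-np.pi, np.pi, (5, 2))
#   ref = np.vstack([np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1]),
#                    np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1]),
#                    np.sin(0.5*angs[:, 0])]).T
#   assert np.allclose(_angles_to_gvec_helper(angs), ref)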
@numba.njit
def _angles_to_dvec_helper(angs, out=None):
"""
angs are vstacked [2*theta, eta, omega], although omega is optional
This should be equivalent to the one-liner numpy version:
out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])],
[np.sin(angs[:, 0]) * np.sin(angs[:, 1])],
[-np.cos(angs[:, 0])]])
although much faster
"""
_, dim = angs.shape
out = out if out is not None else np.empty((len(angs), 3), dtype=angs.dtype)  # one row per input angle set
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
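# Behaviour sketch (illustrative, not from the original source):
#
#   _rmat_s_helper()                                # -> (1, 3, 3) identity
#   _rmat_s_helper(chi=0.1)                         # -> (1, 3, 3) rotation about X by chi
#   _rmat_s_helper(omes=np.array([0.0, np.pi/2]))   # -> (2, 3, 3) rotations about Y
#   _rmat_s_helper(chi=0.1, omes=np.array([0.3]))   # -> combined sample rotation(s)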
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# | xfapi_jit | identifier_name |
|
xf_numba.py | if out is not None else np.empty((dim, 3), dtype=angs.dtype)
for i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
| out[:] = a[:] | conditional_block |
|
xf_numba.py | i in range(len(angs)):
ca0 = np.cos(angs[i, 0])
sa0 = np.sin(angs[i, 0])
ca1 = np.cos(angs[i, 1])
sa1 = np.sin(angs[i, 1])
out[i, 0] = sa0 * ca1
out[i, 1] = sa0 * sa1
out[i, 2] = -ca0
return out
@numba.njit
def _rmat_s_helper(chi=None, omes=None, out=None):
"""
simple utility for calculating sample rotation matrices based on
standard definition for HEDM
chi is a single value, 0.0 by default
omes is either a 1d array or None.
If None the code should be equivalent to a single ome of value 0.0
out is a preallocated output array. No check is done about it having the
proper size. If None a new array will be allocated. The expected size
of the array is as many 3x3 matrices as omes (n, 3, 3).
"""
if chi is not None:
cx = np.cos(chi)
sx = np.sin(chi)
else:
cx = 1.0
sx = 0.0
if omes is not None:
# omes is an array (vector): output is as many rotation matrices as omes entries.
n = len(omes)
out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)
if chi is not None:
# ome is array and chi is a value... compute output
cx = np.cos(chi)
sx = np.sin(chi)
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw
out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw
else:
# omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.
# cx IS 1.0, sx IS 0.0
for i in range(n):
cw = np.cos(omes[i])
sw = np.sin(omes[i])
out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw
out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.
out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw
else:
# omes is None, results should be equivalent to an array with a single element 0.0
out = out if out is not None else np.empty((1, 3, 3))
if chi is not None:
# ome is 0.0. cw is 1.0 and sw is 0.0
cx = np.cos(chi)
sx = np.sin(chi)
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx
out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx
else:
# both omes and chi are None... return a single identity matrix.
out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.
out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.
out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.
return out
@xf_api
def angles_to_gvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require
some checking.
"""
orig_ndim = angs.ndim
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
gvec_b = _angles_to_gvec_helper(angs[:,0:2])
# _rmat_s_helper could return None to mean "Identity" when chi and ome are None.
omes = angs[:, 2] if dim > 2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
out = _beam_to_crystal(gvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
return out[0] if orig_ndim == 1 else out
@xf_api
def angles_to_dvec(angs,
beam_vec=None, eta_vec=None,
chi=None, rmat_c=None):
"""Note about this implementation:
This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may
require some checking.
"""
angs = np.atleast_2d(angs)
nvecs, dim = angs.shape
# make vectors in beam frame
dvec_b = _angles_to_dvec_helper(angs[:,0:2])
# calculate rmat_s
omes = angs[:, 2] if dim>2 else None
if chi is not None or omes is not None:
rmat_s = _rmat_s_helper(chi=chi, omes=omes)
else:
rmat_s = None
# apply defaults to beam_vec and eta_vec.
# TODO: use a default rmat when beam_vec and eta_vec are None so computations
# can be avoided?
beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec
eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec
rmat_b = make_beam_rmat(beam_vec, eta_vec)
return _beam_to_crystal(dvec_b,
rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c)
# this could be a gufunc... (n)->()
@numba.njit
def _row_norm(a, out=None):
n, dim = a.shape
out = out if out is not None else np.empty(n, dtype=a.dtype)
for i in range(n):
nrm = 0.0
for j in range(dim):
x = a[i, j]
nrm += x*x
out[i] = np.sqrt(nrm)
return out
# this and _unit_vector_single would be better as a gufunc.
@numba.njit
def _unit_vector_single(a, out=None):
out = out if out is not None else np.empty_like(a)
n = len(a)
sqr_norm = a[0]*a[0]
for i in range(1, n):
sqr_norm += a[i]*a[i]
# prevent divide by zero
if sqr_norm > cnst.epsf:
recip_norm = 1.0 / np.sqrt(sqr_norm)
out[:] = a[:] * recip_norm
else:
out[:] = a[:]
return out |
@numba.njit
def _unit_vector_multi(a, out=None): | random_line_split |
|
report_errors_service.pb.go | () string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
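// Illustrative client-side sketch (not generated code): one way a caller might
// use the client above. The endpoint, dial options, project name, and message
// are placeholders, not values from this file.
//
//	conn, err := grpc.Dial("clouderrorreporting.googleapis.com:443", opts...)
//	if err != nil { /* handle error */ }
//	client := NewReportErrorsServiceClient(conn)
//	resp, err := client.ReportErrorEvent(ctx, &ReportErrorEventRequest{
//		ProjectName: "projects/example-project",
//		Event:       &ReportedErrorEvent{Message: "example error message with stack"},
//	})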
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x | String | identifier_name |
|
report_errors_service.pb.go | ) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
} | func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x3 | random_line_split |
|
report_errors_service.pb.go |
return nil
}
// Response for reporting an individual error event.
// Data may be added to this message in the future.
type ReportErrorEventResponse struct {
}
func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} }
func (m *ReportErrorEventResponse) String() string { return proto.CompactTextString(m) }
func (*ReportErrorEventResponse) ProtoMessage() {}
func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
// An error event which is reported to the Error Reporting system.
type ReportedErrorEvent struct {
// [Optional] Time when the event occurred.
// If not provided, the time when the event was received by the
// Error Reporting system will be used.
EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"`
// [Required] The service context in which this error has occurred.
ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"`
// [Required] A message describing the error. The message can contain an
// exception stack in one of the supported programming languages and formats.
// In that case, the message is parsed and detailed exception information
// is returned when retrieving the error event again.
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
// [Optional] A description of the context in which the error occurred.
Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
}
func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
}
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58 | {
return m.Event
} | conditional_block |
|
report_errors_service.pb.go | Reset() { *m = ReportedErrorEvent{} }
func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) }
func (*ReportedErrorEvent) ProtoMessage() {}
func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp {
if m != nil {
return m.EventTime
}
return nil
}
func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext {
if m != nil {
return m.ServiceContext
}
return nil
}
func (m *ReportedErrorEvent) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *ReportedErrorEvent) GetContext() *ErrorContext {
if m != nil {
return m.Context
}
return nil
}
func init() {
proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest")
proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse")
proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ReportErrorsService service
type ReportErrorsServiceClient interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error)
}
type reportErrorsServiceClient struct {
cc *grpc.ClientConn
}
func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient {
return &reportErrorsServiceClient{cc}
}
func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) {
out := new(ReportErrorEventResponse)
err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ReportErrorsService service
type ReportErrorsServiceServer interface {
// Report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error)
}
func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) {
s.RegisterService(&_ReportErrorsService_serviceDesc, srv)
}
func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportErrorEventRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService",
HandlerType: (*ReportErrorsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportErrorEvent",
Handler: _ReportErrorsService_ReportErrorEvent_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto",
}
func init() |
var fileDescriptor3 = []byte{
// 490 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x8a, 0x13, 0x41,
0x10, 0xc7, 0x99, 0xf8, 0xb1, 0x6c, 0x47, 0x54, 0xda, 0x83, 0xc3, 0x20, 0xb8, 0xc6, 0xcb, 0xa2,
0x30, 0x6d, 0xe2, 0xc5, 0xec, 0x22, 0x0b, 0x59, 0xc3, 0xde, 0x64, 0x99, 0xd5, 0x3d, 0x48, 0x70,
0xe8, 0x4c, 0xca, 0x61, 0x24, 0xd3, 0x35, 0x76, 0x77, 0x82, 0x20, 0x5e, 0x7c, 0x85, 0x7d, 0x05,
0x4f, 0x3e, 0x8a, 0x57, 0x5f, 0xc0, 0x83, 0x0f, 0xa1, 0x37, 0xe9, 0xaf, 0x25, 0x6b, 0x72, 0x70,
0xf4, 0x58, 0xd3, 0x55, 0xbf, 0xff, 0xbf, 0x3e, 0x86, 0x1c, 0x95, 0x88, 0xe5, 0x1c, 0xd8, 0x0c,
0x96, 0x1a, 0x71, 0xae, 0x58, 0x31, 0xc7, 0xc5, 0x0c, 0xa4, 0x44, 0x29, 0xa1, 0x41, 0xa9, 0x2b,
0x51, 0xb2, 0x65, 0x7f, 0x0a, 0x9a, 0xf7, 0x99, 0xfb, 0x92, 0xdb, 0x57, 0x95, 0x2b, 0x90, 0xcb,
0xaa, 0x80, 0xb4, 0x91, 0xa8, 0x91, 0x3e, 0x74, 0xa0, 0x34, 0x80, 0xd2, 0x0d, 0xa0, 0xd4, 0x83,
0x92, 0x3b, 0x5e, 0x95, 0x | {
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3)
} | identifier_body |
crunchauto.py |
else:
if endofdate:
dt = datetime.datetime.combine(dt,datetime.time(hour=23,minute=59,second=59,tzinfo=to_zone))
else:
dt = datetime.datetime.combine(dt,datetime.time(tzinfo=to_zone))
return dt
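# Illustrative note (not from the original source): iCal DTSTART/DTEND values may be
# plain dates or datetimes; this helper normalizes both to timezone-aware local
# datetimes, e.g.
#
#   utctolocal(datetime.date(2020, 3, 1))                  # -> 2020-03-01 00:00:00 local
#   utctolocal(datetime.date(2020, 3, 1), endofdate=True)  # -> 2020-03-01 23:59:59 local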
weekday=['Sun','Mon','Tues','Wed','Thurs','Fri','Sat'] # OUR Sunday=0 Convention!!
def crunch_calendar(rundate=None):
#ICAL_URL = Config.get('autoplot','ICAL_URI')
ICAL_URL = current_app.config['globalConfig'].Config.get("autoplot","ICAL_URI")
g = urllib.request.urlopen(ICAL_URL)
data= g.read()
print(data)
cal = icalendar.Calendar.from_ical(data)
g.close()
"""
g = urllib.urlopen(ICAL_URL)
print g.read()
g.close()
"""
if rundate:
now = datetime.datetime.strptime(rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New_York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New_York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
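# Worked example (illustrative): with the Sunday=0 convention above, a rundate of
# Wednesday 2020-03-04 gives dow=3, so weekstart is Sunday 2020-03-01 00:00:00 and
# weekend is Saturday 2020-03-07 23:59:59 (weekstart + 7 days - 1 second).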
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
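# crunch_calendar returns (errors, warnings, debug, data, billables): data['Decision'] is
# 'bill', 'no_bill', or 'error', and billables lists the week's billable events.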
def do_payment(customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format | dt = dt.astimezone(to_zone) | conditional_block |
|
crunchauto.py | (rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New_York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New_York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def do_payment(customer,price,leaseid,description,test=False,pay=False):
| lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append | errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice | identifier_body |
crunchauto.py | undate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New_York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New_York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday
dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow))
weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def | (customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings | do_payment | identifier_name |
crunchauto.py | (rundate,"%Y-%m-%d").replace(tzinfo=tz.gettz('America/New_York'))
else:
now = datetime.datetime.now().replace(tzinfo=tz.gettz('America/New_York'))
#print "CRUNCH EFFECTIVE RUNDATE",rundate
## ADJUST HERE FOR TZ! (i.e. If we run Midnight on Sunday don't want LAST week's run
dow = now.weekday() # 0=Monday | weekstart = weekstart.replace(hour=0,minute=0,second=0,microsecond=0)
weekend = weekstart + datetime.timedelta(days=7)
weekend = weekend - datetime.timedelta(seconds=1)
#print "WEEKSTART",weekstart,"through",weekend
errors=[]
warnings=[]
billables=[]
summaries=[]
debug=[]
data={}
debug.append("{2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum))
data['title']="Auto Plot Lease {2} Week #{3} - {0} through {1}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['lease-id']="autoplot-lease-{2}-Week{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
data['weekid']="{2:04}-{3:02}".format(weekstart.strftime("%b-%d"),weekend.strftime("%b-%d"),weekstart.year,weeknum)
for component in cal.walk():
#print component.name
#print dict(component)
#print dir(component)
#print(component.get('summary'))
#print(component.get('dtstart'))
#print(component.get('dtend'))
#print(component.get('dtstamp'))
summary={'errors':[],'warnings':[]}
if component.name != 'VEVENT':
print ("NOT A VEVENT!!!",component.name)
else:
#print "VEVENT",component
billable=False
members=[]
event={}
calstart = component['DTSTART'].dt
#print "CALSTART",calstart
calstart = utctolocal(calstart)
calend = component['DTEND'].dt
calend = utctolocal(calend,endofdate=True)
#print "SUMMARY",component['SUMMARY']
#print "START",calstart
#print "END",calend
if 'ORGANIZER' in component:
# print "ORGANIZER",component['ORGANIZER']
for p in component['ORGANIZER'].params:
pass #print "_ ---- ",p,component['ORGANIZER'].params[p]
#print "CHECK",weekstart,"<",calstart,
#print "aand",calend,"<",weekend
#if (weekstart <= calstart) and (calend <= weekend):
rrule = None
weeks=1
if 'RRULE' in component and 'COUNT' in component['RRULE'] and 'FREQ' in component['RRULE']:
rrule=component['RRULE']
#print "RRULE",calstart.strftime("%b-%d %H:%M ")+component['SUMMARY'],
#print rrule['COUNT'][0],rrule['FREQ'][0]
if rrule['FREQ'][0]== "WEEKLY":
weeks = rrule['COUNT'][0]
for weekno in range(0,weeks):
short = calstart.strftime("%b-%d %H:%M ")+component['SUMMARY']
if (calstart <= weekend) and (weekstart < calend):
#print "THISWEEK calendar",calstart,calend
#print "THISWEEK curweel",weekstart,weekend
#print "PROCESS",short
#print "WEEK IN SERIES",weekno
if 'ATTENDEE' not in component:
summary['errors'].append("No Attendees")
else:
if isinstance(component['ATTENDEE'],list):
attlist = component['ATTENDEE']
else:
attlist = [component['ATTENDEE']]
for a in attlist:
#print " -- Attendee:",a
#print " -- Params:"
for p in a.params:
pass #print "_ ---- ",p,a.params[p]
if 'CUTYPE' in a.params and a.params['CUTYPE'] == 'INDIVIDUAL':
members.append(a.params['CN'])
"""
print " -- DIR",dir(a)
print
print " -- ICAL",type(a.to_ical),dir(a.to_ical())
print
"""
hrs=(calend-calstart).total_seconds()/3600
#print "*** CURRENT!!! {0} Hours total".format(hrs)
if (hrs <= 24):
summary['warnings'].append("Partial day entry - NOT BILLING")
elif (hrs <= 167):
summary['warnings'].append("Entry isn't quite full week, but billing anyway")
if (hrs > 24):
if len(members) > 1:
summary['errors'].append("More than one member assigned: "+str(", ".join(members)))
elif len(members) == 0:
summary['errors'].append("No attendees in calendar entry")
else:
if not members[0].lower().endswith("@makeitlabs.com"):
summary['errors'].append("Non-MIL email: "+str(members[0]))
else:
billable=True
#print "*** BILLABLE"
event['summary']=short
event['member']=members[0]
#if component['SUMMARY'].strip().lower().startswith("rental"):
# print "** IS RENTAL"
# Figure out what to do based on Summary
if (len(summary['errors']) == 0) and billable:
billables.append(event)
for e in summary['errors']:
errors.append(short + ": "+e)
for w in summary['warnings']:
warnings.append(short + ": "+w)
#print "END PARSE"
calstart = calstart + datetime.timedelta(weeks=1)
calend = calend + datetime.timedelta(weeks=1)
# End of FOR for weeks
"""
for x in component:
print x,type(component[x]),
if (isinstance(component[x],icalendar.prop.vDDDTypes)):
print component.decoded(x)
print type(component[x].dt)
print component[x].dt
else:
print component.decoded(x)
#print dir(component[x])
print
"""
if len(billables) ==0:
warnings.append("WARNING - NO BILLABLES THIS WEEK!")
elif len(billables) >1:
errors.append("ERROR - MULTIPLE BILLABLES THIS WEEK!")
if (len(errors) != 0):
data['Decision']='error'
elif (len(billables) == 0):
data['Decision']='no_bill'
else:
data['Decision']='bill'
return (errors,warnings,debug,data,billables)
def do_payment(customer,price,leaseid,description,test=False,pay=False):
errors=[]
warnings=[]
debug=[]
stripe.api_key = current_app.config['globalConfig'].Config.get('autoplot','stripe_token')
#stripe.api_key = "sk_test_4eC39HqLyjWDarjtT1zdp7dc" # TEST KEY
#print stripe.SKU.list(limit=99)
#print stripe.Customer.list(limit=99)
debug.append("Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid))
#print "Process Payment customer {0} Price {1} leaseid {2}".format(customer,price,leaseid)
debug.append("Description: {0}".format(description))
#print "Description: {0}".format(description)
"""
"""
print ("""
** GET EXISTING INVOICE ITEM
""")
# Get existing outstanding items in Stripe to invoice
lastItem=None
pendingleases={}
while True:
ii= stripe.InvoiceItem.list(
limit=2,
#customer="cus_J0mrDmtpzbfYOk", # Stripe Test Customer
customer=customer, # MIL Brad Goodman
starting_after=lastItem
)
#print "EXISTING ITEMS"
#print ii
if ii:
for d in ii['data']:
lastItem=d['id']
if 'metadata' in d:
#print "Metadata ",d['metadata']
if 'X-MIL-lease-id' in d['metadata']:
pendingleases[d['metadata']['X-MIL-lease-id']] = { 'invoice':d['invoice'],'invoiceitem':d['id']}
warnings.append | dow = (dow+1) %7 #0=Sunday
weeknum = int(now.strftime("%U"))
#print "weeknum",weeknum,"Weekday",weekday[dow],"DOW",dow
weekstart = (now - datetime.timedelta(days=dow)) | random_line_split |
lib.rs | can return random values.
//! * `random.u8`
//! * `random.u16`
//! * `random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can be registered by [`Salak::register()`].
//!
//! #### Key Convention
//! A key is used to search for configuration in an [`Environment`]; normally it is represented by a string.
//! A key is a group of SubKeys separated by dots (`.`), and a SubKey is a name or a name followed by an index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
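//!
//! For example, `database.hosts[0].port` (an illustrative key) consists of the SubKeys `database`, `hosts[0]`, and `port`.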
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
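//!
//! For example (illustrative values, assuming a property `app.port=8080` exists in some source):
//! * `${app.port}` resolves to `8080`.
//! * `${app.host:localhost}` resolves to `localhost` when `app.host` is not set.
//! * `\$\{app.port\}` resolves to the literal text `${app.port}`.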
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically deriving [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, a struct with this attr will auto-implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify a default value.
//! * `#[salak(name = "key")]`, this attr can specify the property key; the default convention is to use the field name.
//! * `#[salak(desc = "Field Description")]`, this attr can describe this property.
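//!
//! A minimal sketch of a struct using the attributes above (the struct, field names and
//! default values are illustrative, not part of the crate):
//! ```ignore
//! #[derive(FromEnvironment, Debug)]
//! #[salak(prefix = "salak.application")]
//! struct AppConfig {
//!     #[salak(default = "salak")]
//!     name: String,
//!     #[salak(name = "url", desc = "Service endpoint")]
//!     endpoint: Option<String>,
//! }
//! ```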
//!
//! #### Reload Configuration
//! `salak` supports reloading configurations. Since in Rust mutable
//! and aliased references can't be used together, we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create an instance. [`Factory`] provides functions to initialize
//! and cache resources. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! The 'app' feature must be enabled for this functionality.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources; users can
/// provide custom sources by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hiding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means that if the value `T` is not found,
/// an error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable; the returned bool
/// value means reloading completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn | get | identifier_name |
|
lib.rs | `
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can be registered by [`Salak::register()`].
//!
//! #### Key Convention
//! A key is used to search for configuration in an [`Environment`]; normally it is represented by a string.
//! A key is a group of SubKeys separated by dots (`.`), and a SubKey is a name or a name followed by an index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically deriving [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, a struct with this attr will auto-implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify a default value.
//! * `#[salak(name = "key")]`, this attr can specify the property key; the default convention is to use the field name.
//! * `#[salak(desc = "Field Description")]`, this attr can describe this property.
//!
//! #### Reload Configuration
//! `salak` supports reloading configurations. Since in Rust mutable
//! and aliased references can't be used together, we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create an instance. [`Factory`] provides functions to initialize
//! and cache resources. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! The 'app' feature must be enabled for this functionality.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources; users can
/// provide custom sources by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hiding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means that if the value `T` is not found,
/// an error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable; the returned bool
/// value means reloading completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn get<T: PrefixedFromEnvironment>(&self) -> Res<T> {
self.require::<T>(T::prefix())
}
}
/// Context for implementing [`FromEnvironment`].
#[allow(missing_debug_implementations)]
pub struct SalakContext<'a> {
registry: &'a PropertyRegistryInternal<'a>, | iorefs: &'a Mutex<Vec<Box<dyn IORefT + Send>>>,
key: &'a mut Key<'a>, | random_line_split |
|
lib.rs | random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can be registered by [`Salak::register()`].
//!
//! #### Key Convention
//! A key is used to search for configuration in an [`Environment`]; normally it is represented by a string.
//! A key is a group of SubKeys separated by dots (`.`), and a SubKey is a name or a name followed by an index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically deriving [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, a struct with this attr will auto-implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify a default value.
//! * `#[salak(name = "key")]`, this attr can specify the property key; the default convention is to use the field name.
//! * `#[salak(desc = "Field Description")]`, this attr can describe this property.
//!
//! #### Reload Configuration
//! `salak` supports reloading configurations. Since in Rust mutable
//! and aliased references can't be used together, we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create an instance. [`Factory`] provides functions to initialize
//! and cache resources. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! The 'app' feature must be enabled for this functionality.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrapper can determine extra behavior for parsing.
/// Such as check empty of vec or update when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources; users can
/// provide custom sources by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hiding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty source will be ignored when registering to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means that if the value `T` is not found,
/// an error will be returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable; the returned bool
/// value means reloading completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn get<T: PrefixedFromEnvironment>(&self) -> Res<T> | {
self.require::<T>(T::prefix())
} | identifier_body |
|
core.py | .get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
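# _random_filename produces names like (illustrative): 1712345678901234-enhancement-CLIOutput-4242.json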
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def | query_last_release_version | identifier_name |
|
core.py | =True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
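# EntryFileParser.parse_contents accepts input like the following (illustrative values);
# '#' and blank lines are skipped, and only "<field>: value" lines for known fields are read:
#
#   # Describe the change below.
#   type: bugfix
#   category: parser
#   description: Handle empty change files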
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
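# validate_change_entry example (illustrative): with a schema restricting "type", an entry
# whose type is not allowed and whose description is empty raises ValidationError with
# one message per problem.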
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.x.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
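# A minimal usage sketch (added for illustration, not part of the original module):
# find_last_released_version() and determine_next_version() compose to produce the
# next version string for a minor release.
def _example_next_minor_version(change_dir: str) -> str:
    # e.g. '1.2.3' -> '1.3.0'
    last_released = find_last_released_version(change_dir)
    return determine_next_version(last_released, model.VersionBump.MINOR_VERSION)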
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* {change.type}:{change.category}:{description}\n')
out.write('\n\n')
class ChangeQuery:
def __init__(self, change_dir: str) -> None:
self._change_dir = change_dir
def run_query(self, query_for: str) -> Any:
try:
handler = getattr(self, f'query_{query_for.replace("-", "_")}')
except AttributeError:
raise RuntimeError(f"Unknown query type: {query_for}")
return handler()
def query_last_release_version(self) -> str:
| return find_last_released_version(self._change_dir) | identifier_body |
|
core.py | def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
setattr(entry, key, value)
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
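# Illustrative input for EntryFileParser.parse_contents() (assumed from the parsing
# logic above; the real DEFAULT_TEMPLATE may differ): '#' lines are ignored and each
# field is written as a "name: value" line.
_EXAMPLE_FILLED_TEMPLATE = """\
# Describe the change below.
type: bugfix
category: parser
description: Handle templates with no filled-in fields
"""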
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append( | f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.z.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f'* | random_line_split |
|
core.py | def prompt_entry_values(self, entry: model.JMESLogEntry) -> None:
with tempfile.NamedTemporaryFile('w') as f:
self._write_template_to_tempfile(f, entry)
self._open_tempfile_in_editor(f.name)
contents = self._read_tempfile(f.name)
return self._parse_filled_in_contents(contents, entry)
def _open_tempfile_in_editor(self, filename: str) -> None:
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
subprocess.run([editor, filename], check=True)
def _write_template_to_tempfile(
self, f: IO[str], entry: model.JMESLogEntry
) -> None:
contents = DEFAULT_TEMPLATE.format(
type=entry.type,
category=entry.category,
description=entry.description,
)
f.write(contents)
f.flush()
def _read_tempfile(self, filename: str) -> str:
with open(filename) as f:
filled_in_contents = f.read()
return filled_in_contents
def _parse_filled_in_contents(
self, contents: str, entry: model.JMESLogEntry
) -> None:
parsed_entry = EntryFileParser().parse_contents(contents)
self._update_values_from_new_entry(entry, parsed_entry)
def _update_values_from_new_entry(
self, entry: model.JMESLogEntry, new_entry: model.JMESLogEntry
) -> None:
for key, value in asdict(new_entry).items():
if value:
|
class EntryFileParser:
def parse_contents(self, contents: str) -> model.JMESLogEntry:
entry = model.JMESLogEntry.empty()
if not contents.strip():
return entry
field_names = [f.name for f in fields(entry)]
line_starts = tuple([f'{name}:' for name in field_names])
for line in contents.splitlines():
line = line.lstrip()
if line.startswith('#') or not line:
continue
if line.startswith(line_starts):
field_name, remaining = line.split(':', 1)
setattr(entry, field_name, remaining.strip())
return entry
class EntryGenerator:
def __init__(self, entry: model.JMESLogEntry, retriever: EditorRetriever):
self._entry = entry
self._retriever = retriever
def complete_entry(self) -> None:
if not self._entry.is_completed():
self._retriever.prompt_entry_values(self._entry)
@property
def change_entry(self) -> model.JMESLogEntry:
return self._entry
class EntryFileWriter:
def write_next_release_entry(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
self._create_next_release_dir(change_dir)
abs_filename = self._generate_random_file(entry, change_dir)
with open(abs_filename, 'w') as f:
f.write(entry.to_json())
f.write('\n')
return abs_filename
def _create_next_release_dir(self, change_dir: str) -> None:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
os.mkdir(next_release)
def _generate_random_file(
self, entry: model.JMESLogEntry, change_dir: str
) -> str:
next_release = os.path.join(change_dir, 'next-release')
# Need to generate a unique filename for this change.
short_summary = ''.join(
ch for ch in entry.category if ch in VALID_CHARS
)
filename = f'{entry.type}-{short_summary}'
possible_filename = self._random_filename(next_release, filename)
while os.path.isfile(possible_filename):
possible_filename = self._random_filename(next_release, filename)
return possible_filename
def _random_filename(self, next_release: str, filename: str) -> str:
return os.path.join(
next_release,
'%s-%s-%s.json'
% (time.monotonic_ns(), filename, str(random.randint(1, 100000))),
)
class EntryRecorder:
def __init__(
self,
entry_gen: EntryGenerator,
schema: model.EntrySchema,
file_writer: EntryFileWriter,
output_dir: str = '.changes',
):
self._entry_gen = entry_gen
self._schema = schema
self._file_writer = file_writer
self._output_dir = output_dir
def write_change_file_entry(self) -> str:
self._entry_gen.complete_entry()
entry = self._entry_gen.change_entry
validate_change_entry(entry, self._schema)
filename = self._file_writer.write_next_release_entry(
entry, change_dir=self._output_dir
)
return filename
def validate_change_entry(
entry: model.JMESLogEntry, schema: model.EntrySchema
) -> None:
entry_dict = asdict(entry)
schema_dict = asdict(schema)
errors = []
for schema_field in fields(schema):
value = entry_dict[schema_field.name]
allowed_values = schema_dict[schema_field.name]
if allowed_values and value not in allowed_values:
errors.append(
f'The "{schema_field.name}" value must be one of: '
f'{", ".join(allowed_values)}, received: "{value}"'
)
for key, value in entry_dict.items():
if not value:
errors.append(f'The "{key}" value cannot be empty.')
if errors:
raise ValidationError(errors)
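# Illustrative failure mode (assumed, not from the original docs): given a schema
# that restricts "type" to e.g. ("feature", "bugfix"), an entry with type "tweak"
# and an empty description fails with one message per problem:
#   The "type" value must be one of: feature, bugfix, received: "tweak"
#   The "description" value cannot be empty.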
def consolidate_next_release(
next_version: str, change_dir: str, changes: model.JMESLogEntryCollection
) -> str:
# Creates a new x.y.z.json file in .changes/ with the changes in
# .changes/next-release.
# It'll then remove the .changes/next-release directory.
release_file = os.path.join(change_dir, f'{next_version}.json')
with open(release_file, 'w') as f:
f.write(json.dumps(changes.to_dict(), indent=2))
f.write('\n')
next_release_dir = os.path.join(change_dir, 'next-release')
shutil.rmtree(next_release_dir)
return release_file
def find_last_released_version(change_dir: str) -> str:
results = sorted_versioned_releases(change_dir)
if results:
return results[-1]
return '0.0.0'
def sorted_versioned_releases(change_dir: str) -> List[str]:
# Strip off the '.json' suffix.
files = [f[:-5] for f in os.listdir(change_dir) if f.endswith('.json')]
return sorted(files, key=lambda x: StrictVersion(x))
def determine_next_version(
last_released_version: str, version_bump_type: model.VersionBump
) -> str:
parts = last_released_version.split('.')
if version_bump_type == model.VersionBump.PATCH_VERSION:
parts[2] = str(int(parts[2]) + 1)
elif version_bump_type == model.VersionBump.MINOR_VERSION:
parts[1] = str(int(parts[1]) + 1)
parts[2] = '0'
elif version_bump_type == model.VersionBump.MAJOR_VERSION:
parts[0] = str(int(parts[0]) + 1)
parts[1] = '0'
parts[2] = '0'
return '.'.join(parts)
def load_next_changes(change_dir: str) -> model.JMESLogEntryCollection:
next_release = os.path.join(change_dir, 'next-release')
if not os.path.isdir(next_release):
raise NoChangesFoundError()
changes = []
for change in sorted(os.listdir(next_release)):
entry = parse_entry(os.path.join(next_release, change))
changes.append(entry)
return model.JMESLogEntryCollection(changes=changes)
def parse_entry(filename: str) -> model.JMESLogEntry:
with open(filename) as f:
data = json.load(f)
return model.JMESLogEntry(**data)
def create_entry_recorder(
entry: model.JMESLogEntry, change_dir: str
) -> EntryRecorder:
recorder = EntryRecorder(
entry_gen=EntryGenerator(
entry=entry,
retriever=EditorRetriever(),
),
schema=model.EntrySchema(),
file_writer=EntryFileWriter(),
output_dir=change_dir,
)
return recorder
def render_changes(
changes: Dict[str, model.JMESLogEntryCollection],
out: IO[str],
template_contents: str,
) -> None:
context = {
'releases': reversed(list(changes.items())),
}
template = jinja2.Template(template_contents)
result = template.render(**context)
out.write(result)
def load_all_changes(
change_dir: str,
) -> Dict[str, model.JMESLogEntryCollection]:
releases = {}
for version_number in sorted_versioned_releases(change_dir):
filename = os.path.join(change_dir, f'{version_number}.json')
with open(filename) as f:
data = json.load(f)
releases[version_number] = model.JMESLogEntryCollection.from_dict(
data
)
return releases
def render_single_release_changes(
change_collection: model.JMESLogEntryCollection, out: IO[str]
) -> None:
for change in change_collection.changes:
description = '\n '.join(change.description.splitlines())
out.write(f | setattr(entry, key, value) | conditional_block |
mod.rs | rc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn | (&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
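// A minimal sketch (added for illustration, not part of the original crate) of the
// Owned -> Shared transition described in the comment inside `share()` above.
#[cfg(test)]
mod tensor_view_share_example {
    use super::*;

    #[test]
    fn share_turns_an_owned_view_into_a_shared_one() {
        let mut view = TensorView::Owned(Tensor::i32s(&[1], &[42]).unwrap());
        let copy = view.share();
        // Both handles now point at the same Arc-backed tensor.
        assert_eq!(copy.as_tensor(), view.as_tensor());
        match &view {
            &TensorView::Shared(_) => {}
            _ => panic!("expected the original view to be Shared after share()"),
        }
    }
}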
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Some(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediate result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules | as_tensor | identifier_name |
mod.rs | rc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
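// Illustrative note (not from the original sources): thanks to the Deref impl above,
// methods defined on Tensor can be called directly through a TensorView, e.g.
// (assuming Tensor exposes a `shape()` accessor):
//   let view: TensorView = Tensor::i32s(&[1], &[0]).unwrap().into();
//   let _dims = view.shape();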
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Some(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediate result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None. | bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<' | fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> { | random_line_split |
mod.rs | <Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView |
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediate result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules | {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
} | identifier_body |
import.go | ([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
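// exampleServiceAddress is an illustrative helper (added here as a sketch, not part
// of the original tool) showing how createHclSafeName feeds the Terraform resource
// address passed to `terraform import`, e.g. "my-service/v1" -> "kong_service.my_service_v1".
func exampleServiceAddress(service kong.KongService) string {
	return fmt.Sprintf("kong_service.%s", createHclSafeName(service.Name))
}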
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin.Name]; ok {
terraformResourceType = pluginResourceImplementation
}
resource := &resourceImport{
kongResourceType: "plugin",
terraformResourceType: terraformResourceType,
resourceName: getResourceNameForPlugin(s, &plugin),
resourceId: plugin.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["plugins"] = append(s.imports["plugins"], plugin.Id)
}
}
}
return nil
}
func (s *kongState) finish(fileName string) error {
if data, err := json.Marshal(s.imports); err != nil {
return err
} else {
return ioutil.WriteFile(fileName, data, 0644)
}
}
func getResourceNameForConsumer(consumer *kong.KongConsumer) string {
if len(consumer.Username) > 0 {
return consumer.Username
} else {
return consumer.CustomId
}
}
func getResourceNameForRoute(s *kongState, route *kong.KongRoute) string {
var service kong.KongService
for _, s := range s.services {
if s.Id == route.Service.Id {
service = s
break
}
}
name := service.Name
// TODO: the path/host slices should probably be sorted...
if len(route.Paths) > 0 {
path := strings.Split(route.Paths[0], "/")[1:] // drop the leading empty element produced by splitting a path that starts with "/"
for index, p := range path {
// if the path was prefixed with the service name, we don't want to repeat it
// e.g., service name: products, route path: /products
// the result should be products, not products_products
if index == 0 && p == name {
continue
}
name = name + "_" + p
}
} else {
name = name + route.Hosts[0]
}
return name
}
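// Illustrative examples of the naming scheme above (assumed, not from the original
// docs): a service "products" with a route for path "/products/reviews" yields the
// label "products_reviews", while a route matched only on host "api.example.com"
// yields "productsapi.example.com" (the host is appended verbatim and the result is
// later passed through createHclSafeName when the import address is built).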
func getResourceNameForPlugin(s *kongState, plugin *kong.KongPlugin) string | {
namePrefix := ""
if plugin.ServiceId != "" {
for _, service := range s.services {
if service.Id == plugin.ServiceId {
namePrefix = service.Name
break
}
}
} else if plugin.RouteId != "" {
for _, route := range s.routes {
if route.Id == plugin.RouteId {
namePrefix = getResourceNameForRoute(s, &route)
break
}
}
} else if plugin.ConsumerId != "" {
for _, consumer := range s.consumers {
if consumer.Id == plugin.ConsumerId { | identifier_body |
|
import.go | ry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 | fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin | {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 { | conditional_block |
import.go | ry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
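// Illustrative note (assumed, not from the original docs): because importResource
// consults hasResourceBeenImported against the saved state file first, re-running
// the import after a partial failure only imports the resources still missing.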
func (s *kongState) | (resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName)
return nil
}
func (s *kongState) importResources(cmd *importCommand) error {
if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin | importResource | identifier_name |
import.go | ry-run",
false,
"List the resources that will be imported, but do not actually import them.",
)
flags.StringVar(
&cmd.importFileName,
"state",
"import-state.json",
"Holds the current import state and any exclusions",
)
flags.StringVar(
&cmd.tfConfigPath,
"tf-config",
"",
"Path to Terraform config directory",
)
}
func (cmd *importCommand) Execute(_ context.Context, flags *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
fmt.Println("Importing resources from: " + cmd.adminApiUrl)
client, err := kong.NewKongClient(kong.KongConfig{
AdminApiUrl: cmd.adminApiUrl,
RbacToken: cmd.rbacToken,
})
if err != nil {
fmt.Printf("error initializing Kong client: %v\n", err)
return subcommands.ExitFailure
}
state := &kongState{
client: client,
}
if err := state.loadState(cmd.importFileName); err != nil {
fmt.Printf("error loading import state file %v\n", err)
return subcommands.ExitFailure
}
if err := state.discover(); err != nil {
fmt.Printf("error while discovering resources %v\n", err)
return subcommands.ExitFailure
}
fmt.Println("\nDiscovery:")
fmt.Println(state.discoveryReport())
if !cmd.isDryRun {
if err := state.importResources(cmd); err != nil {
fmt.Printf("error occurred while importing resources: %v\n", err)
err := state.finish(cmd.importFileName)
if err != nil {
fmt.Println("Additional error saving progress ", err)
}
return subcommands.ExitFailure
} else {
if err := state.finish(cmd.importFileName); err != nil {
fmt.Printf("Error occurred saving import file %v\n", err)
}
}
}
return subcommands.ExitSuccess
}
type kongState struct {
services []kong.KongService
routes []kong.KongRoute
plugins []kong.KongPlugin
consumers []kong.KongConsumer
imports map[string][]string // { services: [<uuid>], routes: [<uuid>,] }
client *kong.KongClient
}
func (s *kongState) loadState(fileName string) error {
if stateFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0755); err != nil {
return err
} else {
defer stateFile.Close()
s.imports = make(map[string][]string)
raw, err := ioutil.ReadAll(stateFile)
if err != nil {
return err
}
if len(raw) == 0 {
s.imports["services"] = make([]string, 0)
s.imports["routes"] = make([]string, 0)
s.imports["plugins"] = make([]string, 0)
s.imports["consumers"] = make([]string, 0)
} else if err := json.Unmarshal(raw, &s.imports); err != nil {
return err
}
}
return nil
}
func (s *kongState) discover() error {
if consumers, err := s.client.GetConsumers(); err != nil {
return err
} else {
s.consumers = consumers
}
if services, err := s.client.GetServices(); err != nil {
return err
} else {
s.services = services
}
if routes, err := s.client.GetRoutes(); err != nil {
return err
} else {
s.routes = routes
}
if plugins, err := s.client.GetPlugins(); err != nil {
return err
} else {
s.plugins = plugins
}
return nil
}
func (s *kongState) discoveryReport() string {
lines := make([]string, 0)
if len(s.consumers) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d consumers", len(s.consumers)))
} else {
lines = append(lines, "No consumers discovered.")
}
if len(s.services) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d services", len(s.services)))
} else {
lines = append(lines, "No services discovered.")
}
if len(s.routes) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d routes", len(s.routes)))
} else {
lines = append(lines, "No routes discovered.")
}
if len(s.plugins) > 0 {
lines = append(lines, fmt.Sprintf("Discovered %d plugins", len(s.plugins)))
} else {
lines = append(lines, "No plugins discovered.")
}
for index, line := range lines {
lines[index] = fmt.Sprintf("- %s", line)
}
return strings.Join(lines, "\n")
}
func createHclSafeName(name string) string {
invalid := []string{"-", "/", " ", "."}
hclName := name
for _, c := range invalid {
hclName = strings.Replace(hclName, c, "_", -1)
}
return hclName
}
type resourceImport struct {
kongResourceType string // plugin, route, service, consumer
terraformResourceType string // kong_plugin, kong_route, kong_plugin_openid_connect
resourceName string // what's to the right of the terraformResourceType in the HCL
resourceId string
dryRun bool
configPath string // HCL config
}
func (s *kongState) hasResourceBeenImported(resource *resourceImport) bool {
resourceTypePluralized := resource.kongResourceType + "s"
if importedIds, ok := s.imports[resourceTypePluralized]; ok {
for _, id := range importedIds {
if id == resource.resourceId {
return true
}
}
}
return false
}
func (s *kongState) importResource(resourceImport *resourceImport) error {
if s.hasResourceBeenImported(resourceImport) {
return nil
}
terraformResourceName := fmt.Sprintf("%s.%s", resourceImport.terraformResourceType, createHclSafeName(resourceImport.resourceName))
if !resourceImport.dryRun {
// ex: terraform import -config=examples/import kong_service.service_to_import e86f981e-a580-4bd6-aef3-1324adfcc12c
cmd := exec.Command(
"terraform",
"import",
fmt.Sprintf("-config=%s", resourceImport.configPath),
terraformResourceName,
resourceImport.resourceId,
)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
fmt.Println(stderr.String())
return err
}
}
fmt.Println("Imported:", terraformResourceName) | if len(s.consumers) > 0 {
fmt.Println("\nImporting consumers:")
for _, consumer := range s.consumers {
resource := &resourceImport{
kongResourceType: "consumer",
terraformResourceType: "kong_consumer",
resourceName: getResourceNameForConsumer(&consumer),
resourceId: consumer.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["consumers"] = append(s.imports["consumers"], consumer.Id)
}
}
if len(s.services) > 0 {
fmt.Println("\nImporting services:")
for _, service := range s.services {
resource := &resourceImport{
kongResourceType: "service",
terraformResourceType: "kong_service",
resourceName: service.Name,
resourceId: service.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["services"] = append(s.imports["services"], service.Id)
}
}
}
if len(s.routes) > 0 {
fmt.Println("\nImporting routes:")
for _, route := range s.routes {
resource := &resourceImport{
kongResourceType: "route",
terraformResourceType: "kong_route",
resourceName: getResourceNameForRoute(s, &route),
resourceId: route.Id,
dryRun: cmd.isDryRun,
configPath: cmd.tfConfigPath,
}
if err := s.importResource(resource); err != nil {
return err
} else {
s.imports["routes"] = append(s.imports["routes"], route.Id)
}
}
}
if len(s.plugins) > 0 {
fmt.Println("\nImporting plugins:")
for _, plugin := range s.plugins {
terraformResourceType := "kong_plugin"
if pluginResourceImplementation, ok := pluginsToResourceImplementations[plugin |
return nil
}
func (s *kongState) importResources(cmd *importCommand) error { | random_line_split |
mtcnn.py | = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
|
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0 | imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0) | conditional_block |
mtcnn.py | [:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(500):
out += [onet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
empty_cache(device)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero(as_tuple=False)
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
def | pad | identifier_name |
|
mtcnn.py | = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
|
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros | def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10) | identifier_body |
mtcnn.py | = self.conv4_2(x)
return b, a
class RNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x) |
class ONet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0 | a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a | random_line_split |
lambdaFunction.go | Pull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
}
}
func (f *LambdaFunc) newInstance() {
if f.codeDir == "" {
panic("cannot start instance until code has been fetched")
}
linst := &LambdaInstance{
lfunc: f,
codeDir: f.codeDir,
meta: f.meta,
killChan: make(chan chan bool, 1),
}
f.instances.PushBack(linst)
go linst.Task()
}
func (f *LambdaFunc) Kill() | {
done := make(chan bool)
f.killChan <- done
<-done
} | identifier_body |
|
lambdaFunction.go | }
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for | if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
| {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
| conditional_block |
lambdaFunction.go | is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
}
f.codeDir = codeDir
f.lastPull = &now
return nil
}
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
// if we have, say, one job that will take 100
// seconds, spinning up 100 instances won't do any
// good, so cap by number of outstanding reqs
if outstandingReqs < desiredInstances {
desiredInstances = outstandingReqs
}
// always try to have one instance
if desiredInstances < 1 {
desiredInstances = 1
}
// AUTOSCALING STEP 2: tweak how many instances we have, to get closer to our goal
// make at most one scaling adjustment per second
adjustFreq := time.Second
now := time.Now()
if lastScaling != nil {
elapsed := now.Sub(*lastScaling)
if elapsed < adjustFreq {
if desiredInstances != f.instances.Len() {
timeout = time.NewTimer(adjustFreq - elapsed)
}
continue
}
}
// kill or start at most one instance to get closer to
// desired number
if f.instances.Len() < desiredInstances {
f.printf("increase instances to %d", f.instances.Len()+1)
f.newInstance()
lastScaling = &now
} else if f.instances.Len() > desiredInstances {
f.printf("reduce instances to %d", f.instances.Len()-1)
waitChan := f.instances.Back().Value.(*LambdaInstance).AsyncKill()
f.instances.Remove(f.instances.Back())
cleanupChan <- waitChan
lastScaling = &now
}
if f.instances.Len() != desiredInstances {
// we can only adjust quickly, so we want to
// run through this loop again as soon as
// possible, even if there are no requests to
// service.
timeout = time.NewTimer(adjustFreq)
}
}
}
func (f *LambdaFunc) | newInstance | identifier_name |
|
lambdaFunction.go | }
func (f *LambdaFunc) Invoke(w http.ResponseWriter, r *http.Request) {
t := common.T0("LambdaFunc.Invoke")
defer t.T1()
done := make(chan bool)
req := &Invocation{w: w, r: r, done: done}
// send invocation to lambda func task, if room in queue
select {
case f.funcChan <- req:
// block until it's done
<-done
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda function queue is full\n"))
}
}
// add function name to each log message so we know which logs
// correspond to which LambdaFuncs
func (f *LambdaFunc) printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("%s [FUNC %s]", strings.TrimRight(msg, "\n"), f.name)
}
// parseMeta reads in a requirements.txt file that was built from pip-compile
func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) {
meta = &sandbox.SandboxMeta{
Installs: []string{},
Imports: []string{},
}
path := filepath.Join(codeDir, "requirements.txt")
file, err := os.Open(path)
if errors.Is(err, os.ErrNotExist) {
// having a requirements.txt is optional
return meta, nil
} else if err != nil {
return nil, err
}
defer file.Close()
scnr := bufio.NewScanner(file)
for scnr.Scan() {
line := strings.ReplaceAll(scnr.Text(), " ", "")
pkg := strings.Split(line, "#")[0]
if pkg != "" {
pkg = packages.NormalizePkg(pkg)
meta.Installs = append(meta.Installs, pkg)
}
}
return meta, nil
}
// if there is any error:
// 1. we won't switch to the new code
// 2. we won't update pull time (so well check for a fix next time)
func (f *LambdaFunc) pullHandlerIfStale() (err error) {
// check if there is newer code, download it if necessary
now := time.Now()
cacheNs := int64(common.Conf.Registry_cache_ms) * 1000000
// should we check for new code?
if f.lastPull != nil && int64(now.Sub(*f.lastPull)) < cacheNs {
return nil
}
// is there new code?
rtType, codeDir, err := f.lmgr.HandlerPuller.Pull(f.name)
if err != nil {
return err
}
if codeDir == f.codeDir {
return nil
}
f.rtType = rtType
defer func() {
if err != nil {
if err := os.RemoveAll(codeDir); err != nil {
log.Printf("could not cleanup %s after failed pull\n", codeDir)
}
if rtType == common.RT_PYTHON {
// we dirty this dir (e.g., by setting up
// symlinks to packages, so we want the
// HandlerPuller to give us a new one next
// time, even if the code hasn't changed
f.lmgr.HandlerPuller.Reset(f.name)
}
}
}()
if rtType == common.RT_PYTHON {
// inspect new code for dependencies; if we can install
// everything necessary, start using new code
meta, err := parseMeta(codeDir)
if err != nil {
return err
}
// make sure all specified dependencies are installed
// (but don't recursively find others)
for _, pkg := range meta.Installs {
if _, err := f.lmgr.PackagePuller.GetPkg(pkg); err != nil {
return err
}
}
f.lmgr.DepTracer.TraceFunction(codeDir, meta.Installs)
f.meta = meta
} else if rtType == common.RT_NATIVE {
log.Printf("Got native function")
} |
// this Task receives lambda requests, fetches new lambda code as
// needed, and dispatches to a set of lambda instances. Task also
// monitors outstanding requests, and scales the number of instances
// up or down as needed.
//
// communication for a given request is as follows (each of the four
// transfers are commented within the function):
//
// client -> function -> instance -> function -> client
//
// each of the 4 handoffs above is over a chan. In order, those chans are:
// 1. LambdaFunc.funcChan
// 2. LambdaFunc.instChan
// 3. LambdaFunc.doneChan
// 4. Invocation.done
//
// If either LambdaFunc.funcChan or LambdaFunc.instChan is full, we
// respond to the client with a backoff message: StatusTooManyRequests
func (f *LambdaFunc) Task() {
f.printf("debug: LambdaFunc.Task() runs on goroutine %d", common.GetGoroutineID())
// we want to perform various cleanup actions, such as killing
// instances and deleting old code. We want to do these
// asynchronously, but in order. Thus, we use a chan to get
// FIFO behavior and a single cleanup task to get async.
//
// two types can be sent to this chan:
//
// 1. string: this is a path to be deleted
//
// 2. chan: this is a signal chan that corresponds to
// previously initiated cleanup work. We block until we
// receive the complete signal, before proceeding to
// subsequent cleanup tasks in the FIFO.
cleanupChan := make(chan any, 32)
cleanupTaskDone := make(chan bool)
go func() {
for {
msg, ok := <-cleanupChan
if !ok {
cleanupTaskDone <- true
return
}
switch op := msg.(type) {
case string:
if err := os.RemoveAll(op); err != nil {
f.printf("Async code cleanup could not delete %s, even after all instances using it killed: %v", op, err)
}
case chan bool:
<-op
}
}
}()
// stats for autoscaling
outstandingReqs := 0
execMs := common.NewRollingAvg(10)
var lastScaling *time.Time = nil
timeout := time.NewTimer(0)
for {
select {
case <-timeout.C:
if f.codeDir == "" {
continue
}
case req := <-f.funcChan:
// msg: client -> function
// check for new code, and cleanup old code
// (and instances that use it) if necessary
oldCodeDir := f.codeDir
if err := f.pullHandlerIfStale(); err != nil {
f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err)
req.w.WriteHeader(http.StatusInternalServerError)
req.w.Write([]byte(err.Error() + "\n"))
req.done <- true
continue
}
if oldCodeDir != "" && oldCodeDir != f.codeDir {
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
f.instances = list.New()
// cleanupChan is a FIFO, so this will
// happen after the cleanup task waits
// for all instance kills to finish
cleanupChan <- oldCodeDir
}
f.lmgr.DepTracer.TraceInvocation(f.codeDir)
select {
case f.instChan <- req:
// msg: function -> instance
outstandingReqs++
default:
// queue cannot accept more, so reply with backoff
req.w.WriteHeader(http.StatusTooManyRequests)
req.w.Write([]byte("lambda instance queue is full\n"))
req.done <- true
}
case req := <-f.doneChan:
// msg: instance -> function
execMs.Add(req.execMs)
outstandingReqs--
// msg: function -> client
req.done <- true
case done := <-f.killChan:
// signal all instances to die, then wait for
// cleanup task to finish and exit
el := f.instances.Front()
for el != nil {
waitChan := el.Value.(*LambdaInstance).AsyncKill()
cleanupChan <- waitChan
el = el.Next()
}
if f.codeDir != "" {
//cleanupChan <- f.codeDir
}
close(cleanupChan)
<-cleanupTaskDone
done <- true
return
}
// POLICY: how many instances (i.e., virtual sandboxes) should we allocate?
// AUTOSCALING STEP 1: decide how many instances we want
// let's aim to have 1 sandbox per second of outstanding work
inProgressWorkMs := outstandingReqs * execMs.Avg
desiredInstances := inProgressWorkMs / 1000
|
f.codeDir = codeDir
f.lastPull = &now
return nil
} | random_line_split |
lib.rs | threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated once the specified timespan elapses or the specified size is exceeded.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
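// Illustrative usage only - the path, rotation values and rename closure below are
// hypothetical (and `Duration` is assumed to be `std::time::Duration`):
//
// let writer = file_writer(
// "logs/fastjob.log",
// Duration::from_secs(24 * 60 * 60), // rotate daily...
// 128 * 1024 * 1024, // ...or once the file exceeds 128 MiB
// |path| Ok(path.with_extension("log.old")),
// )?;
// let drain = text_format(writer);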
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We also accept `warn` for legacy reasons.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is at or below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
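// Illustrative behaviour (a threshold of 100 is hypothetical): only records tagged
// "slow_log" are inspected, and they are dropped when their `takes` cost is at or
// below the threshold.
//
// slog_info!(logger, #"slow_log", "request finished"; "takes" => LogCost(42)); // filtered out
// slog_info!(logger, #"slow_log", "request finished"; "takes" => LogCost(250)); // reaches the inner drain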
struct SlowCostSerializer {
// None means the input record has no `takes` key
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = (); | random_line_split |
||
lib.rs | pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debugging purposes, so use an environment variable instead of the configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Get the top-level module name to check against the disabled targets.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
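// Illustrative call only (the values are hypothetical): a typical setup builds a
// drain from text_format(term_writer()) or file_writer(...) and then installs it
// globally, e.g.
//
// init_log(drain, Level::Info, true /*use_async*/, true /*init_stdlog*/, vec![], 100)?;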
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated once the specified timespan elapses or the specified size is exceeded.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We also accept `warn` for legacy reasons.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str |
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is at or below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, | {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
} | identifier_body |
lib.rs | let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated once the specified timespan elapses or the specified size is exceeded.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We also accept `warn` for legacy reasons.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is at or below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means the input record has no `takes` key
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = ();
type Err = io::Error;
fn | log | identifier_name |
|
animLib.py | weights can result
#in the tangent types being ignored (for the case of stepped mainly, but subtle weirdness with flat happens too)
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy, l=isLocked, wl=isWeighted )
cmd.keyTangent( attrpath, t=(time,), itt=itt, ott=ott )
else:
cmd.keyTangent( attrpath, t=(time,), ix=ix, iy=iy, ox=ox, oy=oy )
#cmd.keyTangent( e=True, g=True, wt=beginningWeightedTanState )
def getKeyTimes( self ):
'''
returns an ordered list of key times
'''
keyTimesSet = set()
for obj, attrDict in self.iteritems():
for attr, (weightedTangents, keyList) in attrDict.iteritems():
if keyList[0][0] is None:
continue
for tup in keyList:
keyTimesSet.add( tup[0] )
keyTimes = list( keyTimesSet )
keyTimes.sort()
return keyTimes
def getRange( self ):
'''
returns a tuple of (start, end)
'''
times = self.getKeyTimes()
try:
start, end = times[0], times[-1]
self.offset = start
except IndexError:
start, end = 0, 0
self.__range = 0
return start, end
def getRangeValue( self ):
try:
return self.__range
except AttributeError:
self.getRange()
return self.__range
range = property(getRangeValue)
def generatePreArgs( self ):
return tuple()
kEXPORT_DICT_THE_CLIP = 'clip'
kEXPORT_DICT_CLIP_TYPE = 'clip_type'
kEXPORT_DICT_OBJECTS = 'objects'
kEXPORT_DICT_WORLDSPACE = 'worldspace'
class ClipPreset(Preset):
'''
a clip preset is different from a normal preset because it is actually two separate files - a
pickled animation data file, and an icon
'''
TYPE_CLASSES = {kPOSE: PoseClip,
kANIM: AnimClip,
kDELTA: None}
TYPE_LABELS = {kPOSE: 'pose',
kANIM: 'anim',
kDELTA: 'delta'}
### auto generate a label types
LABEL_TYPES = {}
for t, l in TYPE_LABELS.iteritems():
LABEL_TYPES[l] = t
def __new__( cls, locale, library, name, type=kPOSE ):
tool = '%s/%s' % (TOOL_NAME, library)
typeLbl = cls.TYPE_LABELS[type]
ext = '%s.%s' % (typeLbl, kEXT)
self = Preset.__new__( cls, locale, tool, name, ext )
self.icon = Preset( locale, tool, name, '%s.bmp' % typeLbl )
return self
def asClip( self ):
presetDict = self.unpickle()
return presetDict[ kEXPORT_DICT_THE_CLIP ]
def niceName( self ):
return self.name().split('.')[0]
def getLibrary( self ):
return self[-2]
def setLibrary( self, library ):
self[-2] = library
def getTypeName( self ):
return self.name().split('.')[ -1 ]
def getType( self ):
typeLbl = self.getTypeName()
return self.LABEL_TYPES[typeLbl]
def move( self, library=None ):
if library is None:
library = self.getLibrary()
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the move...
Path.move(self, newLoc)
Path.move(self.icon, newLoc.icon)
return newLoc
def copy( self, library=None ):
if library is None:
library = self.library
newLoc = ClipPreset(self.other(), library, self.niceName(), self.getType())
#perform the copy...
Path.copy(self, newLoc)
Path.copy(self.icon, newLoc.icon)
return newLoc
def rename( self, newName ):
'''
newName should be the base name - sans any clip type id or extension...
'''
newName = '%s.%s' % (scrubName(newName), self.getTypeName())
Preset.rename(self, newName)
self.icon.rename(newName)
def delete( self ):
Path.delete(self)
self.icon.delete()
@api.d_noAutoKey
def apply( self, objects, attributes=None, **kwargs ):
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
clip = presetDict[ kEXPORT_DICT_THE_CLIP ]
#do a version check - if an older clip version is being used, perhaps we can write conversion functionality?
try:
ver = presetDict[ kEXPORT_DICT_TOOL_VER ]
if ver != VER:
api.melWarning("the anim clip version don't match!")
except KeyError:
api.melWarning("this is an old VER 1 pose clip - I don't know how to load them anymore...")
return
#generate the name mapping
slamApply = kwargs.get( 'slam', False )
if slamApply:
objects = cmd.ls( typ='transform' )
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
else:
tgts = names.matchNames( srcObjs, objects, threshold=kDEFAULT_MAPPING_THRESHOLD )
mapping = Mapping( srcObjs, tgts )
#run the clip's apply method
clip.apply( mapping, attributes, **kwargs )
def getClipObjects( self ):
'''
returns a list of all the object names contained in the clip
'''
presetDict = self.unpickle()
srcObjs = presetDict[ kEXPORT_DICT_OBJECTS ]
return srcObjs
def write( self, objects, **kwargs ):
type = self.getType()
clipDict = api.writeExportDict( TOOL_NAME, VER )
clipDict[ kEXPORT_DICT_CLIP_TYPE ] = type
clipDict[ kEXPORT_DICT_OBJECTS ] = objects
clipDict[ kEXPORT_DICT_WORLDSPACE ] = False
theClip = self.TYPE_CLASSES[ type ]()
success = theClip.generate( objects, **kwargs )
if not success:
printErrorStr( "Failed to generate clip!" )
return
clipDict[ kEXPORT_DICT_THE_CLIP ] = theClip
#write the preset file to disk
self.pickle( clipDict )
#generate the icon for the clip and add it to perforce if appropriate
icon = generateIcon( self )
#icon.asP4().add()
printInfoStr( "Generated clip!" )
class ClipManager(PresetManager):
'''
an abstraction for listing libraries and library clips for clip presets - there are two
main differences between clip presets and other presets - clips have a library which is
a subdir of the main preset dir, and there are also multiple types of clips, all with
the same extension.
'''
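#For illustration only - the library and clip names below are hypothetical. A library
#is just a subdirectory of the tool's preset dir, and each clip in it is a pair of
#files, e.g. walkCycles/run.anim.<ext> for the pickled data plus walkCycles/run.anim.bmp for its icon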
def __init__( self ):
PresetManager.__init__(self, TOOL_NAME, kEXT)
def getLibraryNames( self ):
'''
returns the names of all libraries under the current mod
'''
libraries = set()
for locale, paths in self.getLibraryPaths().iteritems():
for p in paths:
libName = p.name()
libraries.add(libName)
libraries = list(libraries)
libraries.sort()
return libraries
def getLibraryPaths( self ):
'''
returns a dictionary of library paths keyed using locale. ie:
{LOCAL: [path1, path2, ...], GLOBAL: etc...}
'''
localeDict = {}
for locale in LOCAL, GLOBAL:
localeDict[locale] = libraries = []
dirs = self.getPresetDirs(locale)
libraryNames = set()
for d in dirs:
dLibs = d.dirs()
for dLib in dLibs:
dLibName = dLib[-1]
if dLibName not in libraryNames:
libraries.append(dLib)
libraryNames.add(dLibName)
return localeDict
def createLibrary( self, name ):
newLibraryPath = Preset(LOCAL, TOOL_NAME, name, '')
newLibraryPath.create()
def getLibraryClips( self, library ):
| global kEXT
clips = {LOCAL: [], GLOBAL: []}
for locale in LOCAL, GLOBAL:
localeClips = clips[locale]
for dir in getPresetDirs(locale, TOOL_NAME):
dir += library
if not dir.exists:
continue
for f in dir.files():
if f.hasExtension( kEXT ):
f = f.setExtension()
name, type = f[ -1 ].split('.')
f = f[ :-1 ]
type = ClipPreset.LABEL_TYPES[ type ]
localeClips.append( ClipPreset(locale, library, name, type) )
return clips | identifier_body |
|
animLib.py | .AddKey( time, value, ix, iy, ox, oy )
attrPath = '%s.%s' % (o, a)
try: self.curvePairs[attrPath].append( curve )
except KeyError: self.curvePairs[attrPath] = [ curve ]
#now iterate over each curve pair and make sure they both have keys on the same frames...
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
for t in curveTimes:
curveA.InsertKey( t )
curveB.InsertKey( t )
print 'keys on', attrPath, 'at times', curveTimes
def __call__( self, pct, mapping=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if pct == 0:
self.clipA.apply( self.getMapping() )
elif pct == 1:
self.clipB.apply( self.getMapping() )
else:
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
#because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
#at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
for time, keyA in curveA.m_keys.iteritems():
keyB = curveB.m_keys[ time ]
blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applies the animation additively
| return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames( obj, attrNamesToSkip=() ):
#grab attributes
objAttrs = cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or []
#also grab alias' - its possible to pass in an alias name, so we need to test against them as well
aliass = cmd.aliasAttr( obj, q=True ) or []
#because the aliasAttr cmd returns a list with the alias, attr pairs in a flat list, we need to iterate over the list, skipping every second entry
itAliass = iter( aliass )
for attr in itAliass:
objAttrs.append( attr )
itAliass.next()
filteredAttrs = []
for attr in objAttrs:
skipAttr = False
for skipName in attrNamesToSkip:
if attr == skipName:
skipAttr = True
elif attr.startswith( skipName +'[' ) or attr.startswith( skipName +'.' ):
skipAttr = True
if skipAttr:
continue
filteredAttrs.append( attr )
return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
blender = PoseBlender
@classmethod
def FromObjects( cls, objects ):
new = cls()
cls.generate(new, objects)
return new
def __add__( self, other ):
'''
for adding multiple pose clips together - returns a new PoseClip instance
'''
pass
def __mult__( self, other ):
'''
for multiplying a clip by a scalar value
'''
assert isinstance(other, (int, long, float))
new = PoseClip
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
attrDict[attr] = value * other
def generate( self, objects, attrs=None ):
'''
generates a pose dictionary - it's basically just a dict with node names for keys. key values
are dictionaries with attribute name keys and attribute value keys
'''
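#for illustration only (object and attribute names are hypothetical) the resulting
#structure looks like:
#{ 'arm_L_ctrl': { 'translateX': 0.0, 'rotateZ': 45.0 }, 'head_ctrl': { 'rotateY': -10.0 } }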
self.clear()
if attrs:
attrs = set( attrs )
for obj in objects:
objType = cmd.nodeType( obj )
attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
objAttrs = set( attrGetFunction( obj ) )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if objAttrs is None:
continue
self[ obj ] = objDict = {}
for attr in objAttrs:
objDict[ attr ] = cmd.getAttr( '%s.%s' % (obj, attr) )
return True
@d_unifyUndo
def apply( self, mapping, attributes=None, **kwargs ):
'''
construct a mel string to pass to eval - so it can be contained in a single undo...
'''
cmdQueue = api.CmdQueue()
#gather options...
additive = kwargs.get( self.kOPT_ADDITIVE,
self.kOPT_DEFAULTS[ self.kOPT_ADDITIVE ] )
#convert the attribute list to a set for fast lookup
if attributes:
attributes = set( attributes )
for clipObj, tgtObj in mapping.iteritems():
try:
attrDict = self[ clipObj ]
except KeyError: continue
for attr, value in attrDict.iteritems():
if attributes:
if attr not in attributes:
continue
if not tgtObj:
continue
attrpath = '%s.%s' % (tgtObj, attr)
try:
if not cmd.getAttr( attrpath, settable=True ): continue
except TypeError: continue
if additive: value += cmd.getAttr( attrpath )
cmdQueue.append( 'setAttr -clamp %s %f;' % (attrpath, value) )
cmdQueue()
class AnimClip(BaseClip):
blender = AnimBlender
def __init__( self, objects=None ):
self.offset = 0
BaseClip.__init__(self, objects)
def __add__( self, other ):
pass
def __mult__( self, other ):
assert isinstance(other, (int, long, float))
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
value *= other
def generate( self, objects, attrs=None, startFrame=None, endFrame=None ):
'''
generates an anim dictionary - it's basically just a dict with node names for keys. key values
are lists of tuples with the form: (keyTime, attrDict) where attrDict is a dictionary with
attribute name keys and attribute value keys
'''
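#for illustration only (names and values are hypothetical) each attribute maps to a
#(weightedTangents, keyList) pair where every key is a
#(time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted) tuple, e.g.
#{ 'arm_L_ctrl': { 'rotateZ': (False, [ (1.0, 0.0, 'spline', 'spline', 1.0, 0.0, 1.0, 0.0, False, False) ]) } }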
defaultWeightedTangentOpt = bool(cmd.keyTangent(q=True, g=True, wt=True))
self.clear()
if attrs:
attrs = set( attrs )
if startFrame is None:
startFrame = cmd.playbackOptions( q=True, min=True )
if endFrame is None:
endFrame = cmd.playbackOptions( q=True, max=True )
startFrame, endFrame = list( sorted( [startFrame, endFrame] ) )
self.offset = startFrame
#list all keys on the objects - so we can determine the start frame, and range. all | '''
pass
def getObjects( self ):
| random_line_split |
animLib.py |
time = cmd.currentTime(q=True)
#make sure the icon is open for edit if its a global clip
if preset.locale == GLOBAL and preset.icon.exists:
preset.edit()
icon = cmd.playblast(st=time, et=time, w=kICON_W_H[0], h=kICON_W_H[1], fo=True, fmt="image", v=0, p=100, orn=0, cf=str(preset.icon.resolve()))
icon = Path(icon)
if icon.exists:
icon = icon.setExtension('bmp', True)
cmd.setAttr("defaultRenderGlobals.imageFormat", imgFormat)
#restore viewport settings
try: cmd.select(sel)
except TypeError: pass
for setting, initialState in zip(settings, states):
mel.eval("modelEditor -e %s %s %s;" % (setting, initialState, panel))
return icon
class BaseBlender(object):
'''
a blender object is simply a callable object that when called with a percentage arg (0-1) will
apply said percentage of the given clips to the given mapping
'''
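#illustrative usage only - clipA, clipB and mapping are hypothetical:
#blend = PoseBlender( clipA, clipB, mapping )
#blend( 0.25 ) #apply a 75/25 weighting of clipA and clipB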
def __init__( self, clipA, clipB, mapping=None, attributes=None ):
self.clipA = clipA
self.clipB = clipB
self.__mapping = mapping
if attributes:
attributes = set( attributes )
self.attributes = attributes
def setMapping( self, mapping ):
self.__mapping = mapping
def getMapping( self ):
return self.__mapping
def __call__( self, pct, mapping=None ):
if mapping is not None:
self.setMapping( mapping )
assert self.getMapping() is not None
class PoseBlender(BaseBlender):
def __call__( self, pct, mapping=None, attributes=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if mapping is None:
mapping = self.getMapping()
if attributes is None:
attributes = self.attributes
mappingDict = mapping.asDict()
for clipAObj, attrDictA in self.clipA.iteritems():
#if the object isn't in the mapping dict, skip it
if clipAObj not in mappingDict:
continue
clipBObjs = mapping[ clipAObj ]
for a, valueA in attrDictA.iteritems():
if attributes:
if a not in attributes:
continue
if not clipAObj:
continue
attrpath = '%s.%s' % (clipAObj, a)
if not cmd.getAttr( attrpath, settable=True ):
continue
for clipBObj in clipBObjs:
try:
attrDictB = self.clipB[ clipBObj ]
except KeyError: continue
try:
valueB = attrDictB[ a ]
blendedValue = (valueA * (1-pct)) + (valueB * pct)
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
except KeyError:
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
except: pass
cmdQueue()
class AnimBlender(BaseBlender):
def __init__( self, clipA, clipB, mapping=None ):
BaseBlender.__init__(self, clipA, clipB, mapping)
#so now we need to generate a dict to represent the curves for both of the clips
animCurveDictA = self.animCurveDictA = {}
animCurveDictB = self.animCurveDictB = {}
#the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
self.curvePairs = {}
for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
for o, attrDict in clip.iteritems():
curveDict[o] = {}
for a, keyData in attrDict.iteritems():
curve = curveDict[o][a] = animCurve.AnimCurve()
#unpack the key data
weightedTangents, keyList = keyData
#generate the curves
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
curve.m_bWeighted = weightedTangents
curve.AddKey( time, value, ix, iy, ox, oy )
attrPath = '%s.%s' % (o, a)
try: self.curvePairs[attrPath].append( curve )
except KeyError: self.curvePairs[attrPath] = [ curve ]
#now iterate over each curve pair and make sure they both have keys on the same frames...
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
for t in curveTimes:
curveA.InsertKey( t )
curveB.InsertKey( t )
print 'keys on', attrPath, 'at times', curveTimes
def __call__( self, pct, mapping=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if pct == 0:
self.clipA.apply( self.getMapping() )
elif pct == 1:
self.clipB.apply( self.getMapping() )
else:
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
#because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
#at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
for time, keyA in curveA.m_keys.iteritems():
keyB = curveB.m_keys[ time ]
blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applies the animation additively
'''
pass
def getObjects( self ):
return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames( obj, attrNamesToSkip=() ):
#grab attributes
objAttrs = cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or []
#also grab alias' - its possible to pass in an alias name, so we need to test against them as well
aliass = cmd.aliasAttr( obj, q=True ) or []
#because the aliasAttr cmd returns a list with the alias, attr pairs in a flat list, we need to iterate over the list, skipping every second entry
itAliass = iter( aliass )
for attr in itAliass:
objAttrs.append( attr )
itAliass.next()
filteredAttrs = []
for attr in objAttrs:
skipAttr = False
for skipName in attrNamesToSkip:
if attr == skipName:
skipAttr = True
elif attr.startswith( skipName +'[' ) or attr.startswith( skipName +'.' ):
skipAttr = True
if skipAttr:
continue
filteredAttrs.append( attr )
return filteredAttrs
# | mel.eval("modelEditor -e %s 0 %s;" % (setting, panel)) | conditional_block |
|
animLib.py | ( self, pct, mapping=None, attributes=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if mapping is None:
mapping = self.getMapping()
if attributes is None:
attributes = self.attributes
mappingDict = mapping.asDict()
for clipAObj, attrDictA in self.clipA.iteritems():
#if the object isn't in the mapping dict, skip it
if clipAObj not in mappingDict:
continue
clipBObjs = mapping[ clipAObj ]
for a, valueA in attrDictA.iteritems():
if attributes:
if a not in attributes:
continue
if not clipAObj:
continue
attrpath = '%s.%s' % (clipAObj, a)
if not cmd.getAttr( attrpath, settable=True ):
continue
for clipBObj in clipBObjs:
try:
attrDictB = self.clipB[ clipBObj ]
except KeyError: continue
try:
valueB = attrDictB[ a ]
blendedValue = (valueA * (1-pct)) + (valueB * pct)
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, blendedValue) )
except KeyError:
cmdQueue.append( 'setAttr -clamp %s %f' % (attrpath, valueA) )
except: pass
cmdQueue()
class AnimBlender(BaseBlender):
def __init__( self, clipA, clipB, mapping=None ):
BaseBlender.__init__(self, clipA, clipB, mapping)
#so now we need to generate a dict to represent the curves for both of the clips
animCurveDictA = self.animCurveDictA = {}
animCurveDictB = self.animCurveDictB = {}
#the curvePairs attribute contains a dict - indexed by attrpath - containing a tuple of (curveA, curveB)
self.curvePairs = {}
for clip, curveDict in zip([self.clipA, self.clipB], [animCurveDictA, animCurveDictB]):
for o, attrDict in clip.iteritems():
curveDict[o] = {}
for a, keyData in attrDict.iteritems():
curve = curveDict[o][a] = animCurve.AnimCurve()
#unpack the key data
weightedTangents, keyList = keyData
#generate the curves
for time, value, itt, ott, ix, iy, ox, oy, isLocked, isWeighted in keyList:
curve.m_bWeighted = weightedTangents
curve.AddKey( time, value, ix, iy, ox, oy )
attrPath = '%s.%s' % (o, a)
try: self.curvePairs[attrPath].append( curve )
except KeyError: self.curvePairs[attrPath] = [ curve ]
#now iterate over each curve pair and make sure they both have keys on the same frames...
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
curveTimes = set( curveA.m_keys.keys() + curveB.m_keys.keys() )
for t in curveTimes:
curveA.InsertKey( t )
curveB.InsertKey( t )
print 'keys on', attrPath, 'at times', curveTimes
def __call__( self, pct, mapping=None ):
BaseBlender.__call__(self, pct, mapping)
cmdQueue = api.CmdQueue()
if pct == 0:
self.clipA.apply( self.getMapping() )
elif pct == 1:
self.clipB.apply( self.getMapping() )
else:
for attrPath, curves in self.curvePairs.iteritems():
try:
curveA, curveB = curves
except ValueError: continue
#because we know both curves have the same timings (ie if curveA has a key at time x, curveB is guaranteed to also have a key
#at time x) then we just need to iterate over the keys of one curve, and blend them with the values of the other
for time, keyA in curveA.m_keys.iteritems():
keyB = curveB.m_keys[ time ]
blendedValue = (keyA.m_flValue * (1-pct)) + (keyB.m_flValue * pct)
blendedIX = (keyA.m_flInTanX * (1-pct)) + (keyB.m_flInTanX * pct)
blendedIY = (keyA.m_flInTanY * (1-pct)) + (keyB.m_flInTanY * pct)
blendedOX = (keyA.m_flOutTanX * (1-pct)) + (keyB.m_flOutTanX * pct)
blendedOY = (keyA.m_flOutTanY * (1-pct)) + (keyB.m_flOutTanY * pct)
cmdQueue.append( 'setKeyframe -t %s -v %s %s' % (time, blendedValue, attrPath) )
cmdQueue.append( 'keyTangent -e -t %s -ix %s -iy %s -ox %s -oy %s %s' % (time, blendedIX, blendedIY, blendedOX, blendedOY, attrPath) )
class BaseClip(dict):
'''
baseclass for clips
'''
blender = None
OPTIONS =\
kOPT_ADDITIVE, kOPT_ADDITIVE_WORLD, kOPT_OFFSET, kOPT_CLEAR, kMULT, kOPT_ATTRSELECTION =\
'additive', 'additiveWorld', 'offset', 'clear', 'mult', 'attrSelection'
kOPT_DEFAULTS = { kOPT_ADDITIVE: False,
kOPT_ADDITIVE_WORLD: False,
kOPT_OFFSET: 0,
kOPT_CLEAR: True,
kMULT: 1,
kOPT_ATTRSELECTION: False }
def __init__( self, objects=None ):
if objects is not None:
self.generate( objects )
def generate( self, objects ):
pass
def apply( self, mapping, attributes=None, **kwargs ):
'''
valid kwargs are
additive [False] applies the animation additively
'''
pass
def getObjects( self ):
return self.keys()
def generatePreArgs( self ):
return tuple()
def getObjAttrNames( obj, attrNamesToSkip=() ):
#grab attributes
objAttrs = cmd.listAttr( obj, keyable=True, visible=True, scalar=True ) or []
#also grab alias' - its possible to pass in an alias name, so we need to test against them as well
aliass = cmd.aliasAttr( obj, q=True ) or []
#because the aliasAttr cmd returns a list with the alias, attr pairs in a flat list, we need to iterate over the list, skipping every second entry
itAliass = iter( aliass )
for attr in itAliass:
objAttrs.append( attr )
itAliass.next()
filteredAttrs = []
for attr in objAttrs:
skipAttr = False
for skipName in attrNamesToSkip:
if attr == skipName:
skipAttr = True
elif attr.startswith( skipName +'[' ) or attr.startswith( skipName +'.' ):
skipAttr = True
if skipAttr:
continue
filteredAttrs.append( attr )
return filteredAttrs
#defines a mapping between node type, and the function used to get a list of attributes from that node to save to the clip. by default getObjAttrNames( obj ) is called
GET_ATTR_BY_NODE_TYPE = { 'blendShape': lambda obj: getObjAttrNames( obj, ['envelope', 'weight', 'inputTarget'] ) }
class PoseClip(BaseClip):
blender = PoseBlender
@classmethod
def FromObjects( cls, objects ):
new = cls()
cls.generate(new, objects)
return new
def __add__( self, other ):
'''
for adding multiple pose clips together - returns a new PoseClip instance
'''
pass
def __mult__( self, other ):
'''
for multiplying a clip by a scalar value
'''
assert isinstance(other, (int, long, float))
new = PoseClip
for obj, attrDict in self.iteritems():
for attr, value in attrDict.iteritems():
attrDict[attr] = value * other
def generate( self, objects, attrs=None ):
'''
generates a pose dictionary - it's basically just a dict with node names for keys. key values
are dictionaries with attribute name keys and attribute value keys
'''
self.clear()
if attrs:
attrs = set( attrs )
for obj in objects:
objType = cmd.nodeType( obj )
attrGetFunction = GET_ATTR_BY_NODE_TYPE.get( objType, getObjAttrNames )
objAttrs = set( attrGetFunction( obj ) )
if attrs:
objAttrs = objAttrs.intersection( attrs )
if objAttrs is None:
continue
self[ obj ] = objDict = {}
for attr in objAttrs:
| __call__ | identifier_name |
|
textboardcomponent.js | leties when using transparent backgrounds.
The shader discards fragments with alpha <0.9, so you need to set the alpha value of the bg to something less than that.
The canvas rendering uses a blend function that dithers between the foreground color and the background color, including the relative alpha values.
By setting the background alpha slightly below 0.9 you should get a slight outline around the text of the background color. This is quite visually
pleasing and has the side effect of antialiasing the text somewhat.
If you set the alpha too low, your outline will end up black or nonexistent which can make the text look pretty choppy.
*/
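/*
Illustrative config only (the values are hypothetical): a board with a mostly
transparent background keeps the background alpha just under the shader's 0.9
discard cutoff, e.g.
config: { transparentBackground: true, backgroundColor: 'rgba(0,0,255,0.89)', textColor: 'white' }
*/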
/*
Note also that there seems to be something of a performance cost to using a shader that discards a lot of fragments, as in the case of transparent
backgrounds. Not sure why this is :-|
*/
/*
Known issues:
reset() is kinda quirky when dealing with transparent backgrounds
Transparent backgrounds generally have a few quirks. Best to use them sparingly.
*/
/* would be good to standardise this - make it so that we don't have to manually pass in rotationQuaternion etc. Ideally the entire p from params should
be passed to the constructor and also mined for */
var drawableclass = CARNIVAL.shape.SegmentedRectangle;
var meta = {
ident: 'net.meta4vr.textboard'
};
var TextBoard = function (params) {
CARNIVAL.component.Component.call(this, params, drawableclass);
//
// this._paramsOrig = params;
// var p = params || {};
// this.size = {maxX: p.width || 2, maxY: p.height || 2};
// superclass.call(this,
// p.position || {x:0, y:0, z:0},
// this.size,
// p.orientation || {x:0, y:0, z:0},
// {segmentsX:1, segmentsY:1, textureLabel:'orange', materialLabel:'matteplastic', label:p.label || null, rotationQuaternion:p.rotationQuaternion}
// );
var cfg = (params || {}).config || {};
var input = (params || {}).input || {};
this.textScale = cfg.textScale || 1; // Use this to scale the text
this.canvasScale = cfg.canvasScale || 200; // generally better not to mess with this
this.canvas = document.createElement('canvas');
// this.canvas.width = this.canvasScale * this.size.maxX;
// this.canvas.height = this.canvasScale * this.size.maxY;
this.canvas.width = this.canvasScale * this.drawParams.size.width;
this.canvas.height = this.canvasScale * this.drawParams.size.height;
this.ctx = this.canvas.getContext('2d');
this.currentTextLines = input.textLines || [];
this.currentText = input.text || null;
this.transparentBackground = cfg.transparentBackground || false;
if (this.transparentBackground) {
this.shaderLabel = 'basic'; /* TODO make a special shader for this */
}
this.backgroundColor = cfg.backgroundColor || (this.transparentBackground && 'rgba(0,0,255,0.89)' || 'rgba(0,0,255,1)');
var fslh = this.calculateFontSizeAndLineHeight(this.canvasScale, this.textScale);
this.boardRenderState = {
font: cfg.font || 'Arial',
fontSize: cfg.fontSize || fslh.fontSize,
lineHeight: cfg.lineHeight || fslh.lineHeight,
textColor: cfg.textColor || 'white',
// backgroundColor: p.backgroundColor || 'blue',
leftMargin: cfg.leftMargin || 4,
topMargin: cfg.topMargin || 4,
style: cfg.style || ''
};
this.cursor = null;
this.tex = null;
};
TextBoard.prototype = Object.create(CARNIVAL.component.Component.prototype);
// TextBoard.prototype.serialize = function () {
// var component = this;
// var mat = component.transformationMatrix();
//
// var getPos = function () {
// var trans = vec3.create();
// mat4.getTranslation(trans, mat);
// return trans;
// // return component.position; /* This should extract trans and rot from the transform matrix */
// }
// var getRot = function () {
// var rot = quat.create();
// mat4.getRotation(rot, mat);
// return rot;
// // return component.orientation;
// }
//
// return {
// component: this.meta.ident,
// parameters: {
// textLines: this._paramsOrig.textLines,
// position: getPos(),
// rotationQuaternion: getRot(),
// orientation: this._paramsOrig.orientation,
// width: this._paramsOrig.width,
// height: this._paramsOrig.height
// }
// }
// }
TextBoard.prototype.calculateFontSizeAndLineHeight = function (canvasScale, textScale) {
return {
fontSize: textScale * (canvasScale/5),
lineHeight: textScale * (canvasScale/4.8)
}
}
TextBoard.prototype.clear = function (color, suppressUpdate) {
color = color || this.backgroundColor;
// this.ctx.fillStyle = 'black';
// this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
this.ctx.fillStyle = color;
this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
if (!suppressUpdate) this.updateTexture();
}
TextBoard.prototype.reset = function () {
/* Just clearing the canvas doesn't work properly when we're using transparent backgrounds */
var newCanvas = document.createElement('canvas');
newCanvas.width = this.canvas.width;
newCanvas.height = this.canvas.height;
this.canvas = newCanvas;
this.ctx = this.canvas.getContext('2d');
this.cursor = null;
this.clear();
// this.textLines = [];
}
TextBoard.prototype.getTexture = function () {
var gl = CARNIVAL.engine.gl;
this.tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, this.tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return this.tex;
}
TextBoard.prototype.updateTexture = function () {
var gl = CARNIVAL.engine.gl;
gl.bindTexture(gl.TEXTURE_2D, this.tex);
// gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, this.canvas);
}
TextBoard.prototype.renderTextLines = function (textLines) {
var board = this;
var rstate = board.boardRenderState;
var text = null;
if (!board.cursor) board.cursor = {x:rstate.leftMargin, y:rstate.topMargin};
for (var i = 0; i < textLines.length; i++) {
var line = textLines[i];
rstate.font = line.font || rstate.font;
rstate.fontSize = line.fontSize || rstate.fontSize;
rstate.textColor = line.textColor || rstate.textColor;
rstate.backgroundColor = line.backgroundColor || rstate.backgroundColor;
rstate.lineHeight = line.lineHeight || rstate.lineHeight;
rstate.leftMargin = line.leftMargin || rstate.leftMargin;
rstate.topMargin = line.topMargin || rstate.topMargin;
rstate.style = line.style || rstate.style;
text = line.text || null;
if (text) {
// var ctx = board.canvas.getContext('2d');
board.ctx.fillStyle = rstate.textColor;
board.ctx.font = "@ST@ @SZ@px @F@".replace('@SZ@', rstate.fontSize).replace('@F@', rstate.font).replace('@ST@', rstate.style);
// console.log('drawing text', text, board.cursor.x, board.cursor.y+rstate.fontSize)
board.ctx.fillText(text, board.cursor.x, board.cursor.y+rstate.fontSize);
board.cursor.y += rstate.lineHeight;
}
}
board.updateTexture();
}
TextBoard.prototype.addTextLines = function (lines) {
for (var i = 0; i < lines.length; i++) {
this.currentTextLines.push(lines[i]); |
TextBoard.prototype.setText = function (lines) {
this.currentTextLines = lines;
this.reset();
this.renderTextLines(this.currentTextLines);
}
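/* Illustrative input only (the text and style values are hypothetical). Each entry
can override the board's render state for that line, e.g.
board.setText([
{text: 'Hello', fontSize: 48, textColor: 'white'},
{text: 'world', style: 'bold', lineHeight: 60}
]);
*/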
TextBoard.prototype.prepare = function () {
var board = this;
board.drawable.texture = board.getTexture();
board.clear(board.backgroundColor, true);
if (board.currentTextLines.length) {
board.renderTextLines(board.currentTextLines);
}
else if (board.currentText) {
board.addText | }
// this.currentTextLines.push(line);
this.reset();
this.renderTextLines(this.currentTextLines);
} | random_line_split |
textboardcomponent.js | ies when using transparent backgrounds.
The shader discards fragments with alpha <0.9, so you need to set the alpha value of the bg to something less than that.
The canvas rendering uses a blend function that dithers between the foreground color and the background color, including the relative alpha values.
By setting the background alpha slightly below 0.9 you should get a slight outline around the text of the background color. This is quite visually
pleasing and has the side effect of antialiasing the text somewhat.
If you set the alpha too low, your outline will end up black or nonexistent which can make the text look pretty choppy.
*/
/*
Note also that there seems to be something of a performance cost to using a shader that discards a lot of fragments, as in the case of transparent
backgrounds. Not sure why this is :-|
*/
/*
Known issues:
reset() is kinda quirky when dealing with transparent backgrounds
Transparent backgrounds generally have a few quirks. Best to use them sparingly.
*/
/* would be good to standardise this - make it so that we don't have to manually pass in rotationQuaternion etc. Ideally the entire p from params should
be passed to the constructor and also mined for */
var drawableclass = CARNIVAL.shape.SegmentedRectangle;
var meta = {
ident: 'net.meta4vr.textboard'
};
var TextBoard = function (params) {
CARNIVAL.component.Component.call(this, params, drawableclass);
//
// this._paramsOrig = params;
// var p = params || {};
// this.size = {maxX: p.width || 2, maxY: p.height || 2};
// superclass.call(this,
// p.position || {x:0, y:0, z:0},
// this.size,
// p.orientation || {x:0, y:0, z:0},
// {segmentsX:1, segmentsY:1, textureLabel:'orange', materialLabel:'matteplastic', label:p.label || null, rotationQuaternion:p.rotationQuaternion}
// );
var cfg = (params || {}).config || {};
var input = (params || {}).input || {};
this.textScale = cfg.textScale || 1; // Use this to scale the text
this.canvasScale = cfg.canvasScale || 200; // generally better not to mess with this
this.canvas = document.createElement('canvas');
// this.canvas.width = this.canvasScale * this.size.maxX;
// this.canvas.height = this.canvasScale * this.size.maxY;
this.canvas.width = this.canvasScale * this.drawParams.size.width;
this.canvas.height = this.canvasScale * this.drawParams.size.height;
this.ctx = this.canvas.getContext('2d');
this.currentTextLines = input.textLines || [];
this.currentText = input.text || null;
this.transparentBackground = cfg.transparentBackground || false;
if (this.transparentBackground) {
this.shaderLabel = 'basic'; /* TODO make a special shader for this */
}
this.backgroundColor = cfg.backgroundColor || (this.transparentBackground && 'rgba(0,0,255,0.89)' || 'rgba(0,0,255,1)');
var fslh = this.calculateFontSizeAndLineHeight(this.canvasScale, this.textScale);
this.boardRenderState = {
font: cfg.font || 'Arial',
fontSize: cfg.fontSize || fslh.fontSize,
lineHeight: cfg.lineHeight || fslh.lineHeight,
textColor: cfg.textColor || 'white',
// backgroundColor: p.backgroundColor || 'blue',
leftMargin: cfg.leftMargin || 4,
topMargin: cfg.topMargin || 4,
style: cfg.style || ''
};
this.cursor = null;
this.tex = null;
};
TextBoard.prototype = Object.create(CARNIVAL.component.Component.prototype);
// TextBoard.prototype.serialize = function () {
// var component = this;
// var mat = component.transformationMatrix();
//
// var getPos = function () {
// var trans = vec3.create();
// mat4.getTranslation(trans, mat);
// return trans;
// // return component.position; /* This should extract trans and rot from the transform matrix */
// }
// var getRot = function () {
// var rot = quat.create();
// mat4.getRotation(rot, mat);
// return rot;
// // return component.orientation;
// }
//
// return {
// component: this.meta.ident,
// parameters: {
// textLines: this._paramsOrig.textLines,
// position: getPos(),
// rotationQuaternion: getRot(),
// orientation: this._paramsOrig.orientation,
// width: this._paramsOrig.width,
// height: this._paramsOrig.height
// }
// }
// }
TextBoard.prototype.calculateFontSizeAndLineHeight = function (canvasScale, textScale) {
return {
fontSize: textScale * (canvasScale/5),
lineHeight: textScale * (canvasScale/4.8)
}
}
TextBoard.prototype.clear = function (color, suppressUpdate) {
color = color || this.backgroundColor;
// this.ctx.fillStyle = 'black';
// this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
this.ctx.fillStyle = color;
this.ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
if (!suppressUpdate) this.updateTexture();
}
TextBoard.prototype.reset = function () {
/* Just clearing the canvas doesn't work properly when we're using transparent backgrounds */
var newCanvas = document.createElement('canvas');
newCanvas.width = this.canvas.width;
newCanvas.height = this.canvas.height;
this.canvas = newCanvas;
this.ctx = this.canvas.getContext('2d');
this.cursor = null;
this.clear();
// this.textLines = [];
}
TextBoard.prototype.getTexture = function () {
var gl = CARNIVAL.engine.gl;
this.tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, this.tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return this.tex;
}
TextBoard.prototype.updateTexture = function () {
var gl = CARNIVAL.engine.gl;
gl.bindTexture(gl.TEXTURE_2D, this.tex);
// gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, this.canvas);
}
TextBoard.prototype.renderTextLines = function (textLines) {
var board = this;
var rstate = board.boardRenderState;
var text = null;
if (!board.cursor) board.cursor = {x:rstate.leftMargin, y:rstate.topMargin};
for (var i = 0; i < textLines.length; i++) | board.cursor.y += rstate.lineHeight;
}
}
board.updateTexture();
}
TextBoard.prototype.addTextLines = function (lines) {
for (var i = 0; i < lines.length; i++) {
this.currentTextLines.push(lines[i]);
}
// this.currentTextLines.push(line);
this.reset();
this.renderTextLines(this.currentTextLines);
}
TextBoard.prototype.setText = function (lines) {
this.currentTextLines = lines;
this.reset();
this.renderTextLines(this.currentTextLines);
}
TextBoard.prototype.prepare = function () {
var board = this;
board.drawable.texture = board.getTexture();
board.clear(board.backgroundColor, true);
if (board.currentTextLines.length) {
board.renderTextLines(board.currentTextLines);
}
else if (board.currentText) {
board.addText | {
var line = textLines[i];
rstate.font = line.font || rstate.font;
rstate.fontSize = line.fontSize || rstate.fontSize;
rstate.textColor = line.textColor || rstate.textColor;
rstate.backgroundColor = line.backgroundColor || rstate.backgroundColor;
rstate.lineHeight = line.lineHeight || rstate.lineHeight;
rstate.leftMargin = line.leftMargin || rstate.leftMargin;
rstate.topMargin = line.topMargin || rstate.topMargin;
rstate.style = line.style || rstate.style;
text = line.text || null;
if (text) {
// var ctx = board.canvas.getContext('2d');
board.ctx.fillStyle = rstate.textColor;
board.ctx.font = "@ST@ @SZ@px @F@".replace('@SZ@', rstate.fontSize).replace('@F@', rstate.font).replace('@ST@', rstate.style);
// console.log('drawing text', text, board.cursor.x, board.cursor.y+rstate.fontSize)
board.ctx.fillText(text, board.cursor.x, board.cursor.y+rstate.fontSize); | conditional_block |
section_0771_to_0788.rs | |fin_col|, |fin_row|, |fin_align|.
//!
//! @ When \.{\\halign} or \.{\\valign} has been scanned in an appropriate
//! mode, \TeX\ calls |init_align|, whose task is to get everything off to a
//! good start. This mostly involves scanning the preamble and putting its
//! information into the preamble list.
//! @^preamble@>
//! | //! procedure@?align_peek; forward;@t\2@>@/
//! procedure@?normal_paragraph; forward;@t\2@>@/
//! procedure init_align;
//! label done, done1, done2, continue;
//! var save_cs_ptr:pointer; {|warning_index| value for error messages}
//! @!p:pointer; {for short-term temporary use}
//! begin save_cs_ptr:=cur_cs; {\.{\\halign} or \.{\\valign}, usually}
//! push_alignment; align_state:=-1000000; {enter a new alignment level}
//! @<Check for improper alignment in displayed math@>;
//! push_nest; {enter a new semantic level}
//! @<Change current mode to |-vmode| for \.{\\halign}, |-hmode| for \.{\\valign}@>;
//! scan_spec(align_group,false);@/
//! @<Scan the preamble and record it in the |preamble| list@>;
//! new_save_level(align_group);
//! if every_cr<>null then begin_token_list(every_cr,every_cr_text);
//! align_peek; {look for \.{\\noalign} or \.{\\omit}}
//! end;
//!
//! @ In vertical modes, |prev_depth| already has the correct value. But
//! if we are in |mmode| (displayed formula mode), we reach out to the
//! enclosing vertical mode for the |prev_depth| value that produces the
//! correct baseline calculations.
//!
//! @<Change current mode...@>=
//! if mode=mmode then
//! begin mode:=-vmode; prev_depth:=nest[nest_ptr-2].aux_field.sc;
//! end
//! else if mode>0 then negate(mode)
//!
//! @ When \.{\\halign} is used as a displayed formula, there should be
//! no other pieces of mlists present.
//!
//! @<Check for improper alignment in displayed math@>=
//! if (mode=mmode)and((tail<>head)or(incompleat_noad<>null)) then
//! begin print_err("Improper "); print_esc("halign"); print(" inside $$'s");
//! @.Improper \\halign...@>
//! help3("Displays can use special alignments (like \eqalignno)")@/
//! ("only if nothing but the alignment itself is between $$'s.")@/
//! ("So I've deleted the formulas that preceded this alignment.");
//! error; flush_math;
//! end
//!
//! @ @<Scan the preamble and record it in the |preamble| list@>=
//! preamble:=null; cur_align:=align_head; cur_loop:=null; scanner_status:=aligning;
//! warning_index:=save_cs_ptr; align_state:=-1000000;
//! {at this point, |cur_cmd=left_brace|}
//! loop@+ begin @<Append the current tabskip glue to the preamble list@>;
//! if cur_cmd=car_ret then goto done; {\.{\\cr} ends the preamble}
//! @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|,
//! looking for changes in the tabskip glue; append an
//! alignrecord to the preamble list@>;
//! end;
//! done: scanner_status:=normal
//!
//! @ @<Append the current tabskip glue to the preamble list@>=
//! link(cur_align):=new_param_glue(tab_skip_code);
//! cur_align:=link(cur_align)
//!
//! @ @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|...@>=
//! @<Scan the template \<u_j>, putting the resulting token list in |hold_head|@>;
//! link(cur_align):=new_null_box; cur_align:=link(cur_align); {a new alignrecord}
//! info(cur_align):=end_span; width(cur_align):=null_flag;
//! u_part(cur_align):=link(hold_head);
//! @<Scan the template \<v_j>, putting the resulting token list in |hold_head|@>;
//! v_part(cur_align):=link(hold_head)
//!
//! @ We enter `\.{\\span}' into |eqtb| with |tab_mark| as its command code,
//! and with |span_code| as the command modifier. This makes \TeX\ interpret it
//! essentially the same as an alignment delimiter like `\.\&', yet it is
//! recognizably different when we need to distinguish it from a normal delimiter.
//! It also turns out to be useful to give a special |cr_code| to `\.{\\cr}',
//! and an even larger |cr_cr_code| to `\.{\\crcr}'.
//!
//! The end of a template is represented by two ``frozen'' control sequences
//! called \.{\\endtemplate}. The first has the command code |end_template|, which
//! is |>outer_call|, so it will not easily disappear in the presence of errors.
//! The |get_x_token| routine converts the first into the second, which has |endv|
//! as its command code.
//!
//! @d span_code=256 {distinct from any character}
//! @d cr_code=257 {distinct from |span_code| and from any character}
//! @d cr_cr_code=cr_code+1 {this distinguishes \.{\\crcr} from \.{\\cr}}
//! @d end_template_token==cs_token_flag+frozen_end_template
//!
//! @<Put each of \TeX's primitives into the hash table@>=
//! primitive("span",tab_mark,span_code);@/
//! @!@:span_}{\.{\\span} primitive@>
//! primitive("cr",car_ret,cr_code);
//! @!@:cr_}{\.{\\cr} primitive@>
//! text(frozen_cr):="cr"; eqtb[frozen_cr]:=eqtb[cur_val];@/
//! primitive("crcr",car_ret,cr_cr_code);
//! @!@:cr_cr_}{\.{\\crcr} primitive@>
//! text(frozen_end_template):="endtemplate"; text(frozen_endv):="endtemplate";
//! eq_type(frozen_endv):=endv; equiv(frozen_endv):=null_list;
//! eq_level(frozen_endv):=level_one;@/
//! eqtb[frozen_end_template]:=eqtb[frozen_endv];
//! eq_type(frozen_end_template):=end_template;
//!
//! @ @<Cases of |print_cmd_chr|...@>=
//! tab_mark: if chr_code=span_code then print_esc("span")
//! else chr_cmd("alignment tab character ");
//! car_ret: if chr_code=cr_code then print_esc("cr")
//! else print_esc("crcr");
//!
//! @ The preamble is copied directly, except that \.{\\tabskip} causes a change
//! to the tabskip glue, thereby possibly expanding macros that immediately
//! follow it. An appearance of \.{\\span} also causes such an expansion.
//!
//! Note that if the preamble contains `\.{\\global\\tabskip}', the `\.{\\global}'
//! token survives in the preamble and the `\.{\\tabskip}' defines new
//! tabskip glue (locally).
//!
//! @<Declare the procedure called |get_preamble_token|@>=
//! procedure get_preamble_token;
//! label restart;
//! begin restart: get_token;
//! while (cur_chr=span_code)and(cur_cmd=tab_mark) do
//! begin get_token; {this token will be expanded once}
//! if cur_cmd>max_command then
//! begin expand; get_token;
//! end;
//! end;
//! if cur_cmd=endv then
//! fatal_error("(interwoven alignment preambles are not allowed)");
//! @.interwoven alignment preambles...@>
//! if (cur_cmd=assign_glue)and(cur_chr=glue_base+tab_skip_code) then
//! begin scan_optional_equals; scan_glue(glue_val);
//! if global_defs>0 then geq_define(glue_base+tab_skip_code,glue_ref,cur_val)
//! else eq_define(glue_base+tab_skip_code,glue_ref,cur_val);
//! goto restart;
//! end;
//! end;
//!
//! @ Spaces are eliminated from the beginning of a template.
//!
//! @<Scan the template \<u_j>...@>=
//! p:=hold_head; link(p):=null;
//! loop@+ begin get_preamble_token;
//! if cur_cmd=mac_param then goto done1;
//! if (cur_cmd<=car_ret)and(cur_cmd>=tab_mark)and(align_state=-1000000) then
//! if (p=hold_head)and(cur_loop=null)and(cur_cmd=tab_mark)
//! then cur_loop:=cur_align
//! else begin print_err("Missing # inserted in alignment preamble");
//! @.Missing \# inserted...@>
//! help3("There | //! @p @t\4@>@<Declare the procedure called |get_preamble_token|@>@t@>@/ | random_line_split |
http.rs | .send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" |
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether | {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
} | conditional_block |
http.rs | .send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?; | if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the | random_line_split |
|
http.rs | <F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf | post | identifier_name |
|
coref.py | soup = BeautifulSoup(f.read(),'lxml')
f.close()
soup_all = []
stack = []
coreList = []
core_all = []
coref = []
wrapped = False
for siru in soup.findAll('coref'):
soupList = []
soupList.append(siru.attrs.get('id'))
soupList.append(siru.attrs.get('type'))
soupList.append(''.join(list(siru.strings)))
soup_all.append(soupList)
for line in open(conllFile):
param = line.split()
if len(param) < 6:
sentno = sentno + 1
continue
info_coref = param[-1].split('|')
for co in info_coref:
if co.startswith('('):
stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
param = 0
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif length > 7 and length < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif length > 15 and length < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif length > 31 and length < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif length > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getPosition(mpos,total):
return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,genre):
print('begin')
vector = []
vectors = []
labels | random_line_split |
||
coref.py | (Dir):
r = Dir + '/'
cors = []
cons = []
data = []
genre = []
dirs = [x for x in glob.glob(r + '*')]
for d in dirs:
for _r, _d, files in os.walk(d):
for f in files:
if f.endswith('_cors'):
cors.append([os.path.join(_r, f),d])
elif f.endswith('_cons'):
cons.append(os.path.join(_r,f))
cors.sort()
cons.sort()i
count = 0
for r,n in zip(cors,cons):
data.append([r[0],n])
genre.append(r[1].split('/')[-1])
if count == 50:
break
count = count + 1
return data,genre
def file_to_word(conllfile):
wordList = []
for line in open(conllfile).readlines():
sp = line.split()
if len(sp) > 6:
wordList.append([sp[3],sp[4],sp[9]])
return wordList
def file_to_coref(corefFile,conllFile):
index = 0
corefno = 0
sentno = 0
f = open(corefFile)
soup = BeautifulSoup(f.read(),'lxml')
f.close()
soup_all = []
stack = []
coreList = []
core_all = []
coref = []
wrapped = False
for siru in soup.findAll('coref'):
soupList = []
soupList.append(siru.attrs.get('id'))
soupList.append(siru.attrs.get('type'))
soupList.append(''.join(list(siru.strings)))
soup_all.append(soupList)
for line in open(conllFile):
param = line.split()
if len(param) < 6:
sentno = sentno + 1
continue
info_coref = param[-1].split('|')
for co in info_coref:
if co.startswith('('):
stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
param = 0
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0, | load_dir | identifier_name |
|
coref.py | stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
|
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif length > 7 and length < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif length > 15 and length < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif length > 31 and length < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif length > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getPosition(mpos,total):
return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,genre):
print('begin')
vector = []
vectors = []
labels = []
costs = []
antecedents = ['NA']
total = len(mentions)
print(total)
Wembed = Wembedave[0]
ave_all = Wembedave[1]
for m in mentions:
for a in antecedents:
if a == 'NA':
tmp = [0 for i in range(512)]
tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
for i in range(36):
tmp.append(0)
vectors.append(tmp)
labels.append([0.])
continue
elif(m[4] == a[4]):
labels.append([1.])
else:
labels.append([0.])
vector.extend(getEmbedding | param = 0 | conditional_block |
coref.py | stack.append([co,corefno])
core_all.append([index,0,0,sentno])
if wrapped:
core_all[corefno][2]
corefno = corefno + 1
if len(stack) == 0:
wrapped = False
elif len(stack) == 1:
wrapped = True
elif len(stack) > 1:
wrapped = True
for j in range(len(stack) - 1):
core_all[stack[j][1]][1] = 1
if co.endswith(')'):
stack.pop()
index = index + 1
for (cor,miso) in zip(core_all,soup_all):
coref.append(cor + miso)
return coref
def file_to_List(data):
return file_to_coref(data[0],data[1]),file_to_word(data[1])
def getGenreFeature(genre):
if genre == 'bc':
param = 0
elif genre == 'bn':
param = 1
elif genre == 'mz':
param = 2
elif genre == 'nw':
param = 3
elif genre == 'pt':
param = 4
elif genre == 'tc':
param = 5
elif genre == 'wb':
param = 6
return [param]
def getWordEmbedding(info_words):
dic = shelve.open('wordembedding.db')
embeddingList = []
for words in info_words:
if words[0] in dic:
embeddingList.append(np.array(dic[words[0].lower()], dtype = np.float32))
else:
embeddingList.append(np.random.randn(50))
ave_all = sum(embeddingList)/len(embeddingList)
dic.close()
return (embeddingList,ave_all)
def getEmbeddingFeature(mention, mpos, word, Wembed,ave_all_d):
smention = mention.split()
ret = []
len_m = len(smention)
len_w = len(Wembed)
vacant = ave_pre_5 = ave_fol_5 = ave_all_m = ave_all_s = np.zeros(50)
#mentionの最初の単語
ret.append(Wembed[mpos])
#mentionの最後の単語
ret.append(Wembed[mpos + len_m - 1])
#mentionの2つ前の単語
if mpos / 2 > 0:
ret.append(Wembed[mpos - 2])
else:
ret.append(vacant)
#mentionの1つ前の単語
if mpos != 0:
ret.append(Wembed[mpos - 1])
else:
ret.append(vacant)
#mentionの1つ後の単語
pos_f = len_w - (mpos + len_m - 1)
if pos_f > 1:
ret.append(Wembed[mpos + len_m])
else:
ret.append(vacant)
#mentionの2つ後の単語
if pos_f > 2:
ret.append(Wembed[mpos + len_m + 1])
else:
ret.append(vacant)
#前5つの単語の平均
if mpos / 5 > 0:
for i in range(5):
ave_pre_5 += Wembed[mpos - i - 1]
else:
for i in range(mpos):
ave_pre_5 += Wembed[mpos - i - 1]
ret.append(ave_pre_5/5)
#後5つの単語の平均
pos_f5 = len_w - (mpos + len_m - 1)
if pos_f5 > 5:
for j in range(5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
else:
for j in range(pos_f5):
ave_fol_5 += Wembed[mpos + len_m + j - 1]
ret.append(ave_fol_5/5)
#mentionの単語の平均
for k in range(len_m):
ave_all_m += Wembed[mpos + k]
ret.append(ave_all_m/len_m)
#文書の全単語の平均
ret.append(ave_all_d)
ret = [flatten for inner in ret for flatten in inner]
return ret
def getDistance(aPos, mPos):
dis = mPos - aPos
if dis == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif dis == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif dis == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif dis == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif dis == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif dis > 4 and dis < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif dis > 7 and dis < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif dis > 15 and dis < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif dis > 31 and dis < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif dis > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getSpeaker(aSpeaker, mSpeaker):
if (aSpeaker == mSpeaker):
return [1]
else:
return [0]
def stringMatch(a, m):
if (a == m):
return [1]
else:
return [0]
def getLength(mention):
length = len(mention.split())
if length == 0:
ret = [1,0,0,0,0,0,0,0,0,0]
elif length == 1:
ret = [0,1,0,0,0,0,0,0,0,0]
elif length == 2:
ret = [0,0,1,0,0,0,0,0,0,0]
elif length == 3:
ret = [0,0,0,1,0,0,0,0,0,0]
elif length == 4:
ret = [0,0,0,0,1,0,0,0,0,0]
elif length > 4 and length < 8:
ret = [0,0,0,0,0,1,0,0,0,0]
elif length > 7 and length < 16:
ret = [0,0,0,0,0,0,1,0,0,0]
elif length > 15 and length < 32:
ret = [0,0,0,0,0,0,0,1,0,0]
elif length > 31 and length < 64:
ret = [0,0,0,0,0,0,0,0,1,0]
elif length > 63:
ret = [0,0,0,0,0,0,0,0,0,1]
return ret
def getPosition(mpos,total):
return [float(mpos)/float(total)]
def particalMatch(a, m):
awords = a.split()
mwords = m.split()
pMatch = 0
for a in awords:
for m in mwords:
if (a == m):
| nre):
print('begin')
vector = []
vectors = []
labels = []
costs = []
antecedents = ['NA']
total = len(mentions)
print(total)
Wembed = Wembedave[0]
ave_all = Wembedave[1]
for m in mentions:
for a in antecedents:
if a == 'NA':
tmp = [0 for i in range(512)]
tmp.extend(getEmbeddingFeature(m[6],m[0],words,Wembed,ave_all))
for i in range(36):
tmp.append(0)
vectors.append(tmp)
labels.append([0.])
continue
elif(m[4] == a[4]):
labels.append([1.])
else:
labels.append([0.])
vector.extend(get | pMatch = 1
break
return [pMatch]
def getInclude(include):
if include:
return [1]
else:
return [0]
def getVectors(mentions,words,Wembedave,ge | identifier_body |
avito-lightgbm-with-ridge-feature-1.py | Fold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
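# Out-of-fold (OOF) stacking helpers: SklearnWrapper gives a uniform train/predict interface,
# and get_oof (below) turns any such model into leak-free train features plus averaged test predictions.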
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
        if params is None:
            params = {}
        if seed_bool:
            params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
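# For each of the NFOLDS splits, fit on the training folds and predict both the held-out fold
# (OOF train predictions) and the full test set; test predictions are averaged across folds.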
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
|
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
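# cleanName lower-cases text and strips punctuation/special symbols so titles and descriptions
# vectorize consistently; it returns a placeholder string if anything goes wrong.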
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
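# RMSE on deal_probability is the evaluation metric for this competition.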
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
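# Basic numeric cleanup: log-transform price (with a small offset) and fill missing
# price / image_top_1 values so later stages see no NaNs.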
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Week of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
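# Label-encode the high-cardinality categorical columns so LightGBM receives integer codes.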
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
    df[col] = df[col].fillna('Unknown')
df[col] = lbl.fit_transform(df[col].astype(str))
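# Simple meta text features (punctuation count, word counts, unique-word share) computed
# on the raw text columns before the TF-IDF stage.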
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
    df[cols] = df[cols].fillna('missing')  # FILL NA first, so missing text doesn't become the literal string 'nan'
    df[cols] = df[cols].astype(str)
    df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words don't get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
    df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split()))) # Count number of unique words
    df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Unique words as a percentage of all words
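# TF-IDF stage: word-level TF-IDF (1-2 grams) on description plus raw token counts on title,
# merged into one sparse matrix by FeatureUnion; Russian stopwords are removed.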
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
## I increased max_features for the description vectorizer. It did not change my score much, but it may be worth investigating.
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
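# The OOF ridge predictions are appended as an extra dense feature ('ridge_preds'),
# concatenated in the same train/test row order as df.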
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting | print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test) | conditional_block |
avito-lightgbm-with-ridge-feature-1.py | Fold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
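# Note: get_oof above builds leak-free stacking features -- each training row is predicted by the
# fold model that never saw it, and the test predictions are averaged over the NFOLDS fold models.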
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Week of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
| lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col] = df[col].fillna('Unknown') # assign back; fillna is not in-place
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words don't get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split()))) # Count number of unique words
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Share of unique words (%)
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
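# get_col returns a column picker: the FeatureUnion below feeds the same list of row-dicts
# (df.to_dict('records')) to every sub-vectorizer, and each one reads only its own text column.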
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boost | categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
| random_line_split |
avito-lightgbm-with-ridge-feature-1.py |
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
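# Note: get_oof above builds leak-free stacking features -- each training row is predicted by the
# fold model that never saw it, and the test predictions are averaged over the NFOLDS fold models.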
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
def rmse(y, y0):
assert len(y) == len(y0)
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Week of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col] = df[col].fillna('Unknown') # assign back; fillna is not in-place
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words don't get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split()))) # Count number of unique words
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Share of unique words (%)
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): retu | da x: x[col_name]
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boost | rn lamb | identifier_name |
avito-lightgbm-with-ridge-feature-1.py | Fold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
NFOLDS = 5
SEED = 42
VALID = True
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
for i, (train_index, test_index) in enumerate(kf):
print('\nFold {}'.format(i))
x_tr = x_train[train_index]
y_tr = y[train_index]
x_te = x_train[test_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
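# Note: get_oof above builds leak-free stacking features -- each training row is predicted by the
# fold model that never saw it, and the test predictions are averaged over the NFOLDS fold models.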
def cleanName(text):
try:
textProc = text.lower()
# textProc = " ".join(map(str.strip, re.split('(\d+)',textProc)))
#regex = re.compile(u'[^[:alpha:]]')
#textProc = regex.sub(" ", textProc)
textProc = re.sub('[!@#$_“”¨«»®´·º½¾¿¡§£₤‘’]', '', textProc)
textProc = " ".join(textProc.split())
return textProc
except:
return "name error"
def rmse(y, y0):
assert len(y) == len(y0 | Stage")
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(df.price.mean(),inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
df["Week of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
df[col] = df[col].fillna('Unknown') # assign back; fillna is not in-place
df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
df['desc_punc'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
df['title'] = df['title'].apply(lambda x: cleanName(x))
df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
df[cols] = df[cols].astype(str)
df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words don't get treated differently
df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split()))) # Count number of unique words
df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Share of unique words (%)
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": 'word',
"token_pattern": r'\w{1,}',
"sublinear_tf": True,
"dtype": np.float32,
"norm": 'l2',
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
def get_col(col_name): return lambda x: x[col_name]
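# get_col returns a column picker: the FeatureUnion below feeds the same list of row-dicts
# (df.to_dict('records')) to every sub-vectorizer, and each one reads only its own text column.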
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
vectorizer = FeatureUnion([
('description',TfidfVectorizer(
ngram_range=(1, 2),
max_features=17000,
**tfidf_para,
preprocessor=get_col('description'))),
('title',CountVectorizer(
ngram_range=(1, 2),
stop_words = russian_stop,
#max_features=7000,
preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
'task': 'train',
'boosting | )
return np.sqrt(np.mean(np.power((y - y0), 2)))
print("\nData Load | identifier_body |
uiLibFlexbox.go | GetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
} | func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
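// Greedy search over the row count: lay the slots into at most `lines` rows, and if we run out
// of rows before every slot is consumed, retry the whole layout with one more row.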
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on at | random_line_split |
|
uiLibFlexbox.go | GetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug |
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
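// Greedy search over the row count: lay the slots into at most `lines` rows, and if we run out
// of rows before every slot is consumed, retry the whole layout with one more row.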
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely | {
fmt.Print(" }")
} | conditional_block |
uiLibFlexbox.go | GetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) | () int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
}
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
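// Greedy search over the row count: lay the slots into at most `lines` rows, and if we run out
// of rows before every slot is consumed, retry the whole layout with one more row.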
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely on | Len | identifier_name |
uiLibFlexbox.go | GetOrder() int {
return slot.Order
}
func (slot FlexboxSlot) fyRespectMinimumSize() bool {
return slot.RespectMinimumSize
}
// -- Solver --
func fyFlexboxGetPreferredSize(details FlexboxContainer) frenyard.Vec2i {
// Do note, this is in main/cross format.
mainCrossSize := frenyard.Vec2i{}
for _, v := range details.Slots {
sz := v.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2iUnlimited(), details.DirVertical, false)
mainCrossSize.X += sz.X
mainCrossSize.Y = frenyard.Max(mainCrossSize.Y, sz.Y)
}
return mainCrossSize.ConditionalTranspose(details.DirVertical)
}
type fyFlexboxRow struct {
elem []fyFlexboxSlotlike
area []frenyard.Area2i
fullArea frenyard.Area2i
}
func (slot fyFlexboxRow) fyGrowShrink() (int32, int32) {
return 1, 1
}
// Critical to the whole thing and it's full of guesswork due to the vertical flags and axis juggling.
func (slot fyFlexboxRow) fyMainCrossSizeForMainCrossLimits(limits frenyard.Vec2i, vertical bool, debug bool) frenyard.Vec2i {
if debug {
fmt.Print("R{")
}
// Main & Cross in here refer to in the row flexbox, not the outer flexbox.
maximumMain := int32(0)
presentAreaCross := slot.fullArea.Size().ConditionalTranspose(vertical).Y
for _, v := range slot.elem {
lim := frenyard.Vec2i{X: limits.X, Y: presentAreaCross}
if debug {
fmt.Print(" ", limits.X, "x", presentAreaCross)
}
rcs := v.fyMainCrossSizeForMainCrossLimits(lim, vertical, false)
maximumMain = frenyard.Max(maximumMain, rcs.X)
if debug {
fmt.Print(":", rcs.X, "x", rcs.Y)
}
}
if debug {
fmt.Print(" }")
}
return frenyard.Vec2i{X: maximumMain, Y: presentAreaCross}
}
func (slot fyFlexboxRow) fyCalcBasis(cross int32, vertical bool) int32 {
return slot.fyMainCrossSizeForMainCrossLimits(frenyard.Vec2i{X: frenyard.SizeUnlimited, Y: cross}, vertical, false).X
}
func (slot fyFlexboxRow) fyGetOrder() int {
return 0
}
func (slot fyFlexboxRow) fyRespectMinimumSize() bool {
return false
}
// Do be aware, this only handles the one relevant axis.
func (slot *fyFlexboxRow) Fill(area frenyard.Area2i, vertical bool) {
for k := range slot.area {
if !vertical {
// Rows perpendicular to X
slot.area[k].X = area.X
} else {
// Rows perpendicular to Y
slot.area[k].Y = area.Y
}
}
slot.fullArea = area
}
type fyFlexboxSortingCollection struct {
// The collection being sorted.
slots []fyFlexboxSlotlike
// Given a SOURCE slot index, what is the RESULTING slot index?
originalToDisplayIndices []int
// Given a RESULTING slot index, what is the SOURCE slot index?
displayToOriginalIndices []int
}
func (sc fyFlexboxSortingCollection) Len() int {
return len(sc.slots)
}
func (sc fyFlexboxSortingCollection) Less(i int, j int) bool {
order1 := sc.slots[i].fyGetOrder()
order2 := sc.slots[j].fyGetOrder()
// Order1 != order2?
if order1 < order2 {
return true
}
if order1 > order2 {
return false
}
// No, they're equal. Sort by original index.
if sc.displayToOriginalIndices[i] < sc.displayToOriginalIndices[j] {
return true
}
return false
}
func (sc fyFlexboxSortingCollection) Swap(i int, j int) |
func fyFlexboxSolveLayout(details FlexboxContainer, limits frenyard.Vec2i) []frenyard.Area2i {
// Stage 1. Element order pre-processing (DirReverse)
slots := make([]fyFlexboxSlotlike, len(details.Slots))
originalToDisplayIndices := make([]int, len(details.Slots))
displayToOriginalIndices := make([]int, len(details.Slots))
for k, v := range details.Slots {
originalToDisplayIndices[k] = k
displayToOriginalIndices[k] = k
slots[k] = v
}
sort.Sort(fyFlexboxSortingCollection{
slots: slots,
originalToDisplayIndices: originalToDisplayIndices,
displayToOriginalIndices: displayToOriginalIndices,
})
// Stage 2. Wrapping (if relevant)
out := make([]frenyard.Area2i, len(slots))
mainCrossLimits := limits.ConditionalTranspose(details.DirVertical)
shouldWrap := fyFlexboxSolveLine(details, slots, out, mainCrossLimits, details.Debug)
// One row, so this is simple
rows := []fyFlexboxRow{{slots, out, frenyard.UnionArea2i(out)}}
if shouldWrap && details.WrapMode != FlexboxWrapModeNone {
// Wrapping has to start. Oh no...
// Do note, lines is implicitly limited because of the "one slot cannot wrap" rule.
lines := int32(2)
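// Greedy search over the row count: lay the slots into at most `lines` rows, and if we run out
// of rows before every slot is consumed, retry the whole layout with one more row.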
for {
rows = make([]fyFlexboxRow, lines)
lineStartSlot := 0
consumedSlots := 0
currentLine := int32(0)
for consumedSlots < len(slots) {
// If it wraps...
if fyFlexboxSolveLine(details, slots[lineStartSlot:consumedSlots+1], out[lineStartSlot:consumedSlots+1], mainCrossLimits, false) {
// Revert it & finish the line.
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
fyFlexboxSolveLine(details, rows[currentLine].elem, rows[currentLine].area, mainCrossLimits, false)
// Now setup the new line.
currentLine++
lineStartSlot = consumedSlots
if currentLine == lines {
// Out of range, cancel before rows[currentLine] brings it to a halt
break
}
// Retry the same slot (slot not consumed)
} else {
// Success! Advance.
consumedSlots++
}
}
if currentLine < lines {
// Finish last line
rows[currentLine] = fyFlexboxRow{
slots[lineStartSlot:consumedSlots],
out[lineStartSlot:consumedSlots],
frenyard.UnionArea2i(out),
}
break
}
lines++
}
}
if details.WrapMode != FlexboxWrapModeNone {
// Stage 3. Row compression
rowAreas := make([]frenyard.Area2i, len(rows))
rowSlots := make([]fyFlexboxSlotlike, len(rows))
for rk, row := range rows {
rowSlots[rk] = row
}
fyFlexboxSolveLine(FlexboxContainer{
DirVertical: !details.DirVertical,
WrapMode: FlexboxWrapModeNone,
}, rowSlots, rowAreas, frenyard.Vec2i{mainCrossLimits.Y, mainCrossLimits.X}, false)
for rk, row := range rows {
row.Fill(rowAreas[rk], !details.DirVertical)
}
} else {
// Stage 3. Row setup
if mainCrossLimits.Y != frenyard.SizeUnlimited {
rows[0].Fill(frenyard.Area2iOfSize(mainCrossLimits.ConditionalTranspose(details.DirVertical)), !details.DirVertical)
}
}
// Stage 4. Element order post-processing (DirReverse)
realOutput := make([]frenyard.Area2i, len(out))
for k, v := range originalToDisplayIndices {
realOutput[k] = out[v]
}
return realOutput
}
// Returns true if should wrap. Will not return true ever for only one slot as this cannot wrap.
func fyFlexboxSolveLine(details FlexboxContainer, slots []fyFlexboxSlotlike, out []frenyard.Area2i, mainCrossLimits frenyard.Vec2i, debug bool) bool {
if len(slots) == 0 {
// Nowhere to output. Also, some calculations rely | {
backup := sc.slots[i]
backup2 := sc.originalToDisplayIndices[i]
backup3 := sc.displayToOriginalIndices[i]
sc.slots[i] = sc.slots[j]
sc.originalToDisplayIndices[i] = sc.originalToDisplayIndices[j]
sc.displayToOriginalIndices[i] = sc.displayToOriginalIndices[j]
sc.slots[j] = backup
sc.originalToDisplayIndices[j] = backup2
sc.displayToOriginalIndices[j] = backup3
} | identifier_body |
typegenAutoConfig.ts | : 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
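*
* A custom matcher (illustrative only) that also accepts a "<TypeName>Model" declaration:
*
* typeMatch: (type, defaultRegex) => [
* defaultRegex,
* new RegExp(`(?:interface|type|class)\\s+(${type.name}Model)\\W`, 'g'),
* ]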
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type,
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
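*
* @example
* // values are illustrative
* mapping: {
* UUID: 'string',
* DateTime: 'Date',
* }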
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
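* @example
* // Sketch of typical wiring via makeSchema's sourceTypes option; module paths,
* // aliases and the `types` variable are placeholders.
* makeSchema({
* types,
* sourceTypes: {
* modules: [{ module: path.join(__dirname, 'db.ts'), alias: 'db' }],
* },
* contextType: { module: path.join(__dirname, 'context.ts'), export: 'Context' },
* })
*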
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {} | }
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${allImportsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: context | const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap, | random_line_split |
typegenAutoConfig.ts | 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
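*
* A custom matcher (illustrative only) that also accepts a "<TypeName>Model" declaration:
*
* typeMatch: (type, defaultRegex) => [
* defaultRegex,
* new RegExp(`(?:interface|type|class)\\s+(${type.name}Model)\\W`, 'g'),
* ]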
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of typesNames or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type.
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
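// --- Editor-added sketch; not part of the original source. ---
// Putting the options together: a hypothetical configuration that reads source
// types from the module sketched above, skips relay-style wrapper types, and
// pins the "Date" scalar to a string. Paths, aliases, and type names are
// assumptions for illustration only.
const exampleOptions: SourceTypesConfigOptions = {
  modules: [exampleSourceTypeModule, { module: 'typescript', alias: 'ts' }],
  skipTypes: ['Query', 'Mutation', 'Subscription', /(.*?)Edge/, /(.*?)Connection/],
  mapping: { Date: 'string' },
  debug: false,
}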
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
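// Editor note (illustrative): a mapping such as { User: "t.UserModel" } or a
// context type given as the string "ctx.Context" contributes the aliases "t"
// and "ctx" to the forced-import set built below, so each of those aliases
// must be satisfied by one of the configured modules or an error is logged.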
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) | if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: context | {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) { | conditional_block |
typegenAutoConfig.ts | 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of type names or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type.
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function typegenAutoConfig(options: SourceTypesConfigOptions, contextType: TypingImport | undefined) | const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: context | {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
| identifier_body |
typegenAutoConfig.ts | 'string',
Float: 'number',
Boolean: 'boolean',
}
export interface SourceTypeModule {
/**
* The module for where to look for the types. This uses the node resolution algorithm via require.resolve,
* so if this lives in node_modules, you can just provide the module name otherwise you should provide the
* absolute path to the file.
*/
module: string
/**
* When we import the module, we use `import * as ____` to prevent conflicts. This alias should be a name
* that doesn't conflict with any other types, usually a short lowercase name.
*/
alias: string
/**
* Provides a custom approach to matching for the type
*
* If not provided, the default implementation is:
*
* (type) => [ new RegExp(`(?:interface|type|class|enum)\\s+(${type.name})\\W`, "g"), ]
*/
typeMatch?: (type: GraphQLNamedType, defaultRegex: RegExp) => RegExp | RegExp[]
/**
* A list of type names or regular expressions matching type names that should be resolved by this import.
* Provide an empty array if you wish to use the file for context and ensure no other types are matched.
*/
onlyTypes?: (string | RegExp)[]
/**
* By default the import is configured `import * as alias from`, setting glob to false will change this to
* `import alias from`
*/
glob?: false
}
export interface SourceTypesConfigOptions {
/** Any headers to prefix on the generated type file */
headers?: string[]
/**
* Array of SourceTypeModule's to look in and match the type names against.
*
* @example
* modules: [
* { module: 'typescript', alias: 'ts' },
* { module: path.join(__dirname, '../sourceTypes'), alias: 'b' },
* ]
*/
modules: SourceTypeModule[]
/**
* Types that should not be matched for a source type.
*
* By default this is set to ['Query', 'Mutation', 'Subscription']
*
* @example
* skipTypes: ['Query', 'Mutation', /(.*?)Edge/, /(.*?)Connection/]
*/
skipTypes?: (string | RegExp)[]
/**
* If debug is set to true, this will log out info about all types found, skipped, etc. for the type
* generation files. @default false
*/
debug?: boolean
/**
* If provided this will be used for the source types rather than the auto-resolve mechanism above. Useful
* as an override for one-off cases, or for scalar source types.
*/
mapping?: Record<string, string>
}
/**
* This is an approach for handling type definition auto-resolution. It is designed to handle the most common
* cases, as can be seen in the examples / the simplicity of the implementation.
*
* If you wish to do something more complex, involving full AST parsing, etc, you can provide a different
* function to the `typegenInfo` property of the `makeSchema` config.
*
* @param options
*/
export function | (options: SourceTypesConfigOptions, contextType: TypingImport | undefined) {
return async (schema: GraphQLSchema, outputPath: string): Promise<TypegenInfo> => {
const {
headers,
skipTypes = ['Query', 'Mutation', 'Subscription'],
mapping: _sourceTypeMap,
debug,
} = options
const typeMap = schema.getTypeMap()
const typesToIgnore = new Set<string>()
const typesToIgnoreRegex: RegExp[] = []
const allImportsMap: Record<string, string> = {}
const importsMap: Record<string, [string, boolean]> = {}
const sourceTypeMap: Record<string, string> = {
...SCALAR_TYPES,
..._sourceTypeMap,
}
const forceImports = new Set(
objValues(sourceTypeMap)
.concat(typeof contextType === 'string' ? contextType || '' : '')
.map((t) => {
const match = t.match(/^(\w+)\./)
return match ? match[1] : null
})
.filter((f) => f)
)
skipTypes.forEach((skip) => {
if (typeof skip === 'string') {
typesToIgnore.add(skip)
} else if (skip instanceof RegExp) {
typesToIgnoreRegex.push(skip)
} else {
throw new Error('Invalid type for options.skipTypes, expected string or RegExp')
}
})
const path = nodeImports().path
const typeSources = await Promise.all(
options.modules.map(async (source) => {
// Keeping all of this in here so if we don't have any sources
// e.g. in the Playground, it doesn't break things.
const { module: pathOrModule, glob = true, onlyTypes, alias, typeMatch } = source
if (path.isAbsolute(pathOrModule) && path.extname(pathOrModule) !== '.ts') {
return console.warn(
`Nexus Schema Typegen: Expected module ${pathOrModule} to be an absolute path to a TypeScript module, skipping.`
)
}
let resolvedPath: string
let fileContents: string
try {
resolvedPath = require.resolve(pathOrModule, {
paths: [process.cwd()],
})
if (path.extname(resolvedPath) !== '.ts') {
resolvedPath = findTypingForFile(resolvedPath, pathOrModule)
}
fileContents = String(await nodeImports().fs.promises.readFile(resolvedPath, 'utf-8'))
} catch (e) {
if (e instanceof Error && e.message.indexOf('Cannot find module') !== -1) {
console.error(`GraphQL Nexus: Unable to find file or module ${pathOrModule}, skipping`)
} else {
console.error(e.message)
}
return null
}
const importPath = (
path.isAbsolute(pathOrModule) ? relativePathTo(resolvedPath, outputPath) : pathOrModule
).replace(typeScriptFileExtension, '')
if (allImportsMap[alias] && allImportsMap[alias] !== importPath) {
return console.warn(
`Nexus Schema Typegen: Cannot have multiple type sources ${importsMap[alias]} and ${pathOrModule} with the same alias ${alias}, skipping`
)
}
allImportsMap[alias] = importPath
if (forceImports.has(alias)) {
importsMap[alias] = [importPath, glob]
forceImports.delete(alias)
}
return {
alias,
glob,
importPath,
fileContents,
onlyTypes,
typeMatch: typeMatch || defaultTypeMatcher,
}
})
)
const builtinScalars = new Set(Object.keys(SCALAR_TYPES))
Object.keys(typeMap).forEach((typeName) => {
if (typeName.startsWith('__')) {
return
}
if (typesToIgnore.has(typeName)) {
return
}
if (typesToIgnoreRegex.some((r) => r.test(typeName))) {
return
}
if (sourceTypeMap[typeName]) {
return
}
if (builtinScalars.has(typeName)) {
return
}
const type = schema.getType(typeName)
// For now we'll say that if it's output type it can be backed
if (isOutputType(type)) {
for (let i = 0; i < typeSources.length; i++) {
const typeSource = typeSources[i]
if (!typeSource) {
continue
}
// If we've specified an array of "onlyTypes" to match ensure the
// `typeName` falls within that list.
if (typeSource.onlyTypes) {
if (
!typeSource.onlyTypes.some((t) => {
return t instanceof RegExp ? t.test(typeName) : t === typeName
})
) {
continue
}
}
const { fileContents, importPath, glob, alias, typeMatch } = typeSource
const typeRegex = typeMatch(type, defaultTypeMatcher(type)[0])
const matched = firstMatch(fileContents, Array.isArray(typeRegex) ? typeRegex : [typeRegex])
if (matched) {
if (debug) {
log(`Matched type - ${typeName} in "${importPath}" - ${alias}.${matched[1]}`)
}
importsMap[alias] = [importPath, glob]
sourceTypeMap[typeName] = `${alias}.${matched[1]}`
} else {
if (debug) {
log(`No match for ${typeName} in "${importPath}" using ${typeRegex}`)
}
}
}
}
})
if (forceImports.size > 0) {
console.error(`Missing required typegen import: ${Array.from(forceImports)}`)
}
const imports: string[] = []
Object.keys(importsMap)
.sort()
.forEach((alias) => {
const [importPath, glob] = importsMap[alias]
const safeImportPath = importPath.replace(/\\+/g, '/')
imports.push(`import type ${glob ? '* as ' : ''}${alias} from "${safeImportPath}"`)
})
const typegenInfo = {
headers: headers || [TYPEGEN_HEADER],
sourceTypeMap,
imports,
contextTypeImport: context | typegenAutoConfig | identifier_name |
csbref.go | }, {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
} | ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
}
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
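// newCollide returns the time until the centers of obj and b first come within
// the collision radius (rsq is the squared radius), found as the earlier root of
// |p + v*t|^2 = rsq. It returns 0 when the pods already overlap and 10 (meaning
// "no collision this turn") when they are separating or no real root exists.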
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
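// cpCollide reports whether the segment from p1 to p2 passes within the
// checkpoint radius: it clamps the projection of cp onto the segment, then
// compares the squared distance of that closest point against cpRSQ.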
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan(scanner.Text | impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1 | random_line_split |
csbref.go | {5425, 2838}}}
var possibleMapCount = len(possibleMaps)
func (p *point) dot(n point) float64 {
return p.x*n.x + p.y*n.y
}
func (p *point) norm() float64 {
return (math.Sqrt(((p.x * p.x) + (p.y * p.y))))
}
func (g *game) nextTurn() {
t := 1.0
curps := [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
for t > 0.0 {
first := t
cli := 0
clj := 0
for i := podCount - 1; i > 0; i-- {
for j := i - 1; j >= 0; j-- {
tx := g[i].newCollide(&g[j], podRSQ)
if tx <= first {
first = tx
cli = i
clj = j
}
}
}
g.forwardTime(first)
t -= first
if cli != clj {
g.bounce(cli, clj)
}
if t > 0 {
for i := 0; i < podCount; i++ {
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
curps = [4]point{g[0].p, g[1].p, g[2].p, g[3].p}
}
}
for i := 0; i < podCount; i++ {
g[i].endTurn(i)
if (cpCollide(curps[i], g[i].p, globalCp[g[i].next], cpRSQ)) > 0 {
g[i].passCheckpoint(i)
}
}
playerTimeout[0]--
playerTimeout[1]--
}
const EPSILON = .00001
func (g *game) bounce(p1 int, p2 int) {
oa := &g[p1]
ob := &g[p2]
normal := ob.p
normal.x -= oa.p.x
normal.y -= oa.p.y
dd := normal.norm()
normal.x /= dd
normal.y /= dd
relv := oa.s
relv.x -= ob.s.x
relv.y -= ob.s.y
var m1 float64 = 1
var m2 float64 = 1
if oa.shieldtimer == 4 {
m1 = 0.1
}
if ob.shieldtimer == 4 {
m2 = 0.1
}
force := normal.dot(relv) / (m1 + m2)
if force < 120 {
force += 120
} else {
force += force
}
impulse := normal
impulse.x *= -force
impulse.y *= -force
oa.s.x += impulse.x * m1
oa.s.y += impulse.y * m1
ob.s.x += -impulse.x * m2
ob.s.y += -impulse.y * m2
if dd <= 800 |
}
func getAngle(start point, end point) float64 {
dx := (end.x - start.x)
dy := (end.y - start.y)
a := (math.Atan2(dy, dx))
return a
}
func distance2(p1 point, p2 point) distanceSqType {
x := distanceSqType(p2.x - p1.x)
x = x * x
y := distanceSqType(p2.y - p1.y)
y = y * y
return x + y
}
func distance(p1 point, p2 point) float64 {
return (math.Sqrt(float64(distance2(p1, p2))))
}
func (obj *object) passCheckpoint(podn int) {
obj.next = (obj.next + 1)
if obj.next >= globalNumCp {
obj.next = globalNumCp - 1
obj.won = true
}
if podn < 2 {
playerTimeout[0] = 100
} else {
playerTimeout[1] = 100
}
}
func (g *game) forwardTime(t float64) {
for i := 0; i < podCount; i++ {
obj := &g[i]
obj.p.x += (obj.s.x * (t))
obj.p.y += (obj.s.y * (t))
}
}
func round(x float64) float64 {
x = (math.Floor((x) + 0.50000))
return x
}
func (obj *object) newCollide(b *object, rsq float64) float64 {
p := point{b.p.x - obj.p.x, b.p.y - obj.p.y}
pLength2 := p.x*p.x + p.y*p.y
if pLength2 <= rsq {
return 0
}
v := point{(b.s.x - obj.s.x), (b.s.y - obj.s.y)}
dot := p.dot(v)
if dot > 0 {
return 10
}
vLength2 := v.x*v.x + v.y*v.y
disc := dot*dot - vLength2*(pLength2-rsq)
if disc < 0 {
return 10
}
discdist := (math.Sqrt(disc))
t1 := (-dot - discdist) / vLength2
return float64(t1)
}
func cpCollide(p1 point, p2 point, cp point, cpRSQ float64) byte {
dx := (p2.x - p1.x)
dy := (p2.y - p1.y)
pp := p1
pd2 := dx*dx + dy*dy
if pd2 != 0 {
u := ((cp.x-p1.x)*dx + (cp.y-p1.y)*dy) / pd2
if u > 1 {
pp = p2
} else if u > 0 {
pp.x = p1.x + u*dx
pp.y = p1.y + u*dy
}
}
pp.x -= cp.x
pp.y -= cp.y
if ((pp.x * pp.x) + (pp.y * pp.y)) < cpRSQ {
return 1
}
return 0
}
func (obj *object) applyRotate(p point) {
a := getAngle(obj.p, p)
rotateAngle := obj.diffAngle(p)
if rotateAngle < -maxRotate {
a = obj.angle - maxRotate
}
if rotateAngle > maxRotate {
a = obj.angle + maxRotate
}
obj.angle = a
/*for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}*/
}
func (obj *object) applyRotateFirst(rotateAngle float64) {
obj.angle = rotateAngle
for obj.angle < 0 {
obj.angle += fullCircle
}
for obj.angle > fullCircle {
obj.angle -= fullCircle
}
}
func (obj *object) applyThrust(t int) {
cs, cc := math.Sincos(obj.angle)
obj.s.x += (cc * float64(t))
obj.s.y += (cs * float64(t))
}
func (obj *object) endTurn(podn int) {
if obj.s.x > 0 {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
} else {
obj.s.x = (math.Trunc((obj.s.x * frictionVal)))
}
if obj.s.y > 0 {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
} else {
obj.s.y = (math.Trunc((obj.s.y * frictionVal)))
}
obj.p.x = round(obj.p.x)
obj.p.y = round(obj.p.y)
if obj.shieldtimer > 0 {
obj.shieldtimer--
}
}
func (obj *object) diffAngle(p point) float64 {
a := getAngle(obj.p, p)
da := math.Mod(a-obj.angle, math.Pi*2)
return math.Mod(2*da, math.Pi*2) - da
}
func testMode() {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
fmt.Sscan(scanner.Text(), &globalNumCp)
for i := 0; i < globalNumCp; i++ {
var x, y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
globalCp[i] = point{x, y}
}
var nTest int
scanner.Scan()
fmt.Sscan | {
dd -= 800
oa.p.x += (normal.x * -(-dd/2 + EPSILON))
oa.p.y += (normal.y * -(-dd/2 + EPSILON))
ob.p.x += (normal.x * (-dd/2 + EPSILON))
ob.p.y += (normal.y * (-dd/2 + EPSILON))
} | conditional_block |
csbref.go | py float64
var thrust string
var t int
scanner.Scan()
fmt.Sscan(scanner.Text(), &px, &py, &thrust)
t, err := strconv.Atoi(thrust)
if err != nil {
t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
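// initialiseGame places the four pods on the line through checkpoint 0 that is
// perpendicular to the checkpoint-0-to-checkpoint-1 direction, at offsets of
// 500 and 1500 units on either side, with their next checkpoint set to index 1.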
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) | {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil { | identifier_body |
|
csbref.go | t = 0
if thrust == "SHIELD" {
g[i].shieldtimer = 4
} else if thrust == "BOOST" {
t = 650
if g[i].boosted == 0 {
g[i].boosted = 1
} else {
t = 200
}
}
}
if g[i].shieldtimer > 0 {
t = 0
}
dest := point{px, py}
if dest == g[i].p {
continue
}
if tn == 0 {
g[i].angle = 0
angle := g[i].diffAngle(dest)
g[i].applyRotateFirst(angle)
} else {
g[i].applyRotate(dest)
}
g[i].applyThrust(t)
}
g.nextTurn()
for i := 0; i < podCount; i++ {
p := &g[i]
fmt.Printf("%d %d %d %d %f %d %d %d\n", int(p.p.x), int(p.p.y), int(p.s.x), int(p.s.y), p.angle*radToDeg, p.next, p.shieldtimer, p.boosted)
}
}
}
var startPointMult = [4]point{{500, -500}, {-500, 500}, {1500, -1500}, {-1500, 1500}}
func initialiseGame(g *game, m gameMap) {
cp1minus0 := point{}
cp1minus0.x = m[1].x - m[0].x
cp1minus0.y = m[1].y - m[0].y
dd := distance(m[1], m[0])
cp1minus0.x /= dd
cp1minus0.y /= dd
for podN := range g {
p := &g[podN]
p.angle = -1 * degToRad
p.next = 1
p.p.x = round(m[0].x + cp1minus0.y*startPointMult[podN].x)
p.p.y = round(m[0].y + cp1minus0.x*startPointMult[podN].y)
}
}
func main() {
validateMode := false
if len(os.Args) > 1 {
if os.Args[1] == "-test" {
testMode()
return
}
}
playerTimeout[0] = 100
playerTimeout[1] = 100
rand.Seed(time.Now().UTC().UnixNano())
scanner := bufio.NewScanner(os.Stdin)
started := false
var players int
for started == false {
scanner.Scan()
startText := strings.Split(scanner.Text(), " ")
if startText[0] == "###Start" {
var err error
players, err = strconv.Atoi(startText[1])
if err != nil || players != 2 {
fmt.Fprintln(os.Stderr, "Error with player count input")
os.Exit(-1)
}
started = true
} else if startText[0] == "###Seed" {
v, err := strconv.ParseInt(startText[1], 10, 64)
fmt.Fprintln(os.Stderr, v)
if err == nil {
rand.Seed(v)
}
} else if startText[0] == "###Validate" {
validateMode = true
players = 2
started = true
} else {
fmt.Fprintln(os.Stderr, "Unsupported startup command: ", startText[0])
os.Exit(0)
}
}
currentMap := possibleMaps[rand.Intn(possibleMapCount)]
for i, v := range currentMap {
currentMap[i].x = v.x + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
currentMap[i].y = v.y + float64(rand.Intn(checkpointGenerationGap*2+1)-checkpointGenerationGap)
}
for i := len(currentMap) - 1; i > 0; i-- {
v := rand.Intn(i)
currentMap[v], currentMap[i] = currentMap[i], currentMap[v]
}
if validateMode {
var ncp int
scanner.Scan()
fmt.Sscan(scanner.Text(), &ncp)
currentMap = make(gameMap, ncp)
for i := range currentMap {
var x float64
var y float64
scanner.Scan()
fmt.Sscan(scanner.Text(), &x, &y)
currentMap[i].x = x
currentMap[i].y = y
}
}
//setup global checkpoints
laps := 3
for i := 0; i < 3; i++ {
for _, v := range currentMap {
globalCp[globalNumCp] = v
globalNumCp++
}
}
//add last checkpoint at the end
globalCp[globalNumCp] = currentMap[0]
globalNumCp++
var g game
initialiseGame(&g, currentMap)
outputSetup(currentMap, 2, laps)
for turnCount := 0; turnCount < 500; turnCount++ {
var moves [4]playerMove
for player := 0; player < players; player++ {
givePlayerOutput(&g, player, currentMap)
theseMoves, valid := getPlayerInput(player, scanner)
if valid == false {
fmt.Fprintln(os.Stderr, "INVALID INPUT", theseMoves)
lostGame(player)
}
for i, v := range theseMoves {
moves[player*2+i] = v
}
}
for podN := range g {
pod := &g[podN]
move := &moves[podN]
if move.boost {
if pod.boosted == 0 {
pod.boosted = 1
move.thrust = 650
} else {
move.thrust = 200
}
}
if move.shield {
pod.shieldtimer = 4
}
if pod.shieldtimer > 0 {
move.thrust = 0
}
if move.target == pod.p {
continue
}
if turnCount == 0 {
pod.angle = 0
pod.angle = pod.diffAngle(move.target)
} else {
pod.applyRotate(move.target)
}
pod.applyThrust(moves[podN].thrust)
}
g.nextTurn()
if playerTimeout[0] <= 0 {
lostGame(0)
}
if playerTimeout[1] <= 0 {
lostGame(1)
}
for podN := range g {
pod := &g[podN]
if pod.won {
if podN < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
}
}
winner := 0
best := 0.0
for podN := range g {
score := float64(g[podN].next * 1000000)
score -= distance(g[podN].p, globalCp[g[podN].next])
if score > best {
best = score
winner = podN
}
}
if winner < 2 {
wonGame(0)
} else {
wonGame(1)
}
}
func lostGame(player int) {
winner := 0
loser := 1
if player == winner {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func wonGame(player int) {
winner := 0
loser := 1
if player == loser {
winner, loser = loser, winner
}
fmt.Printf("###End %d %d\n", winner, loser)
os.Exit(0)
}
func getPlayerInput(player int, scanner *bufio.Scanner) ([2]playerMove, bool) {
pm := [2]playerMove{}
valid := true
fmt.Printf("###Output %d 2\n", player)
for i := range pm {
if scanner.Scan() == false {
os.Exit(0)
}
var thrust string
fmt.Sscanf(scanner.Text(), "%f %f %s\n", &pm[i].target.x, &pm[i].target.y, &thrust)
pm[i].thrust = 0
switch thrust {
case "SHIELD":
pm[i].shield = true
case "BOOST":
pm[i].boost = true
default:
v, err := strconv.Atoi(thrust)
if err != nil {
valid = false
} else {
if v > 200 {
valid = false
}
pm[i].thrust = v
}
}
}
return pm, valid
}
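// Editor note (illustrative): each pod answer read by getPlayerInput above is a
// single line of the form "<x> <y> <thrust>", e.g. "8000 4500 100",
// "3000 7000 SHIELD", or "12000 2000 BOOST"; a numeric thrust above 200 or a
// non-numeric value other than SHIELD/BOOST marks the move as invalid.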
func | outputSetup | identifier_name |
|
mod.rs | Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not really sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
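// --- Editor-added sketch; not part of the original source. ---
// A minimal illustration of the sender trait above, backed by an async-std
// channel. `ExampleChannelSender` and its field are hypothetical names; a real
// transport would serialize the message and write it to its socket instead.
pub struct ExampleChannelSender {
    outgoing: async_std::sync::Sender<ButtplugMessageUnion>,
}
impl ButtplugRemoteClientConnectorSender for ExampleChannelSender {
    fn send(&self, msg: ButtplugMessageUnion) {
        // The trait method is synchronous, so block on the async channel send.
        async_std::task::block_on(self.outgoing.send(msg));
    }
    fn close(&self) {
        // Nothing to tear down for a plain in-memory channel.
    }
}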
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be sent to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn | (&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens | get_remote_send | identifier_name |
mod.rs | -ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not really sure if this is sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be sent to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send | {
remote_sender.send(buttplug_fut_msg.0.clone());
} | conditional_block |
|
mod.rs | Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not really sure if this is Sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be sent to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
| // We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before | loop { | random_line_split |
mod.rs | Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not really sure if this is Sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be ok.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be sent to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future | loop {
// Wrap both possible results in a StreamValue enum; either stream
// may end without yielding anything, which maps to NoValue.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens | {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
| identifier_body |
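// ---------------------------------------------------------------------------
// Editorial sketch: the connector loop in the rows above multiplexes two
// async-std receivers by racing one future per stream. The stand-alone toy
// below shows the same pattern; it is not part of the original crate and
// assumes the same async-std prelude items (FutureExt::race, StreamExt::next)
// that mod.rs itself imports.
use async_std::prelude::{FutureExt, StreamExt};
use async_std::sync::channel;
use async_std::task;

enum Winner {
    NoValue,
    Left(u32),
    Right(String),
}

fn main() {
    task::block_on(async {
        let (left_send, mut left_recv) = channel::<u32>(4);
        let (_right_send, mut right_recv) = channel::<String>(4);
        // Only the left channel has a message queued, so the left future wins.
        left_send.send(7).await;
        let winner = async {
            match left_recv.next().await {
                Some(n) => Winner::Left(n),
                None => Winner::NoValue,
            }
        }
        .race(async {
            match right_recv.next().await {
                Some(s) => Winner::Right(s),
                None => Winner::NoValue,
            }
        })
        .await;
        if let Winner::Left(n) = winner {
            println!("left stream won with {}", n);
        }
    });
}
// ---------------------------------------------------------------------------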
main.rs | distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else | ;
// To allow multiple instances of a Habitat application in Kubernetes, a
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write_all(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
| {
PackageIdent::from_str(pkg_ident_str)?
} | conditional_block |
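// ---------------------------------------------------------------------------
// Editorial sketch: `valid_natural_number`, referenced by `.validator(...)` in
// the clap builder above, is defined outside this excerpt. A minimal validator
// with the clap 2.x signature (a hypothetical reconstruction, not the
// project's actual code) could look like this:
use std::result;

fn valid_natural_number(val: String) -> result::Result<(), String> {
    match val.parse::<u64>() {
        Ok(n) if n > 0 => Ok(()),
        _ => Err(format!("{} is not a natural number", val)),
    }
}

fn main() {
    assert!(valid_natural_number("3".to_string()).is_ok());
    assert!(valid_natural_number("0".to_string()).is_err());
    assert!(valid_natural_number("habitat".to_string()).is_err());
}
// ---------------------------------------------------------------------------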
main.rs | distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn | (ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of a Habitat application in Kubernetes, a
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write_all(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
| start | identifier_name |
main.rs | IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name`, fuzzy form of a package
// identifier to ensure that update strategies will work if desired
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of a Habitat application in Kubernetes, a
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write_all(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b") | random_line_split |