file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
sandbox.ts | import invariant from 'tiny-invariant';
import { datasetParser, isElement, evaluate, createSequence } from 'yuzu-utils';
import { IComponentConstructable } from 'yuzu/types';
import { Component } from 'yuzu';
import { createContext, IContext } from './context';
export type entrySelectorFn = (sbx: Sandbox<any>) => boolean | HTMLElement[];
export type sandboxComponentOptions = [
IComponentConstructable<Component<any, any>>,
Record<string, any>,
];
export interface ISandboxRegistryEntry {
component: IComponentConstructable<Component<any, any>>;
selector: string | entrySelectorFn;
[key: string]: any;
}
export interface ISandboxOptions {
components?: (
| IComponentConstructable<Component<any, any>>
| sandboxComponentOptions
)[];
root: HTMLElement | string;
context: IContext<any>;
id: string;
}
const nextSbUid = createSequence();
const nextChildUid = createSequence();
/**
* A sandbox can be used to initialize a set of components based on an element's innerHTML.
*
* Lets say we have the following component:
*
* ```js
* class Counter extends Component {
* static root = '.Counter';
*
* // other stuff here ...
* }
* ```
*
* We can register the component inside a sandbox like this:
*
* ```js
* const sandbox = new Sandbox({
* components: [Counter],
* id: 'main', // optional
* });
*
* sandbox.mount('#main');
* ```
*
* In this way the sandbox will attach itself to the element matching `#main` and will traverse its children
* looking for every `.Counter` element attaching an instance of the Counter component onto it.
*
* To prevent a component for being initialized (for example when you want to initialize it at a later moment)
* just add a `data-skip` attribute to its root element.
*
* @class
* @param {object} config
* @param {Component[]|[Component, object][]} [config.components] Array of components constructor or array with [ComponentConstructor, options]
* @param {HTMLElement|string} [config.root=document.body] Root element of the sandbox. Either a DOM element or a CSS selector
* @param {string} [config.id] ID of the sandbox
* @property {string} $id Sandbox internal id
* @property {HTMLElement} $el Sandbox root DOM element
* @property {Context} $ctx Internal [context](/packages/yuzu-application/api/context). Used to share data across child instances
* @property {object[]} $registry Registered components storage
* @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
* will be traversed on `.mount()` initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
* By default will use `document.body` as mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
* Setups the sandbox context passed in the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
public resolveSelector(
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
* Reads inline components from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
id: nextChildUid(this.$id + '-c.'),
...options,
...inlineOptions,
component: ComponentConstructor,
el,
});
}
/**
* ```js
* stop()
* ```
*
* **DEPRECATED!** Use `sandbox.destroy()` instead.
*
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.stop();
*/
public async stop(): Promise<void> {
if (process.env.NODE_ENV !== 'production') {
this.$warn(
`Sandbox.stop is deprecated. Use the "destroy" method instead`,
);
}
return this.destroy();
}
/**
* ```js
* destroy()
* ```
*
* Enhances `Component.destroy()`.
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.destroy();
*/
public async destroy(): Promise<void> |
/**
* Removes events and associated store
*
* @ignore
*/
public clear(): void {
this.$ctx = undefined; // release the context
this.off('beforeStart');
this.off('start');
this.off('error');
this.off('beforeStop');
this.off('stop');
}
}
| {
this.emit('beforeStop');
await this.beforeDestroy();
this.removeListeners();
try {
if (this.$el) {
this.$el.removeAttribute(Sandbox.SB_DATA_ATTR);
}
await this.destroyRefs();
this.$active = false;
} catch (e) {
this.emit('error', e);
return Promise.reject(e);
}
this.$instances.clear();
this.emit('stop');
this.clear();
return super.destroy();
} | identifier_body |
sandbox.ts | import invariant from 'tiny-invariant';
import { datasetParser, isElement, evaluate, createSequence } from 'yuzu-utils';
import { IComponentConstructable } from 'yuzu/types';
import { Component } from 'yuzu';
import { createContext, IContext } from './context';
export type entrySelectorFn = (sbx: Sandbox<any>) => boolean | HTMLElement[];
export type sandboxComponentOptions = [
IComponentConstructable<Component<any, any>>,
Record<string, any>,
];
export interface ISandboxRegistryEntry {
component: IComponentConstructable<Component<any, any>>;
selector: string | entrySelectorFn;
[key: string]: any;
}
export interface ISandboxOptions {
components?: (
| IComponentConstructable<Component<any, any>>
| sandboxComponentOptions
)[];
root: HTMLElement | string;
context: IContext<any>;
id: string;
}
const nextSbUid = createSequence();
const nextChildUid = createSequence();
/**
* A sandbox can be used to initialize a set of components based on an element's innerHTML.
*
* Lets say we have the following component:
*
* ```js
* class Counter extends Component {
* static root = '.Counter';
*
* // other stuff here ...
* }
* ```
*
* We can register the component inside a sandbox like this:
*
* ```js
* const sandbox = new Sandbox({
* components: [Counter],
* id: 'main', // optional
* });
*
* sandbox.mount('#main');
* ```
*
* In this way the sandbox will attach itself to the element matching `#main` and will traverse its children
* looking for every `.Counter` element attaching an instance of the Counter component onto it.
*
* To prevent a component for being initialized (for example when you want to initialize it at a later moment)
* just add a `data-skip` attribute to its root element.
*
* @class
* @param {object} config
* @param {Component[]|[Component, object][]} [config.components] Array of components constructor or array with [ComponentConstructor, options]
* @param {HTMLElement|string} [config.root=document.body] Root element of the sandbox. Either a DOM element or a CSS selector
* @param {string} [config.id] ID of the sandbox
* @property {string} $id Sandbox internal id
* @property {HTMLElement} $el Sandbox root DOM element
* @property {Context} $ctx Internal [context](/packages/yuzu-application/api/context). Used to share data across child instances
* @property {object[]} $registry Registered components storage
* @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
* will be traversed on `.mount()` initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
* By default will use `document.body` as mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
* Setups the sandbox context passed in the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
public | (
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
* Reads inline components from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
id: nextChildUid(this.$id + '-c.'),
...options,
...inlineOptions,
component: ComponentConstructor,
el,
});
}
/**
* ```js
* stop()
* ```
*
* **DEPRECATED!** Use `sandbox.destroy()` instead.
*
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.stop();
*/
public async stop(): Promise<void> {
if (process.env.NODE_ENV !== 'production') {
this.$warn(
`Sandbox.stop is deprecated. Use the "destroy" method instead`,
);
}
return this.destroy();
}
/**
* ```js
* destroy()
* ```
*
* Enhances `Component.destroy()`.
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.destroy();
*/
public async destroy(): Promise<void> {
this.emit('beforeStop');
await this.beforeDestroy();
this.removeListeners();
try {
if (this.$el) {
this.$el.removeAttribute(Sandbox.SB_DATA_ATTR);
}
await this.destroyRefs();
this.$active = false;
} catch (e) {
this.emit('error', e);
return Promise.reject(e);
}
this.$instances.clear();
this.emit('stop');
this.clear();
return super.destroy();
}
/**
* Removes events and associated store
*
* @ignore
*/
public clear(): void {
this.$ctx = undefined; // release the context
this.off('beforeStart');
this.off('start');
this.off('error');
this.off('beforeStop');
this.off('stop');
}
}
| resolveSelector | identifier_name |
HexSnake.py | from time import sleep
from random import random as rnd
from numpy import zeros
import pygame
from pygame.locals import *
further = lambda x, y, d: {
0: (x ,y-1),
1: (x-1,y-1),
2: (x-1 ,y),
3: (x,y+1),
4: (x+1 ,y+1),
5: (x+1 ,y)
}[d]
def dir(x,y,i,j):
d=3 if abs(x-i)>1 or abs(y-j)>1 else 0
v=(x-i)/abs(x-i) if x!=i else 0
h=(y-j)/abs(y-j) if y!=j else 0
return (d+{
(0,-1):0,
(-1,-1):1,
(-1,0):2,
(0,1):3,
(1,1):4,
(1,0):5
}[(v,h)])%6
if True: # objects
HEAD=-3
BODY=-2
TAIL=-1
APPLE=1
BORDER=-10
KILLER=-20
EXTRAAPPLE=11
EXTRABORDER=12
LENGTHEN=13
SHORTEN=14
REVERSE=15
EXTRAKILLER=16
EXTRASCORE=17
BONUS=10
EMPTY=0
if True: # colors
colorSnake=(2,250,200)
colorGrass=(0,100,20)
colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
|
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
self.body=[(x,y)]+self.body[:-1]+t
return self.eat(x,y)
def eat(self,x,y):
a=self.field[x][y]
if a in [BODY,HEAD,TAIL,KILLER,BORDER] :
# snake=[]
self.field[x][y]=EMPTY
return True
else :
self.field[x][y]=-3
if a == APPLE :
self.field[x][y]=-2
self.score+=1
self.setobj(1,APPLE)
if self.score%bonus_frequency==0 : self.setobj(1,BONUS) # balance?
if difficulty==4:
if rnd()<.5: self.setobj(2,BORDER)
if rnd()<.5: self.setobj(2,KILLER)
if rnd()<.2: self.setobj(1,SHORTEN)
if rnd()<.2: self.setobj(1,LENGTHEN)
if a == BONUS :
a=bonuses[int(len(bonuses)*rnd())]
if difficulty==4:
if rnd()<.4: self.setobj(1,REVERSE)
if rnd()<.3: self.setobj(3,EXTRASCORE)
if a == EXTRAAPPLE :
self.setobj(1,APPLE)
if a == EXTRABORDER :
self.setobj(1,BORDER if loop else KILLER)
if a == EXTRASCORE :
self.score+=bonus_frequency
if a == EXTRAKILLER :
self.setobj(1,KILLER)
if a == SHORTEN :
for c in self.body[len(self.body)/2+1:]:
self.field[c[0]][c[1]]=EMPTY
self.body=self.body[:len(self.body)/2+1]
if a == LENGTHEN :
for c in self.body[1:]:
self.field[c[0]][c[1]]=BODY
if a == REVERSE :
self.field[x][y]=-1
self.body=self.body[::-1]
x,y=self.body[0]
i,j=self.body[1]
self.cdir=dir(x,y,i,j)
self.field[x][y]=-3
return False
def drawtext():
for i in range(len(Rules)):
screen.blit(textRules[i], (0, 15*i))
screen.blit(textScore, (display_width-100, 5))
screen.blit(fontPause.render(str(snake.score),1,colorText),(display_width-90, 20))
screen.blit(textSpeed, (display_width-100, display_height-65))
screen.blit(fontPause.render(str(speedlevel),1,colorText),(display_width-90, display_height-50))
if pause:
screen.blit(textPause, (display_width/2-100,display_height/2))
if lose:
screen.blit(textGO, (display_width/2-100,display_height/2-40))
snake = Snake(s,s,2,1)
snake.initfield(s)
snake.crawl()
snake.setobj(1,APPLE)
while True: # Game loop
if not pause and not lose: lose = snake.crawl()
snake.drawfield()
drawtext()
pygame.display.update()
pygame.time.wait(int(1.25**(ZEROSPEED-speedlevel)))
for event in pygame.event.get():
if event.type == QUIT :
pygame.quit()
quit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE :
pygame.quit()
quit()
if event.key==K_p :
pause= not pause
if event.key==K_r :
lose= not lose
if event.key in [K_q,K_a,K_e,K_d]:
snake.cdir-={K_q:-1,K_a:-2,K_e:1,K_d:2}[event.key]
snake.cdir%=6
if event.key in [K_w,K_s]:
speedlevel +={K_w:1,K_s:-1}[event.key]
if speedlevel<1:
speedlevel=1
pause= not pause
| rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry | identifier_body |
HexSnake.py | from time import sleep
from random import random as rnd
from numpy import zeros
import pygame
from pygame.locals import *
further = lambda x, y, d: {
0: (x ,y-1),
1: (x-1,y-1),
2: (x-1 ,y),
3: (x,y+1),
4: (x+1 ,y+1),
5: (x+1 ,y)
}[d]
def dir(x,y,i,j):
d=3 if abs(x-i)>1 or abs(y-j)>1 else 0
v=(x-i)/abs(x-i) if x!=i else 0
h=(y-j)/abs(y-j) if y!=j else 0
return (d+{
(0,-1):0,
(-1,-1):1,
(-1,0):2,
(0,1):3,
(1,1):4,
(1,0):5
}[(v,h)])%6
if True: # objects
HEAD=-3
BODY=-2
TAIL=-1
APPLE=1
BORDER=-10
KILLER=-20
EXTRAAPPLE=11
EXTRABORDER=12
LENGTHEN=13
SHORTEN=14
REVERSE=15
EXTRAKILLER=16
EXTRASCORE=17
BONUS=10
EMPTY=0
if True: # colors
colorSnake=(2,250,200)
colorGrass=(0,100,20)
colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
|
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
self.body=[(x,y)]+self.body[:-1]+t
return self.eat(x,y)
def eat(self,x,y):
a=self.field[x][y]
if a in [BODY,HEAD,TAIL,KILLER,BORDER] :
# snake=[]
self.field[x][y]=EMPTY
return True
else :
self.field[x][y]=-3
if a == APPLE :
self.field[x][y]=-2
self.score+=1
self.setobj(1,APPLE)
if self.score%bonus_frequency==0 : self.setobj(1,BONUS) # balance?
if difficulty==4:
if rnd()<.5: self.setobj(2,BORDER)
if rnd()<.5: self.setobj(2,KILLER)
if rnd()<.2: self.setobj(1,SHORTEN)
if rnd()<.2: self.setobj(1,LENGTHEN)
if a == BONUS :
a=bonuses[int(len(bonuses)*rnd())]
if difficulty==4:
if rnd()<.4: self.setobj(1,REVERSE)
if rnd()<.3: self.setobj(3,EXTRASCORE)
if a == EXTRAAPPLE :
self.setobj(1,APPLE)
if a == EXTRABORDER :
self.setobj(1,BORDER if loop else KILLER)
if a == EXTRASCORE :
self.score+=bonus_frequency
if a == EXTRAKILLER :
self.setobj(1,KILLER)
if a == SHORTEN :
for c in self.body[len(self.body)/2+1:]:
self.field[c[0]][c[1]]=EMPTY
self.body=self.body[:len(self.body)/2+1]
if a == LENGTHEN :
for c in self.body[1:]:
self.field[c[0]][c[1]]=BODY
if a == REVERSE :
self.field[x][y]=-1
self.body=self.body[::-1]
x,y=self.body[0]
i,j=self.body[1]
self.cdir=dir(x,y,i,j)
self.field[x][y]=-3
return False
def drawtext():
for i in range(len(Rules)):
screen.blit(textRules[i], (0, 15*i))
screen.blit(textScore, (display_width-100, 5))
screen.blit(fontPause.render(str(snake.score),1,colorText),(display_width-90, 20))
screen.blit(textSpeed, (display_width-100, display_height-65))
screen.blit(fontPause.render(str(speedlevel),1,colorText),(display_width-90, display_height-50))
if pause:
screen.blit(textPause, (display_width/2-100,display_height/2))
if lose:
screen.blit(textGO, (display_width/2-100,display_height/2-40))
snake = Snake(s,s,2,1)
snake.initfield(s)
snake.crawl()
snake.setobj(1,APPLE)
while True: # Game loop
if not pause and not lose: lose = snake.crawl()
snake.drawfield()
drawtext()
pygame.display.update()
pygame.time.wait(int(1.25**(ZEROSPEED-speedlevel)))
for event in pygame.event.get():
if event.type == QUIT :
pygame.quit()
quit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE :
pygame.quit()
quit()
if event.key==K_p :
pause= not pause
if event.key==K_r :
lose= not lose
if event.key in [K_q,K_a,K_e,K_d]:
snake.cdir-={K_q:-1,K_a:-2,K_e:1,K_d:2}[event.key]
snake.cdir%=6
if event.key in [K_w,K_s]:
speedlevel +={K_w:1,K_s:-1}[event.key]
if speedlevel<1:
speedlevel=1
pause= not pause
| cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2) | conditional_block |
HexSnake.py | from time import sleep
from random import random as rnd
from numpy import zeros
import pygame
from pygame.locals import *
further = lambda x, y, d: {
0: (x ,y-1),
1: (x-1,y-1),
2: (x-1 ,y),
3: (x,y+1),
4: (x+1 ,y+1),
5: (x+1 ,y)
}[d]
def dir(x,y,i,j):
d=3 if abs(x-i)>1 or abs(y-j)>1 else 0
v=(x-i)/abs(x-i) if x!=i else 0
h=(y-j)/abs(y-j) if y!=j else 0
return (d+{
(0,-1):0,
(-1,-1):1,
(-1,0):2,
(0,1):3,
(1,1):4,
(1,0):5
}[(v,h)])%6
if True: # objects
HEAD=-3
BODY=-2
TAIL=-1
APPLE=1
BORDER=-10
KILLER=-20
EXTRAAPPLE=11
EXTRABORDER=12
LENGTHEN=13
SHORTEN=14
REVERSE=15
EXTRAKILLER=16
EXTRASCORE=17
BONUS=10
EMPTY=0
if True: # colors
colorSnake=(2,250,200)
colorGrass=(0,100,20)
colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step | dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
self.body=[(x,y)]+self.body[:-1]+t
return self.eat(x,y)
def eat(self,x,y):
a=self.field[x][y]
if a in [BODY,HEAD,TAIL,KILLER,BORDER] :
# snake=[]
self.field[x][y]=EMPTY
return True
else :
self.field[x][y]=-3
if a == APPLE :
self.field[x][y]=-2
self.score+=1
self.setobj(1,APPLE)
if self.score%bonus_frequency==0 : self.setobj(1,BONUS) # balance?
if difficulty==4:
if rnd()<.5: self.setobj(2,BORDER)
if rnd()<.5: self.setobj(2,KILLER)
if rnd()<.2: self.setobj(1,SHORTEN)
if rnd()<.2: self.setobj(1,LENGTHEN)
if a == BONUS :
a=bonuses[int(len(bonuses)*rnd())]
if difficulty==4:
if rnd()<.4: self.setobj(1,REVERSE)
if rnd()<.3: self.setobj(3,EXTRASCORE)
if a == EXTRAAPPLE :
self.setobj(1,APPLE)
if a == EXTRABORDER :
self.setobj(1,BORDER if loop else KILLER)
if a == EXTRASCORE :
self.score+=bonus_frequency
if a == EXTRAKILLER :
self.setobj(1,KILLER)
if a == SHORTEN :
for c in self.body[len(self.body)/2+1:]:
self.field[c[0]][c[1]]=EMPTY
self.body=self.body[:len(self.body)/2+1]
if a == LENGTHEN :
for c in self.body[1:]:
self.field[c[0]][c[1]]=BODY
if a == REVERSE :
self.field[x][y]=-1
self.body=self.body[::-1]
x,y=self.body[0]
i,j=self.body[1]
self.cdir=dir(x,y,i,j)
self.field[x][y]=-3
return False
def drawtext():
for i in range(len(Rules)):
screen.blit(textRules[i], (0, 15*i))
screen.blit(textScore, (display_width-100, 5))
screen.blit(fontPause.render(str(snake.score),1,colorText),(display_width-90, 20))
screen.blit(textSpeed, (display_width-100, display_height-65))
screen.blit(fontPause.render(str(speedlevel),1,colorText),(display_width-90, display_height-50))
if pause:
screen.blit(textPause, (display_width/2-100,display_height/2))
if lose:
screen.blit(textGO, (display_width/2-100,display_height/2-40))
snake = Snake(s,s,2,1)
snake.initfield(s)
snake.crawl()
snake.setobj(1,APPLE)
while True: # Game loop
if not pause and not lose: lose = snake.crawl()
snake.drawfield()
drawtext()
pygame.display.update()
pygame.time.wait(int(1.25**(ZEROSPEED-speedlevel)))
for event in pygame.event.get():
if event.type == QUIT :
pygame.quit()
quit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE :
pygame.quit()
quit()
if event.key==K_p :
pause= not pause
if event.key==K_r :
lose= not lose
if event.key in [K_q,K_a,K_e,K_d]:
snake.cdir-={K_q:-1,K_a:-2,K_e:1,K_d:2}[event.key]
snake.cdir%=6
if event.key in [K_w,K_s]:
speedlevel +={K_w:1,K_s:-1}[event.key]
if speedlevel<1:
speedlevel=1
pause= not pause | random_line_split |
|
HexSnake.py | from time import sleep
from random import random as rnd
from numpy import zeros
import pygame
from pygame.locals import *
further = lambda x, y, d: {
0: (x ,y-1),
1: (x-1,y-1),
2: (x-1 ,y),
3: (x,y+1),
4: (x+1 ,y+1),
5: (x+1 ,y)
}[d]
def | (x,y,i,j):
d=3 if abs(x-i)>1 or abs(y-j)>1 else 0
v=(x-i)/abs(x-i) if x!=i else 0
h=(y-j)/abs(y-j) if y!=j else 0
return (d+{
(0,-1):0,
(-1,-1):1,
(-1,0):2,
(0,1):3,
(1,1):4,
(1,0):5
}[(v,h)])%6
if True: # objects
HEAD=-3
BODY=-2
TAIL=-1
APPLE=1
BORDER=-10
KILLER=-20
EXTRAAPPLE=11
EXTRABORDER=12
LENGTHEN=13
SHORTEN=14
REVERSE=15
EXTRAKILLER=16
EXTRASCORE=17
BONUS=10
EMPTY=0
if True: # colors
colorSnake=(2,250,200)
colorGrass=(0,100,20)
colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
self.body=[(x,y)]+self.body[:-1]+t
return self.eat(x,y)
def eat(self,x,y):
a=self.field[x][y]
if a in [BODY,HEAD,TAIL,KILLER,BORDER] :
# snake=[]
self.field[x][y]=EMPTY
return True
else :
self.field[x][y]=-3
if a == APPLE :
self.field[x][y]=-2
self.score+=1
self.setobj(1,APPLE)
if self.score%bonus_frequency==0 : self.setobj(1,BONUS) # balance?
if difficulty==4:
if rnd()<.5: self.setobj(2,BORDER)
if rnd()<.5: self.setobj(2,KILLER)
if rnd()<.2: self.setobj(1,SHORTEN)
if rnd()<.2: self.setobj(1,LENGTHEN)
if a == BONUS :
a=bonuses[int(len(bonuses)*rnd())]
if difficulty==4:
if rnd()<.4: self.setobj(1,REVERSE)
if rnd()<.3: self.setobj(3,EXTRASCORE)
if a == EXTRAAPPLE :
self.setobj(1,APPLE)
if a == EXTRABORDER :
self.setobj(1,BORDER if loop else KILLER)
if a == EXTRASCORE :
self.score+=bonus_frequency
if a == EXTRAKILLER :
self.setobj(1,KILLER)
if a == SHORTEN :
for c in self.body[len(self.body)/2+1:]:
self.field[c[0]][c[1]]=EMPTY
self.body=self.body[:len(self.body)/2+1]
if a == LENGTHEN :
for c in self.body[1:]:
self.field[c[0]][c[1]]=BODY
if a == REVERSE :
self.field[x][y]=-1
self.body=self.body[::-1]
x,y=self.body[0]
i,j=self.body[1]
self.cdir=dir(x,y,i,j)
self.field[x][y]=-3
return False
def drawtext():
for i in range(len(Rules)):
screen.blit(textRules[i], (0, 15*i))
screen.blit(textScore, (display_width-100, 5))
screen.blit(fontPause.render(str(snake.score),1,colorText),(display_width-90, 20))
screen.blit(textSpeed, (display_width-100, display_height-65))
screen.blit(fontPause.render(str(speedlevel),1,colorText),(display_width-90, display_height-50))
if pause:
screen.blit(textPause, (display_width/2-100,display_height/2))
if lose:
screen.blit(textGO, (display_width/2-100,display_height/2-40))
snake = Snake(s,s,2,1)
snake.initfield(s)
snake.crawl()
snake.setobj(1,APPLE)
while True: # Game loop
if not pause and not lose: lose = snake.crawl()
snake.drawfield()
drawtext()
pygame.display.update()
pygame.time.wait(int(1.25**(ZEROSPEED-speedlevel)))
for event in pygame.event.get():
if event.type == QUIT :
pygame.quit()
quit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE :
pygame.quit()
quit()
if event.key==K_p :
pause= not pause
if event.key==K_r :
lose= not lose
if event.key in [K_q,K_a,K_e,K_d]:
snake.cdir-={K_q:-1,K_a:-2,K_e:1,K_d:2}[event.key]
snake.cdir%=6
if event.key in [K_w,K_s]:
speedlevel +={K_w:1,K_s:-1}[event.key]
if speedlevel<1:
speedlevel=1
pause= not pause
| dir | identifier_name |
helpers.ts | 'use strict';
import * as vscode from 'vscode';
import * as cp from 'child_process';
import * as fs from 'fs';
import * as os from 'os';
import { resolve } from 'path';
export default class Helpers {
static wordMatchRegex = /[\w\d\-_\.\:\\\/@]+/g;
static phpParser:any = null;
static cachedParseFunction:any = null;
static modelsCache: Array<string>;
static modelsCacheTime: number = 0;
static outputChannel: vscode.OutputChannel|null = null;
static tags:any = {
config: {classes: ['Config'] , functions: ['config']},
mix: {classes: [] , functions: ['mix']},
route: {classes: ['Route'] , functions: ['route']},
trans: {classes: ['Lang'] , functions: ['__', 'trans', '@lang']},
validation: {classes: ['Validator'] , functions: ['validate', 'sometimes', 'rules']},
view: {classes: ['View'] , functions: ['view', 'markdown', 'links', '@extends', '@component', '@include', '@each']},
env: {classes: [] , functions: ['env']},
auth: {classes: ['Gate'] , functions: ['can', '@can', '@cannot', '@canany']},
asset: {classes: [] , functions: ['asset']},
model: {classes: [] , functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
* @param string
*/
static projectPath(path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, ""); | }
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
static async runPhp(code: string) : Promise<string> {
code = code.replace(/\"/g, "\\\"");
if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
code = code.replace(/\$/g, "\\$");
code = code.replace(/\\\\'/g, '\\\\\\\\\'');
code = code.replace(/\\\\"/g, '\\\\\\\\\"');
}
let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
command = command.replace("{code}", code);
let out = new Promise<string>(function (resolve, error) {
cp.exec(command,
{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
function (err, stdout, stderr) {
if (stdout.length > 0) {
resolve(stdout);
} else {
if (Helpers.outputChannel !== null) {
Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
}
error(stderr);
}
}
);
});
return out;
}
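// Illustrative usage (not part of the original file): runLaravel() wraps a PHP snippet with the
// project's autoloader and console kernel, executes it through runPhp(), and resolves with only
// the text printed between the two ___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_*___ markers, e.g.
//   Helpers.runLaravel("echo json_encode(config('app.name'));")
//       .then(raw => console.log(JSON.parse(raw)));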
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
* Convert a PHP variable definition to a JavaScript value.
* @param code
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1] === 'string' && typeof match[6] === 'string' && typeof match2[1] === 'string') {
out = this.parseFunction(match2[3], position - (match.index + match[1].length + match[6].length + match2[1].length), level + 1);
} else if (typeof match[1] === 'string' && typeof match[6]=== 'string' && typeof match[7]=== 'string') {
var textParameters = [];
var paramIndex = null;
var paramIndexCounter = 0;
var paramsPosition = position - (match.index + match[1].length + match[6].length);
var functionInsideParameter;
if (match[7].length >= 4 && (functionInsideParameter = this.parseFunction(match[7], paramsPosition))) {
return functionInsideParameter;
}
while ((match2 = paramsRegex.exec(match[7])) !== null) {
textParameters.push(match2[3]);
if (paramsPosition >= match2.index && typeof match2[0] === 'string' && paramsPosition <= match2.index + match2[0].length) {
paramIndex = paramIndexCounter;
}
paramIndexCounter++;
}
var functionParametrs = [];
for (let i in textParameters) {
functionParametrs.push(this.evalPhp(textParameters[i]));
}
out = {
class: match[3],
function: match[4],
paramIndex: paramIndex,
parameters: functionParametrs,
textParameters: textParameters
};
}
if (level === 0) {
Helpers.cachedParseFunction = {text, position, out};
}
}
}
}
return out;
}
/**
* Parse php function call from vscode editor.
*
* @param document
* @param position
*/
static parseDocumentFunction(document: vscode.TextDocument, position: vscode.Position) {
var pos = document.offsetAt(position);
return Helpers.parseFunction(document.getText(), pos);
}
/**
* Get laravel models as array.
*
* @returns array<string>
*/
static getModels() : Promise<Array<string>> {
var self = this;
return new Promise<Array<string>>(function (resolve, reject) {
if (Math.floor(Date.now()/1000) - self.modelsCacheTime < 60) {
return resolve(self.modelsCache);
} else {
Helpers.runLaravel(`
echo json_encode(array_values(array_filter(array_map(function ($name) {return app()->getNamespace().str_replace([app_path().'/', app_path().'\\\\', '.php', '/'], ['', '', '', '\\\\'], $name);}, array_merge(glob(app_path('*')), glob(app_path('Models/*')))), function ($class) {
return class_exists($class) && is_subclass_of($class, 'Illuminate\\\\Database\\\\Eloquent\\\\Model');
})));
`).then(function (result) {
var models = JSON.parse(result);
self.modelsCache = models;
self.modelsCacheTime = Math.floor(Date.now() / 1000); // refresh the cache timestamp so the 60s check above takes effect
resolve(models);
})
.catch(function (error) {
console.error(error);
resolve([]);
});
}
});
}
/**
* Get indent space based on user configuration
*/
static getSpacer() : string {
const editor = vscode.window.activeTextEditor;
if (editor && editor.options.insertSpaces) {
return ' '.repeat(<number>editor.options.tabSize);
}
return '\t';
}
} | return basePath + path; | random_line_split |
helpers.ts | 'use strict';
import * as vscode from 'vscode';
import * as cp from 'child_process';
import * as fs from 'fs';
import * as os from 'os';
import { resolve } from 'path';
export default class Helpers {
static wordMatchRegex = /[\w\d\-_\.\:\\\/@]+/g;
static phpParser:any = null;
static cachedParseFunction:any = null;
static modelsCache: Array<string>;
static modelsCacheTime: number = 0;
static outputChannel: vscode.OutputChannel|null = null;
static tags:any = {
config: {classes: ['Config'] , functions: ['config']},
mix: {classes: [] , functions: ['mix']},
route: {classes: ['Route'] , functions: ['route']},
trans: {classes: ['Lang'] , functions: ['__', 'trans', '@lang']},
validation: {classes: ['Validator'] , functions: ['validate', 'sometimes', 'rules']},
view: {classes: ['View'] , functions: ['view', 'markdown', 'links', '@extends', '@component', '@include', '@each']},
env: {classes: [] , functions: ['env']},
auth: {classes: ['Gate'] , functions: ['can', '@can', '@cannot', '@canany']},
asset: {classes: [] , functions: ['asset']},
model: {classes: [] , functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
* @returns string
*/
static | (path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, "");
return basePath + path;
}
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
static async runPhp(code: string) : Promise<string> {
code = code.replace(/\"/g, "\\\"");
if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
code = code.replace(/\$/g, "\\$");
code = code.replace(/\\\\'/g, '\\\\\\\\\'');
code = code.replace(/\\\\"/g, '\\\\\\\\\"');
}
let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
command = command.replace("{code}", code);
let out = new Promise<string>(function (resolve, error) {
cp.exec(command,
{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
function (err, stdout, stderr) {
if (stdout.length > 0) {
resolve(stdout);
} else {
if (Helpers.outputChannel !== null) {
Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
}
error(stderr);
}
}
);
});
return out;
}
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
* Convert a PHP variable definition to a JavaScript value.
* @param code
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1] === 'string' && typeof match[6] === 'string' && typeof match2[1] === 'string') {
out = this.parseFunction(match2[3], position - (match.index + match[1].length + match[6].length + match2[1].length), level + 1);
} else if (typeof match[1] === 'string' && typeof match[6]=== 'string' && typeof match[7]=== 'string') {
var textParameters = [];
var paramIndex = null;
var paramIndexCounter = 0;
var paramsPosition = position - (match.index + match[1].length + match[6].length);
var functionInsideParameter;
if (match[7].length >= 4 && (functionInsideParameter = this.parseFunction(match[7], paramsPosition))) {
return functionInsideParameter;
}
while ((match2 = paramsRegex.exec(match[7])) !== null) {
textParameters.push(match2[3]);
if (paramsPosition >= match2.index && typeof match2[0] === 'string' && paramsPosition <= match2.index + match2[0].length) {
paramIndex = paramIndexCounter;
}
paramIndexCounter++;
}
var functionParametrs = [];
for (let i in textParameters) {
functionParametrs.push(this.evalPhp(textParameters[i]));
}
out = {
class: match[3],
function: match[4],
paramIndex: paramIndex,
parameters: functionParametrs,
textParameters: textParameters
};
}
if (level === 0) {
Helpers.cachedParseFunction = {text, position, out};
}
}
}
}
return out;
}
/**
* Parse php function call from vscode editor.
*
* @param document
* @param position
*/
static parseDocumentFunction(document: vscode.TextDocument, position: vscode.Position) {
var pos = document.offsetAt(position);
return Helpers.parseFunction(document.getText(), pos);
}
/**
* Get laravel models as array.
*
* @returns array<string>
*/
static getModels() : Promise<Array<string>> {
var self = this;
return new Promise<Array<string>>(function (resolve, reject) {
if (Math.floor(Date.now()/1000) - self.modelsCacheTime < 60) {
return resolve(self.modelsCache);
} else {
Helpers.runLaravel(`
echo json_encode(array_values(array_filter(array_map(function ($name) {return app()->getNamespace().str_replace([app_path().'/', app_path().'\\\\', '.php', '/'], ['', '', '', '\\\\'], $name);}, array_merge(glob(app_path('*')), glob(app_path('Models/*')))), function ($class) {
return class_exists($class) && is_subclass_of($class, 'Illuminate\\\\Database\\\\Eloquent\\\\Model');
})));
`).then(function (result) {
var models = JSON.parse(result);
self.modelsCache = models;
self.modelsCacheTime = Math.floor(Date.now() / 1000); // refresh the cache timestamp so the 60s check above takes effect
resolve(models);
})
.catch(function (error) {
console.error(error);
resolve([]);
});
}
});
}
/**
* Get indent space based on user configuration
*/
static getSpacer() : string {
const editor = vscode.window.activeTextEditor;
if (editor && editor.options.insertSpaces) {
return ' '.repeat(<number>editor.options.tabSize);
}
return '\t';
}
}
| projectPath | identifier_name |
helpers.ts | 'use strict';
import * as vscode from 'vscode';
import * as cp from 'child_process';
import * as fs from 'fs';
import * as os from 'os';
import { resolve } from 'path';
export default class Helpers {
static wordMatchRegex = /[\w\d\-_\.\:\\\/@]+/g;
static phpParser:any = null;
static cachedParseFunction:any = null;
static modelsCache: Array<string>;
static modelsCacheTime: number = 0;
static outputChannel: vscode.OutputChannel|null = null;
static tags:any = {
config: {classes: ['Config'] , functions: ['config']},
mix: {classes: [] , functions: ['mix']},
route: {classes: ['Route'] , functions: ['route']},
trans: {classes: ['Lang'] , functions: ['__', 'trans', '@lang']},
validation: {classes: ['Validator'] , functions: ['validate', 'sometimes', 'rules']},
view: {classes: ['View'] , functions: ['view', 'markdown', 'links', '@extends', '@component', '@include', '@each']},
env: {classes: [] , functions: ['env']},
auth: {classes: ['Gate'] , functions: ['can', '@can', '@cannot', '@canany']},
asset: {classes: [] , functions: ['asset']},
model: {classes: [] , functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
* @returns string
*/
static projectPath(path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, "");
return basePath + path;
}
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
static async runPhp(code: string) : Promise<string> |
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
* Convert a PHP variable definition to a JavaScript value.
* @param code
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1] === 'string' && typeof match[6] === 'string' && typeof match2[1] === 'string') {
out = this.parseFunction(match2[3], position - (match.index + match[1].length + match[6].length + match2[1].length), level + 1);
} else if (typeof match[1] === 'string' && typeof match[6]=== 'string' && typeof match[7]=== 'string') {
var textParameters = [];
var paramIndex = null;
var paramIndexCounter = 0;
var paramsPosition = position - (match.index + match[1].length + match[6].length);
var functionInsideParameter;
if (match[7].length >= 4 && (functionInsideParameter = this.parseFunction(match[7], paramsPosition))) {
return functionInsideParameter;
}
while ((match2 = paramsRegex.exec(match[7])) !== null) {
textParameters.push(match2[3]);
if (paramsPosition >= match2.index && typeof match2[0] === 'string' && paramsPosition <= match2.index + match2[0].length) {
paramIndex = paramIndexCounter;
}
paramIndexCounter++;
}
var functionParametrs = [];
for (let i in textParameters) {
functionParametrs.push(this.evalPhp(textParameters[i]));
}
out = {
class: match[3],
function: match[4],
paramIndex: paramIndex,
parameters: functionParametrs,
textParameters: textParameters
};
}
if (level === 0) {
Helpers.cachedParseFunction = {text, position, out};
}
}
}
}
return out;
}
/**
* Parse php function call from vscode editor.
*
* @param document
* @param position
*/
static parseDocumentFunction(document: vscode.TextDocument, position: vscode.Position) {
var pos = document.offsetAt(position);
return Helpers.parseFunction(document.getText(), pos);
}
/**
* Get laravel models as array.
*
* @returns array<string>
*/
static getModels() : Promise<Array<string>> {
var self = this;
return new Promise<Array<string>>(function (resolve, reject) {
if (Math.floor(Date.now()/1000) - self.modelsCacheTime < 60) {
return resolve(self.modelsCache);
} else {
Helpers.runLaravel(`
echo json_encode(array_values(array_filter(array_map(function ($name) {return app()->getNamespace().str_replace([app_path().'/', app_path().'\\\\', '.php', '/'], ['', '', '', '\\\\'], $name);}, array_merge(glob(app_path('*')), glob(app_path('Models/*')))), function ($class) {
return class_exists($class) && is_subclass_of($class, 'Illuminate\\\\Database\\\\Eloquent\\\\Model');
})));
`).then(function (result) {
var models = JSON.parse(result);
self.modelsCache = models;
self.modelsCacheTime = Math.floor(Date.now() / 1000); // refresh the cache timestamp so the 60s check above takes effect
resolve(models);
})
.catch(function (error) {
console.error(error);
resolve([]);
});
}
});
}
/**
* Get indent space based on user configuration
*/
static getSpacer() : string {
const editor = vscode.window.activeTextEditor;
if (editor && editor.options.insertSpaces) {
return ' '.repeat(<number>editor.options.tabSize);
}
return '\t';
}
}
| {
code = code.replace(/\"/g, "\\\"");
if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
code = code.replace(/\$/g, "\\$");
code = code.replace(/\\\\'/g, '\\\\\\\\\'');
code = code.replace(/\\\\"/g, '\\\\\\\\\"');
}
let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
command = command.replace("{code}", code);
let out = new Promise<string>(function (resolve, error) {
cp.exec(command,
{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
function (err, stdout, stderr) {
if (stdout.length > 0) {
resolve(stdout);
} else {
if (Helpers.outputChannel !== null) {
Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
}
error(stderr);
}
}
);
});
return out;
} | identifier_body |
httpProcess.go | package main
import (
"net/http"
"fmt"
"net"
"log"
"strings"
"encoding/json"
"github.com/go-redis/redis"
"runtime"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"database/sql"
_"github.com/go-sql-driver/mysql"
"math/rand"
)
type User struct { // user model used for the MySQL handshake
UserName string //`json:"username"`
Password string//`json:"password"`
SubKey string//`json:"subkey"`
PubKey string//`json:"pubkey"`
}
type Client struct { // model of a client connecting over TCP
conn net.Conn
message string
}
type StatusMessage struct { // API response model
Message string `json:"status_message"`
StatusCode int `json:"status_code"`
}
type Publish struct { // publish model for a client connecting through the API
Token string //`json:"token"`
Message string //`json:"message"`
Channel string //`json:"channel"`
Pubkey string //`json:"pubkey"`
Subkey string //`json:"subkey"`
}
type Subscribe struct { // subscribe model for a client connecting through the API
Token string
Subkey string
}
type Broker struct { // main model holding every client event for the Server-Sent Events flow
Notifier chan []byte // all events produced by the clients are handled here
newClients chan chan []byte // fired when a new client connects
closingClients chan chan []byte // fired when a client disconnects
clients map[chan []byte]bool // used to broadcast to every connected client
clientIpAdress []string // IP addresses of the clients connected to the system
clientTokens []string // tokens of the clients connected to the system
}
func NewServer() (broker *Broker) { // builds the instance needed to bring the system up
broker = &Broker{
Notifier: make(chan []byte, 1),
newClients: make(chan chan []byte),
closingClients: make(chan chan []byte),
clients: make(map[chan []byte]bool),
}
go broker.listen() // runs concurrently as soon as the system is up
return
}
func (broker *Broker) addNewClient(ipAddress string) (index int){ // records the IP of a newly connected client
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) { // finds the position of the given IP in the slice
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){ // drops the token from the slice when the client leaves
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){ // drops the IP address from the slice when the client leaves
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) { // runs when a client connects to our server
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan // channel that kicks in when a new client connects
newClientFullIp := req.RemoteAddr // grab the client's IP address
index := broker.addNewClient(newClientFullIp) // grab the client's index in the slice
token := broker.clientTokens[index] // the IP index maps to the same index in the token slice
client.SAdd("onlineUserTokens",token) // add this client's token to the unique set in Redis
defer func() { // when the client disconnects this channel is sent as a notification
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify() // notify fires when a client connected to port 3000 disconnects
clientCloseFullIp := req.RemoteAddr // grab the IP address of the disconnecting client
go func() { // when the user disconnects, send this concurrently as a channel notification
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp) // find the slice index of the disconnecting client
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp]) // look up the token at that index and remove it from the unique set in Redis
broker.closeClientToken(posClientIp) // remove the user from the token slice
broker.closeClientIpAddres(posClientIp) // remove the user from the IP slice
broker.closingClients <- messageChan // send the close notification
}()
for { // broadcast the relevant messages to every user, each for its own project
tokenPosition := broker.findIpPosition(newClientFullIp) // token index of the active user
token := broker.clientTokens[tokenPosition] // token of the active user
data := ByteToStr(<-messageChan) // message coming from the channel
parsedData := strings.Split(data,":") // parse the data coming from Redis (it arrives as channel and message)
channels := strings.Split(parsedData[0],"_") // extract the channel info
isFindUserToken := client.SIsMember(channels[0][:4],token).Val() // check the Redis set so each user only receives messages for its own project
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() { // a select statement manages all the concurrently running pieces from a single place
for {
select {
case s := <-broker.newClients: // a new client has connected
broker.clients[s] = true
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients: // a client left and we stop sending it messages
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier: // notify every client connected to the system
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
}
}
}
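// Illustrative client side (assumption, not part of the original file): the SSE stream served by
// ServeHTTP above would typically be consumed from a browser with EventSource, e.g.
//   var es = new EventSource("http://localhost:3000/");
//   es.onmessage = function (e) { console.log(e.data); };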
func check(err error, message string) { // generic error handling
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) { // generic response helper
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string { // generates a random token
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) { // handles the handshake request coming from the API
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) { // validates the handshake data against MySQL
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
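// Example handshake call (illustrative; the JSON field names follow the User struct above and the
// /handshake route registered in main, the key values are placeholders):
//   curl -X POST http://localhost:8000/handshake \
//        -d '{"UserName":"u","Password":"p","SubKey":"<subkey>","PubKey":"<pubkey>"}'
// On success the response carries "Token:<random token>" to be used in later subscribe/publish calls.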
func checkSubKey(subkey string) (isCheck int) { // validates the incoming subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
| err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) { // handles the subscribe request coming from the API
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[:5]).Val() // check whether this user's project has already been validated
if !stateSubkey{
appId := checkSubKey(s.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",s.Subkey[:5])//başarılı ise rediste check yapıldı olarak görünüyor.
}else{
fmt.Println("böyle bir subkey yok.")
return
}
}
s.Subkey =s.Subkey[:5]
broker.clientTokens = append(broker.clientTokens,s.Token)
client.SAdd(s.Subkey[:4],s.Token)
isSubscribe,_ := client.Get(s.Subkey).Int64()
if isSubscribe != 0 {
JsonStatus("Active Subscribe!" ,321, w)
return
}else{
JsonStatus("Successful!" ,200, w)
client.Set(s.Subkey,1,0)
}
go func() { // once subscribed, the user listens for the Redis messages that concern it
pubsub := client.PSubscribe(s.Subkey+"_*")
for{
msg, err := pubsub.ReceiveMessage()
if err != nil {
panic(err)
}
chanRecieveRedisToHttp <- msg.String() // push the message received from Redis into the channel read by the HTTP side
}
}()
}
func publishRedis(w http.ResponseWriter, r *http.Request) { // handles the publish request coming from the API
var p Publish
_ = json.NewDecoder(r.Body).Decode(&p)
if p.Token == "" || p.Message == "" || p.Channel == "" || p.Subkey == "" || p.Pubkey == "" {
JsonStatus("Required Pubkey and Subkey B" ,351,w)
return
}
stateSubkey := client.SIsMember("userSubkeys",p.Subkey[:5]).Val()
statePubkey := client.SIsMember("userPubkeys",p.Pubkey[:5]).Val()
if !stateSubkey || !statePubkey {
appId := checkPupKeySubKey(p.Pubkey,p.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",p.Subkey[:5])//redis set
client.SAdd("userPubkeys",p.Pubkey[:5])
}else{
fmt.Println("böyle bir pubkey subkey yok.")
return
}
}
p.Pubkey = p.Pubkey[:5]
p.Subkey = p.Subkey[:5]
arrayChannels := strings.Split(p.Channel,"|")
arrayMesages := strings.Split(p.Message,"|")
if len(arrayMesages) > len(arrayChannels){
JsonStatus("channel sayisi mesaj sayisindan az olamaz" ,354, w)
return
}
getToken := client.Get("token").Val()
if p.Token ==getToken{
JsonStatus("Data Published" ,200, w)
go func() {
for i,chann := range arrayChannels { // send each message to its corresponding channel (multiple channels/messages supported)
p.Channel = p.Subkey + "_" + chann
if i >= len(arrayMesages) {
p.Message = ""
} else {
p.Message = arrayMesages[i]
}
chanSendHttpToRedisPublih <- p // channel listened to concurrently between the API and Redis
}
}()
}else{
JsonStatus("Error! Invalid Token !" ,355, w)
return
}
}
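// Example publish call (illustrative; field names follow the Publish struct above and the /publish
// route registered in main; multiple channels and messages are separated with "|"):
//   curl -X POST http://localhost:8000/publish \
//        -d '{"Token":"tokenabc","Message":"m1|m2","Channel":"ch1|ch2","Pubkey":"<pubkey>","Subkey":"<subkey>"}'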
var chanSendHttpToRedisPublih = make(chan Publish) // carries publish data coming in from the API
var chanRecieveRedisToHttp = make (chan string) // carries messages from the Redis subscriptions back to the HTTP side
var db, err = sql.Open("mysql", "root:@/skyneb")
var client = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
func main(){
check(err,"Mysql Connectted.")
client.Set("token","tokenabc",0)
client.Set("pubkey","pubkey123",0)
var broker = NewServer()
runtime.GOMAXPROCS(8)
for {
go func() {
log.Fatal("HTTP server error: ", http.ListenAndServe("localhost:3000", broker))
}()
go func() {
for{
msg := <- chanRecieveRedisToHttp
broker.Notifier <- []byte(msg)
}
}()
go func() {
for{
publih := <- chanSendHttpToRedisPublih // publish whatever comes from the API to Redis
err = client.Publish(publih.Channel, publih.Message).Err() // the channel name comes from the API
check(err,"publish edildi")
}
}()
allowedHeaders := handlers.AllowedHeaders([]string{"X-Requested-With"})
allowedOrigins := handlers.AllowedOrigins([]string{"*"})
allowedMethods := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"})
r := mux.NewRouter()
r.HandleFunc("/publish", publishRedis).Methods("POST")
r.HandleFunc("/subscribe", broker.subscribeHttpToRedis).Methods("POST")
r.HandleFunc("/handshake", handShake).Methods("POST")
log.Fatal(http.ListenAndServe(":8000", handlers.CORS(allowedHeaders, allowedOrigins, allowedMethods)(r)))
}
}
| check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) { // validates the incoming pubkey/subkey pair against MySQL
var appId int
stmtOut, | identifier_body |
httpProcess.go | package main
import (
"net/http"
"fmt"
"net"
"log"
"strings"
"encoding/json"
"github.com/go-redis/redis"
"runtime"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"database/sql"
_"github.com/go-sql-driver/mysql"
"math/rand"
)
type User struct { // user model used for the MySQL handshake
UserName string //`json:"username"`
Password string//`json:"password"`
SubKey string//`json:"subkey"`
PubKey string//`json:"pubkey"`
}
type Client struct { // model of a client connecting over TCP
conn net.Conn
message string
}
type StatusMessage struct { // API response model
Message string `json:"status_message"`
StatusCode int `json:"status_code"`
}
type Publish struct { // publish model for a client connecting through the API
Token string //`json:"token"`
Message string //`json:"message"`
Channel string //`json:"channel"`
Pubkey string //`json:"pubkey"`
Subkey string //`json:"subkey"`
}
type Subscribe struct { // subscribe model for a client connecting through the API
Token string
Subkey string
}
type Broker struct { // main model holding every client event for the Server-Sent Events flow
Notifier chan []byte // all events produced by the clients are handled here
newClients chan chan []byte // fired when a new client connects
closingClients chan chan []byte // fired when a client disconnects
clients map[chan []byte]bool // used to broadcast to every connected client
clientIpAdress []string // IP addresses of the clients connected to the system
clientTokens []string // tokens of the clients connected to the system
}
func NewServer() (broker *Broker) { // builds the instance needed to bring the system up
broker = &Broker{
Notifier: make(chan []byte, 1),
newClients: make(chan chan []byte),
closingClients: make(chan chan []byte),
clients: make(map[chan []byte]bool),
}
go broker.listen() // runs concurrently as soon as the system is up
return
}
func (broker *Broker) addNewClient(ipAddress string) (index int){ // records the IP of a newly connected client
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) { // finds the position of the given IP in the slice
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){ // drops the token from the slice when the client leaves
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){ // drops the IP address from the slice when the client leaves
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) { // runs when a client connects to our server
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan // channel that kicks in when a new client connects
newClientFullIp := req.RemoteAddr // grab the client's IP address
index := broker.addNewClient(newClientFullIp) // grab the client's index in the slice
token := broker.clientTokens[index] // the IP index maps to the same index in the token slice
client.SAdd("onlineUserTokens",token) // add this client's token to the unique set in Redis
defer func() { // when the client disconnects this channel is sent as a notification
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify() // notify fires when a client connected to port 3000 disconnects
clientCloseFullIp := req.RemoteAddr // grab the IP address of the disconnecting client
go func() { // when the user disconnects, send this concurrently as a channel notification
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp) // find the slice index of the disconnecting client
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp]) // look up the token at that index and remove it from the unique set in Redis
broker.closeClientToken(posClientIp) // remove the user from the token slice
broker.closeClientIpAddres(posClientIp) // remove the user from the IP slice
broker.closingClients <- messageChan // send the close notification
}()
for { // broadcast the relevant messages to every user, each for its own project
tokenPosition := broker.findIpPosition(newClientFullIp) // token index of the active user
token := broker.clientTokens[tokenPosition] // token of the active user
data := ByteToStr(<-messageChan) // message coming from the channel
parsedData := strings.Split(data,":") // parse the data coming from Redis (it arrives as channel and message)
channels := strings.Split(parsedData[0],"_") // extract the channel info
isFindUserToken := client.SIsMember(channels[0][:4],token).Val() // check the Redis set so each user only receives messages for its own project
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() { // a select statement manages all the concurrently running pieces from a single place
for {
select {
case s := <-broker.newClients: // a new client has connected
broker.clients[s] = true
onlieUsers := client.SMembers( | sonStatus(message string, status int, jw http.ResponseWriter) {//Genel Response metodumuz
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string { // generates a random token
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) { // handles the handshake request coming from the API
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) { // validates the handshake data against MySQL
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) { // validates the incoming subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) { // validates the incoming pubkey/subkey pair against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) { // handles the subscribe request coming from the API
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[:5]).Val() // check whether this user's project has already been validated
if !stateSubkey{
appId := checkSubKey(s.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",s.Subkey[:5])//başarılı ise rediste check yapıldı olarak görünüyor.
}else{
fmt.Println("böyle bir subkey yok.")
return
}
}
s.Subkey =s.Subkey[:5]
broker.clientTokens = append(broker.clientTokens,s.Token)
client.SAdd(s.Subkey[:4],s.Token)
isSubscribe,_ := client.Get(s.Subkey).Int64()
if isSubscribe != 0 {
JsonStatus("Active Subscribe!" ,321, w)
return
}else{
JsonStatus("Successful!" ,200, w)
client.Set(s.Subkey,1,0)
}
go func() { // once subscribed, the user listens for the Redis messages that concern it
pubsub := client.PSubscribe(s.Subkey+"_*")
for{
msg, err := pubsub.ReceiveMessage()
if err != nil {
panic(err)
}
chanRecieveRedisToHttp <- msg.String() // push the message received from Redis into the channel read by the HTTP side
}
}()
}
func publishRedis(w http.ResponseWriter, r *http.Request) { // handles the publish request coming from the API
var p Publish
_ = json.NewDecoder(r.Body).Decode(&p)
if p.Token == "" || p.Message == "" || p.Channel == "" || p.Subkey == "" || p.Pubkey == "" {
JsonStatus("Required Pubkey and Subkey B" ,351,w)
return
}
stateSubkey := client.SIsMember("userSubkeys",p.Subkey[:5]).Val()
statePubkey := client.SIsMember("userPubkeys",p.Pubkey[:5]).Val()
if !stateSubkey || !statePubkey {
appId := checkPupKeySubKey(p.Pubkey,p.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",p.Subkey[:5])//redis set
client.SAdd("userPubkeys",p.Pubkey[:5])
}else{
fmt.Println("böyle bir pubkey subkey yok.")
return
}
}
p.Pubkey = p.Pubkey[:5]
p.Subkey = p.Subkey[:5]
arrayChannels := strings.Split(p.Channel,"|")
arrayMesages := strings.Split(p.Message,"|")
if len(arrayMesages) > len(arrayChannels){
JsonStatus("channel sayisi mesaj sayisindan az olamaz" ,354, w)
return
}
getToken := client.Get("token").Val()
if p.Token ==getToken{
JsonStatus("Data Published" ,200, w)
go func() {
for i,chann := range arrayChannels { // send each message to its corresponding channel (multiple channels/messages supported)
p.Channel = p.Subkey + "_" + chann
if i >= len(arrayMesages) {
p.Message = ""
} else {
p.Message = arrayMesages[i]
}
chanSendHttpToRedisPublih <- p // channel listened to concurrently between the API and Redis
}
}()
}else{
JsonStatus("Error! Invalid Token !" ,355, w)
return
}
}
var chanSendHttpToRedisPublih = make(chan Publish) // carries publish data coming in from the API
var chanRecieveRedisToHttp = make (chan string) // carries messages from the Redis subscriptions back to the HTTP side
var db, err = sql.Open("mysql", "root:@/skyneb")
var client = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
func main(){
check(err,"Mysql Connectted.")
client.Set("token","tokenabc",0)
client.Set("pubkey","pubkey123",0)
var broker = NewServer()
runtime.GOMAXPROCS(8)
for {
go func() {
log.Fatal("HTTP server error: ", http.ListenAndServe("localhost:3000", broker))
}()
go func() {
for{
msg := <- chanRecieveRedisToHttp
broker.Notifier <- []byte(msg)
}
}()
go func() {
for{
publih := <- chanSendHttpToRedisPublih // publish whatever comes from the API to Redis
err = client.Publish(publih.Channel, publih.Message).Err() // the channel name comes from the API
check(err,"publish edildi")
}
}()
allowedHeaders := handlers.AllowedHeaders([]string{"X-Requested-With"})
allowedOrigins := handlers.AllowedOrigins([]string{"*"})
allowedMethods := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"})
r := mux.NewRouter()
r.HandleFunc("/publish", publishRedis).Methods("POST")
r.HandleFunc("/subscribe", broker.subscribeHttpToRedis).Methods("POST")
r.HandleFunc("/handshake", handShake).Methods("POST")
log.Fatal(http.ListenAndServe(":8000", handlers.CORS(allowedHeaders, allowedOrigins, allowedMethods)(r)))
}
}
| "onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients: // a client left and we stop sending it messages
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier: // notify every client connected to the system
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) { // generic error handling
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func J | conditional_block |
httpProcess.go | package main
import (
"net/http"
"fmt"
"net"
"log"
"strings"
"encoding/json"
"github.com/go-redis/redis"
"runtime"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"database/sql"
_"github.com/go-sql-driver/mysql"
"math/rand"
)
type User struct { // user model used for the MySQL handshake
UserName string //`json:"username"`
Password string//`json:"password"`
SubKey string//`json:"subkey"`
PubKey string//`json:"pubkey"`
}
type Client struct { // model of a client connecting over TCP
conn net.Conn
message string
}
type StatusMessage struct { // API response model
Message string `json:"status_message"`
StatusCode int `json:"status_code"`
}
type Publish struct { // publish model for a client connecting through the API
Token string //`json:"token"`
Message string //`json:"message"`
Channel string //`json:"channel"`
Pubkey string //`json:"pubkey"`
Subkey string //`json:"subkey"`
}
type Subscribe struct { // subscribe model for a client connecting through the API
Token string
Subkey string
}
type Broker struct { // main model holding every client event for the Server-Sent Events flow
Notifier chan []byte // all events produced by the clients are handled here
newClients chan chan []byte // fired when a new client connects
closingClients chan chan []byte // fired when a client disconnects
clients map[chan []byte]bool // used to broadcast to every connected client
clientIpAdress []string // IP addresses of the clients connected to the system
clientTokens []string // tokens of the clients connected to the system
}
func NewServer() (broker *Broker) { // builds the instance needed to bring the system up
broker = &Broker{
Notifier: make(chan []byte, 1),
newClients: make(chan chan []byte),
closingClients: make(chan chan []byte),
clients: make(map[chan []byte]bool),
}
go broker.listen() // runs concurrently as soon as the system is up
return
}
func (broker *Broker) addNewClient(ipAddress string) (index int){ // records the IP of a newly connected client
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) { // finds the position of the given IP in the slice
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){ // drops the token from the slice when the client leaves
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){ // drops the IP address from the slice when the client leaves
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) { // runs when a client connects to our server
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan//channel that fires when a new client connects
newClientFullIp := req.RemoteAddr//take the client's IP address.
index := broker.addNewClient(newClientFullIp)//get the client's index in the slice.
token := broker.clientTokens[index]//the IP index maps to the same index in the token slice.
client.SAdd("onlineUserTokens",token)//add this user's token to the unique set in Redis.
defer func() {//when the user disconnects this channel is sent as a notification.
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify()//notify fires when a client connected on port 3000 disconnects..
clientCloseFullIp := req.RemoteAddr// take the IP address of the disconnecting user
go func() {//when the user disconnects we send this concurrently as a notification over the channel.
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp)//find the disconnecting user's index in the slice
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp])//look up the token at that index and remove it from the unique set in Redis.
broker.closeClientToken(posClientIp)//remove the user from the token slice
broker.closeClientIpAddres(posClientIp)//remove the user from the IP slice.
broker.closingClients <- messageChan//send the close notification.
}()
for {//here we broadcast messages to all relevant users for their respective projects.
tokenPosition := broker.findIpPosition(newClientFullIp)//token index of the active user
token := broker.clientTokens[tokenPosition]//token of the active user.
data := ByteToStr(<-messageChan)//the message coming from the channel
parsedData := strings.Split(data,":")//parse the data coming from Redis (it arrives as channel and message.)
channels := strings.Split(parsedData[0],"_")//extract the channel information.
isFindUserToken := client.SIsMember(channels[0][:4],token).Val()//check the Redis set so each message is routed only to users of the matching project.
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() {//a select statement is used here to manage our concurrently running handlers from a single place.
for {
select {
case s := <-broker.newClients: //a new client has connected..
broker.clients[s] = true
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients://a client has left and we want to stop sending it messages
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://send a notification to every client connected to the system
for clientMessageChan := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) {//generic error handling mechanism
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) {//our generic response helper
jw.Header().Set("Content-Type", "application/json")
retu | := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
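// Note (added): ByteToStr assumes the go-redis Message.String() layout "Message<channel: payload>":
// it drops the first 8 bytes ("Message<") and the trailing ">". For example
// "Message<subke_news: hello>" becomes "subke_news: hello", which ServeHTTP then splits on ":".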
func randToken() string {//generates a random token
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//handshake request coming in through the API
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
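// Illustrative sketch (not part of the original source): the handshake body is JSON whose field
// names match the User struct exactly (the json tags above are commented out). The values below are
// made-up placeholders; the user name, password and keys must already exist in the MySQL account
// and app tables.
//
// POST /handshake
// {"UserName":"alice","Password":"secret","SubKey":"subkey-12345678","PubKey":"pubkey-12345678"}
//
// On success the JSON response carries {"status_message":"Token:<random hex>","status_code":200};
// invalid or missing fields are reported with status_code 330 in the payload.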
func checkUser(user User) (isCheck int ,token string) {//validates the data received during the handshake against MySQL
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//validates the incoming subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//validates the incoming pubkey and subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//subscribe request coming in through the API
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[:5]).Val()//userin sistemde yarattığı proje check yapılmış mı diye kontrol yapılıyor
if !stateSubkey{
appId := checkSubKey(s.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",s.Subkey[:5])//başarılı ise rediste check yapıldı olarak görünüyor.
}else{
fmt.Println("böyle bir subkey yok.")
return
}
}
s.Subkey =s.Subkey[:5]
broker.clientTokens = append(broker.clientTokens,s.Token)
client.SAdd(s.Subkey[:4],s.Token)
isSubscribe,_ := client.Get(s.Subkey).Int64()
if isSubscribe != 0 {
JsonStatus("Active Subscribe!" ,321, w)
return
}else{
JsonStatus("Successful!" ,200, w)
client.Set(s.Subkey,1,0)
}
go func() {//once subscribed, the user will listen for any message from Redis that concerns it.
pubsub := client.PSubscribe(s.Subkey+"_*")
for{
msg, err := pubsub.ReceiveMessage()
if err != nil {
panic(err)
}
chanRecieveRedisToHttp <- msg.String()//push the message received from Redis into the channel read by the HTTP side
}
}()
}
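// Illustrative sketch (not part of the original source): a subscribe body carries the token obtained
// from /handshake plus the subkey; field names match the Subscribe struct and the values are
// placeholders.
//
// POST /subscribe
// {"Token":"<token from handshake>","Subkey":"subkey-12345678"}
//
// The handler keeps only the first five characters of the subkey, stores the token in Redis under
// the first four characters, and starts a PSubscribe on "<subkey[:5]>_*" so later publishes on that
// project reach connected SSE clients.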
func publishRedis(w http.ResponseWriter, r *http.Request) {//publish request coming in through the API
var p Publish
_ = json.NewDecoder(r.Body).Decode(&p)
if p.Token == "" || p.Message == "" || p.Channel == "" || p.Subkey == "" || p.Pubkey == "" {
JsonStatus("Required Pubkey and Subkey B" ,351,w)
return
}
stateSubkey := client.SIsMember("userSubkeys",p.Subkey[:5]).Val()
statePubkey := client.SIsMember("userPubkeys",p.Pubkey[:5]).Val()
if !stateSubkey || !statePubkey {
appId := checkPupKeySubKey(p.Pubkey,p.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",p.Subkey[:5])//redis set
client.SAdd("userPubkeys",p.Pubkey[:5])
}else{
fmt.Println("böyle bir pubkey subkey yok.")
return
}
}
p.Pubkey = p.Pubkey[:5]
p.Subkey = p.Subkey[:5]
arrayChannels := strings.Split(p.Channel,"|")
arrayMesages := strings.Split(p.Message,"|")
if len(arrayMesages) > len(arrayChannels){
JsonStatus("channel sayisi mesaj sayisindan az olamaz" ,354, w)
return
}
getToken := client.Get("token").Val()
if p.Token ==getToken{
JsonStatus("Data Published" ,200, w)
go func() {
for i,chann := range arrayChannels {//send multiple messages to the specified channels..
p.Channel = p.Subkey + "_" + chann
if i >= len(arrayMesages) {
p.Message = ""
} else {
p.Message = arrayMesages[i]
}
chanSendHttpToRedisPublih <- p//channel consumed concurrently between the API and Redis
}
}()
}else{
JsonStatus("Error! Invalid Token !" ,355, w)
return
}
}
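// Illustrative sketch (not part of the original source): a publish body; field names match the
// Publish struct and the values are placeholders. Channels and messages are pipe-separated and
// matched by index, so Channel "a|b" with Message "m1|m2" sends m1 to a and m2 to b. The token is
// compared against the "token" key in Redis, which main() below seeds with "tokenabc".
//
// POST /publish
// {"Token":"tokenabc","Message":"m1|m2","Channel":"a|b","Pubkey":"pubkey-12345678","Subkey":"subkey-12345678"}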
var chanSendHttpToRedisPublih = make(chan Publish)//channel carrying publish data from the API towards Redis
var chanRecieveRedisToHttp = make (chan string)//carries messages received from Redis subscriptions back to the HTTP side.
var db, err = sql.Open("mysql", "root:@/skyneb")
var client = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
func main(){
check(err,"Mysql Connectted.")
client.Set("token","tokenabc",0)
client.Set("pubkey","pubkey123",0)
var broker = NewServer()
runtime.GOMAXPROCS(8)
for {
go func() {
log.Fatal("HTTP server error: ", http.ListenAndServe("localhost:3000", broker))
}()
go func() {
for{
msg := <- chanRecieveRedisToHttp
broker.Notifier <- []byte(msg)
}
}()
go func() {
for{
publih := <- chanSendHttpToRedisPublih//publish whatever comes from the API to Redis
err = client.Publish(publih.Channel, publih.Message).Err()//the channel information comes from the API
check(err,"published")
}
}()
allowedHeaders := handlers.AllowedHeaders([]string{"X-Requested-With"})
allowedOrigins := handlers.AllowedOrigins([]string{"*"})
allowedMethods := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"})
r := mux.NewRouter()
r.HandleFunc("/publish", publishRedis).Methods("POST")
r.HandleFunc("/subscribe", broker.subscribeHttpToRedis).Methods("POST")
r.HandleFunc("/handshake", handShake).Methods("POST")
log.Fatal(http.ListenAndServe(":8000", handlers.CORS(allowedHeaders, allowedOrigins, allowedMethods)(r)))
}
}
| rn_message | identifier_name |
httpProcess.go | package main
import (
"net/http"
"fmt"
"net"
"log"
"strings"
"encoding/json"
"github.com/go-redis/redis"
"runtime"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"database/sql"
_"github.com/go-sql-driver/mysql"
"math/rand"
)
type User struct {//user model for the MySQL handshake
UserName string //`json:"username"`
Password string//`json:"password"`
SubKey string//`json:"subkey"`
PubKey string//`json:"pubkey"`
}
type Client struct {//model of a client connected over TCP
conn net.Conn
message string
}
type StatusMessage struct {//API response model
Message string `json:"status_message"`
StatusCode int `json:"status_code"`
}
type Publish struct {//publish model for a user connecting through the API
Token string //`json:"token"`
Message string //`json:"message"`
Channel string //`json:"channel"`
Pubkey string //`json:"pubkey"`
Subkey string //`json:"subkey"`
}
type Subscribe struct {//subscribe model for a user connecting through the API
Token string
Subkey string
}
type Broker struct {//main model holding all of a user's events for Server-Sent Events
Notifier chan []byte// every event produced by users is handled here.
newClients chan chan []byte// fired when a new client connects
closingClients chan chan []byte// fired when a client disconnects
clients map[chan []byte]bool// used to broadcast to all connected clients
clientIpAdress []string//IP addresses of the clients connected to the system
clientTokens []string//tokens of the clients connected to the system
}
func NewServer() (broker *Broker) {//creates the instance needed to bring the system up
broker = &Broker{
Notifier: make(chan []byte, 1),
newClients: make(chan chan []byte),
closingClients: make(chan chan []byte),
clients: make(map[chan []byte]bool),
}
go broker.listen()//sistem ayağa kalktığında eş zamanlı olarak bu metot çalışacak.
return
}
func (broker *Broker) addNewClient(ipAddress string) (index int){//yeni client bağlanınca ip'si ekleniyor.
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) {//verilen ip'nin dizideki pozisyonunu buluyor.
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){//kullanıcı çıktığında diziden çıkıyor.
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){//kullanıcı çıktığında
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {//runs whenever a client connects to our server..
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan//yeni client bağlanında devreye girecek olan channel
newClientFullIp := req.RemoteAddr//client'in ip adresini alıyoruz.
index := broker.addNewClient(newClientFullIp)//client'in dizideki indisini alıyoruz.
token := broker.clientTokens[index]//ip indisini token dizisinde aynı indise eşitliyoruz.
client.SAdd("onlineUserTokens",token)//rediste uniq listemize bu kullanıcının tokenını ekliyoruz.
defer func() {//kullanıcı disconnect olduğunda bu channel notification olarak gidecek.
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify()//3000 portuna bağlı client çıkış yapınca notify devreye giriyor..
clientCloseFullIp := req.RemoteAddr// disconnect olan kullanıcının ip adresinin alıyoruz
go func() {//user disconnect olunca eş zamanlı bunu channel ile notification olarak gönderecez.
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp)//disconnect olan kullanıcının dizideki indisini buluyoruz
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp])//yukarıda elde ettiğimiz indisteki tokeni bulıuyoruz ve bunu redisteki uniq listten çıkarıyoruz.
broker.closeClientToken(posClientIp)//user'i token dizisinden çıkarıyoruz
broker.closeClientIpAddres(posClientIp)//user'i ip dizisinden çıkarıyoruz.
broker.closingClients <- messageChan//close notification'u gönderiyoruz.
}()
for {//burada ilgili tüm userlere sırasıyla ilgili projelerine broadcast mesaj göndericez.
tokenPosition := broker.findIpPosition(newClientFullIp)//aktif userin token indisi
token := broker.clientTokens[tokenPosition]//aktif user'in tokeni.
data := ByteToStr(<-messageChan)//channel'den gelen mesajımız
parsedData := strings.Split(data,":")//redisten gelen datayı parse ediyoruz(kanal ve mesaj olarak geliyor.)
channels := strings.Split(parsedData[0],"_")//channel bilgisini elde ettik.
isFindUserToken := client.SIsMember(channels[0][:4],token).Val()//userlerin ilgili projelerine ilgili mesajı ayırt etmek için rediste listede kontrol yapıyoruz.
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() {//a select statement is used here to manage our concurrently running handlers from a single place.
for {
select {
case s := <-broker.newClients: //yeni bir client bağlandı..
broker.clients[s] = true
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients://Bir client ayrıldı ve mesaj göndermeyi bırakmak istiyoruz
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://sisteme bağlı tüm clientlere notify gönderiyoruz
for clientMessageChan := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) {//generic error handling mechanism
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) {//our generic response helper
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string {//generates a random token
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//handshake request coming in through the API
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
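// Illustrative note (added): the handshake body uses the User struct field names, e.g.
// {"UserName":"alice","Password":"secret","SubKey":"subkey-12345678","PubKey":"pubkey-12345678"};
// a matching account/app row in MySQL yields {"status_message":"Token:<hex>","status_code":200}.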
func checkUser(user User) (isCheck int ,token string) {//validates the data received during the handshake against MySQL
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{ | check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//validates the incoming subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//validates the incoming pubkey and subkey against MySQL
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//subscribe request coming in through the API
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[:5]).Val()//userin sistemde yarattığı proje check yapılmış mı diye kontrol yapılıyor
if !stateSubkey{
appId := checkSubKey(s.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",s.Subkey[:5])//başarılı ise rediste check yapıldı olarak görünüyor.
}else{
fmt.Println("böyle bir subkey yok.")
return
}
}
s.Subkey =s.Subkey[:5]
broker.clientTokens = append(broker.clientTokens,s.Token)
client.SAdd(s.Subkey[:4],s.Token)
isSubscribe,_ := client.Get(s.Subkey).Int64()
if isSubscribe != 0 {
JsonStatus("Active Subscribe!" ,321, w)
return
}else{
JsonStatus("Successful!" ,200, w)
client.Set(s.Subkey,1,0)
}
go func() {//user subscribe olduğunda artık redisten kendisini ilgilendiren mesaj geldiğinde onu dinleyecektir.
pubsub := client.PSubscribe(s.Subkey+"_*")
for{
msg, err := pubsub.ReceiveMessage()
if err != nil {
panic(err)
}
chanRecieveRedisToHttp <- msg.String()//publih kanalımıza api'den gelen bilgileri ekledik
}
}()
}
func publishRedis(w http.ResponseWriter, r *http.Request) {//publish request coming in through the API
var p Publish
_ = json.NewDecoder(r.Body).Decode(&p)
if p.Token == "" || p.Message == "" || p.Channel == "" || p.Subkey == "" || p.Pubkey == "" {
JsonStatus("Required Pubkey and Subkey B" ,351,w)
return
}
stateSubkey := client.SIsMember("userSubkeys",p.Subkey[:5]).Val()
statePubkey := client.SIsMember("userPubkeys",p.Pubkey[:5]).Val()
if !stateSubkey || !statePubkey {
appId := checkPupKeySubKey(p.Pubkey,p.Subkey)//mysql check
if appId > 0{
client.SAdd("userSubkeys",p.Subkey[:5])//redis set
client.SAdd("userPubkeys",p.Pubkey[:5])
}else{
fmt.Println("böyle bir pubkey subkey yok.")
return
}
}
p.Pubkey = p.Pubkey[:5]
p.Subkey = p.Subkey[:5]
arrayChannels := strings.Split(p.Channel,"|")
arrayMesages := strings.Split(p.Message,"|")
if len(arrayMesages) > len(arrayChannels){
JsonStatus("channel sayisi mesaj sayisindan az olamaz" ,354, w)
return
}
getToken := client.Get("token").Val()
if p.Token ==getToken{
JsonStatus("Data Published" ,200, w)
go func() {
for i,chann := range arrayChannels {//belirtilen çoklu kanallara çoklu mesaj gönderiliyor..
p.Channel = p.Subkey + "_" + chann
if i >= len(arrayMesages) {
p.Message = ""
} else {
p.Message = arrayMesages[i]
}
chanSendHttpToRedisPublih <- p//Redis ile api arasında eş zamanlı dinlenen kanal
}
}()
}else{
JsonStatus("Error! Invalid Token !" ,355, w)
return
}
}
var chanSendHttpToRedisPublih = make(chan Publish)//apiPublish'den gelen datamızı publish için kanalımız
var chanRecieveRedisToHttp = make (chan string)//rediste kanala subscribe olur.
var db, err = sql.Open("mysql", "root:@/skyneb")
var client = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
func main(){
check(err,"Mysql Connectted.")
client.Set("token","tokenabc",0)
client.Set("pubkey","pubkey123",0)
var broker = NewServer()
runtime.GOMAXPROCS(8)
for {
go func() {
log.Fatal("HTTP server error: ", http.ListenAndServe("localhost:3000", broker))
}()
go func() {
for{
msg := <- chanRecieveRedisToHttp
broker.Notifier <- []byte(msg)
}
}()
go func() {
for{
publih := <- chanSendHttpToRedisPublih//publish whatever comes from the API to Redis
err = client.Publish(publih.Channel, publih.Message).Err()//the channel information comes from the API
check(err,"published")
}
}()
allowedHeaders := handlers.AllowedHeaders([]string{"X-Requested-With"})
allowedOrigins := handlers.AllowedOrigins([]string{"*"})
allowedMethods := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"})
r := mux.NewRouter()
r.HandleFunc("/publish", publishRedis).Methods("POST")
r.HandleFunc("/subscribe", broker.subscribeHttpToRedis).Methods("POST")
r.HandleFunc("/handshake", handShake).Methods("POST")
log.Fatal(http.ListenAndServe(":8000", handlers.CORS(allowedHeaders, allowedOrigins, allowedMethods)(r)))
}
} | stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?") | random_line_split |
blocks.go | package labelmap
import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/janelia-flyem/dvid/datastore"
"github.com/janelia-flyem/dvid/datatype/common/labels"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
lz4 "github.com/janelia-flyem/go/golz4-updated"
)
func writeBlock(w http.ResponseWriter, bcoord dvid.ChunkPoint3d, out []byte) error {
if err := binary.Write(w, binary.LittleEndian, bcoord[0]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[1]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[2]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, uint32(len(out))); err != nil {
return err
}
if written, err := w.Write(out); err != nil || written != int(len(out)) {
if err != nil {
dvid.Errorf("error writing value: %v\n", err)
return err
}
return fmt.Errorf("could not write %d bytes of block %s: only %d bytes written", len(out), bcoord, written)
}
return nil
}
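// Sketch (not part of DVID): the inverse of writeBlock above, included only to document the wire
// framing. It assumes the same layout written by writeBlock: three little-endian int32 block
// coordinates, a uint32 payload length, then the payload. readBlockSketch is a hypothetical helper;
// a *bytes.Reader is used so no extra imports are needed, and a real reader would prefer io.ReadFull.
func readBlockSketch(r *bytes.Reader) (bcoord dvid.ChunkPoint3d, payload []byte, err error) {
// read the three block coordinates
for i := 0; i < 3; i++ {
if err = binary.Read(r, binary.LittleEndian, &bcoord[i]); err != nil {
return
}
}
// read the payload length, then the payload bytes
var n uint32
if err = binary.Read(r, binary.LittleEndian, &n); err != nil {
return
}
payload = make([]byte, n)
_, err = r.Read(payload)
return
}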
type blockTiming struct {
readT, transcodeT, writeT time.Duration
readN, transcodeN int64
sync.RWMutex
}
func (bt *blockTiming) writeDone(t0 time.Time) {
bt.Lock()
bt.writeT += time.Since(t0)
bt.Unlock()
}
func (bt *blockTiming) readDone(t0 time.Time) {
bt.Lock()
bt.readT += time.Since(t0)
bt.readN++
bt.Unlock()
}
func (bt *blockTiming) transcodeDone(t0 time.Time) {
bt.Lock()
bt.transcodeT += time.Since(t0)
bt.transcodeN++
bt.Unlock()
}
func (bt *blockTiming) String() string {
var readAvgT, transcodeAvgT, writeAvgT time.Duration
if bt.readN == 0 {
readAvgT = 0
} else {
readAvgT = bt.readT / time.Duration(bt.readN)
}
if bt.transcodeN == 0 {
transcodeAvgT = 0
} else {
transcodeAvgT = bt.transcodeT / time.Duration(bt.transcodeN)
}
if bt.readN == 0 {
writeAvgT = 0
} else {
writeAvgT = bt.writeT / time.Duration(bt.readN)
}
return fmt.Sprintf("read %s (%s), transcode %s (%s), write %s (%s)", bt.readT, readAvgT, bt.transcodeT, transcodeAvgT, bt.writeT, writeAvgT)
}
type blockData struct {
bcoord dvid.ChunkPoint3d
compression string
supervoxels bool
v dvid.VersionID
data []byte
}
// transcodes a block of data by doing any data modifications necessary to meet requested
// compression compared to stored compression as well as raw supervoxels versus mapped labels.
func (d *Data) transcodeBlock(b blockData) (out []byte, err error) {
formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(b.data[0]))
var start int
if checksum == dvid.CRC32 {
start = 5
} else {
start = 1
}
var outsize uint32
switch formatIn {
case dvid.LZ4:
outsize = binary.LittleEndian.Uint32(b.data[start : start+4])
out = b.data[start+4:]
if len(out) != int(outsize) {
err = fmt.Errorf("block %s was corrupted lz4: supposed size %d but had %d bytes", b.bcoord, outsize, len(out))
return
}
case dvid.Uncompressed, dvid.Gzip:
outsize = uint32(len(b.data[start:]))
out = b.data[start:]
default:
err = fmt.Errorf("labelmap data was stored in unknown compressed format: %s", formatIn)
return
}
var formatOut dvid.CompressionFormat
switch b.compression {
case "", "lz4":
formatOut = dvid.LZ4
case "blocks":
formatOut = formatIn
case "gzip":
formatOut = dvid.Gzip
case "uncompressed":
formatOut = dvid.Uncompressed
default:
err = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// try to write a single block either by streaming (allows for termination) or by writing
// with a simplified pipeline compared to subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" {
return
}
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil
}
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// returns nil block if no block is at the given block coordinate
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int32(xloc), int32(yloc), int32(zloc)}, nil
}
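// Example (added): strArrayToBCoord([]string{"1", "2", "3"}) yields dvid.ChunkPoint3d{1, 2, 3};
// a non-integer component returns the strconv error and a zero-valued coordinate.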
// sendBlocksSpecific writes data to the blocks specified -- best for non-ordered backend
func (d *Data) sendBlocksSpecific(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, compression, blockstring string, scale uint8) (numBlocks int, err error) {
timedLog := dvid.NewTimeLog()
switch compression {
case "":
compression = "blocks"
case "lz4", "gzip", "blocks", "uncompressed":
break
default:
err = fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
return
}
w.Header().Set("Content-type", "application/octet-stream")
// extract query string
if blockstring == "" {
return
}
coordarray := strings.Split(blockstring, ",")
if len(coordarray)%3 != 0 {
return 0, fmt.Errorf("block query string should be three coordinates per block")
}
var store storage.KeyValueDB
if store, err = datastore.GetKeyValueDB(d); err != nil {
return
}
// launch goroutine that will stream blocks to client
numBlocks = len(coordarray) / 3
wg := new(sync.WaitGroup)
ch := make(chan blockSend, numBlocks)
var sendErr error
var startBlock dvid.ChunkPoint3d
var timing blockTiming
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else if len(data.value) > 0 {
t0 := time.Now()
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
timing.writeDone(t0)
}
wg.Done()
}
timedLog.Infof("labelmap %q specificblocks - finished sending %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
}()
// iterate through each block, get data from store, and transcode based on request parameters
for i := 0; i < len(coordarray); i += 3 {
var bcoord dvid.ChunkPoint3d
if bcoord, err = strArrayToBCoord(coordarray[i : i+3]); err != nil {
return
}
if i == 0 {
startBlock = bcoord
}
wg.Add(1)
t0 := time.Now()
indexBeg := dvid.IndexZYX(bcoord)
keyBeg := NewBlockTKey(scale, &indexBeg)
var value []byte
value, err = store.Get(ctx, keyBeg)
timing.readDone(t0)
if err != nil {
ch <- blockSend{err: err}
return
}
if len(value) > 0 {
go func(bcoord dvid.ChunkPoint3d, value []byte) {
b := blockData{
bcoord: bcoord,
v: ctx.VersionID(),
data: value,
compression: compression,
supervoxels: supervoxels,
}
t0 := time.Now()
out, err := d.transcodeBlock(b)
timing.transcodeDone(t0)
ch <- blockSend{bcoord: bcoord, value: out, err: err}
}(bcoord, value)
} else {
ch <- blockSend{value: nil}
}
}
timedLog.Infof("labelmap %q specificblocks - launched concurrent reads of %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
wg.Wait()
close(ch)
dvid.Infof("labelmap %q specificblocks - %d blocks starting with %s: %s\n", d.DataName(), numBlocks, startBlock, &timing)
return numBlocks, sendErr
}
// sendBlocksVolume writes a series of blocks covering the given block-aligned subvolume to a HTTP response.
func (d *Data) sendBlocksVolume(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, scale uint8, subvol *dvid.Subvolume, compression string) error {
w.Header().Set("Content-type", "application/octet-stream")
switch compression {
case "", "lz4", "gzip", "blocks", "uncompressed":
default:
return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
}
// convert x,y,z coordinates to block coordinates for this scale
blocksdims := subvol.Size().Div(d.BlockSize())
blocksoff := subvol.StartPoint().Div(d.BlockSize())
timedLog := dvid.NewTimeLog()
defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))
numBlocks := int(blocksdims.Prod())
wg := new(sync.WaitGroup)
// launch goroutine that will stream blocks to client
ch := make(chan blockSend, numBlocks)
var sendErr error
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else {
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
}
wg.Done()
}
}()
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return fmt.Errorf("Data type labelmap had error initializing store: %v", err)
}
okv := store.(storage.BufferableOps)
// extract buffer interface
req, hasbuffer := okv.(storage.KeyValueRequester)
if hasbuffer {
okv = req.NewBuffer(ctx)
}
for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
indexBeg := dvid.IndexZYX(beginPoint)
sx, sy, sz := indexBeg.Unpack()
begTKey := NewBlockTKey(scale, &indexBeg)
indexEnd := dvid.IndexZYX(endPoint)
endTKey := NewBlockTKey(scale, &indexEnd)
// Send the entire range of key-value pairs to chunk processor
err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
if c == nil || c.TKeyValue == nil {
return nil
}
kv := c.TKeyValue
if kv.V == nil {
return nil
}
// Determine which block this is.
_, indexZYX, err := DecodeBlockTKey(kv.K)
if err != nil {
return err
}
x, y, z := indexZYX.Unpack()
if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
return nil
}
b := blockData{
bcoord: dvid.ChunkPoint3d{x, y, z},
compression: compression,
supervoxels: supervoxels,
v: ctx.VersionID(),
data: kv.V,
}
wg.Add(1)
go func(b blockData) {
out, err := d.transcodeBlock(b)
ch <- blockSend{bcoord: b.bcoord, value: out, err: err}
}(b)
return nil
})
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
}
wg.Wait()
close(ch)
if hasbuffer {
// submit the entire buffer to the DB
err = okv.(storage.RequestBuffer).Flush()
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
return sendErr
}
// getSupervoxelBlock returns a compressed supervoxel Block of the given block coordinate.
func (d *Data) getSupervoxelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) |
// getBlockLabels returns a block of labels at given scale in packed little-endian uint64 format.
func (d *Data) getBlockLabels(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) ([]byte, error) {
block, err := d.getSupervoxelBlock(v, bcoord, scale)
if err != nil {
return nil, err
}
var mapping *VCache
if !supervoxels {
if mapping, err = getMapping(d, v); err != nil {
return nil, err
}
}
if mapping != nil {
err = modifyBlockMapping(v, block, mapping)
if err != nil {
return nil, fmt.Errorf("unable to modify block %s mapping: %v", bcoord, err)
}
}
labelData, _ := block.MakeLabelVolume()
return labelData, nil
}
| {
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return nil, err
}
// Retrieve the block of labels
ctx := datastore.NewVersionedCtx(d, v)
index := dvid.IndexZYX(bcoord)
serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
if err != nil {
return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
}
if serialization == nil {
blockSize, ok := d.BlockSize().(dvid.Point3d)
if !ok {
return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
}
return labels.MakeSolidBlock(0, blockSize), nil
}
deserialization, _, err := dvid.DeserializeData(serialization, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize block %s in '%s': %v", bcoord, d.DataName(), err)
}
var block labels.Block
if err = block.UnmarshalBinary(deserialization); err != nil {
return nil, err
}
return &block, nil
} | identifier_body |
blocks.go | package labelmap
import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/janelia-flyem/dvid/datastore"
"github.com/janelia-flyem/dvid/datatype/common/labels"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
lz4 "github.com/janelia-flyem/go/golz4-updated"
)
func writeBlock(w http.ResponseWriter, bcoord dvid.ChunkPoint3d, out []byte) error {
if err := binary.Write(w, binary.LittleEndian, bcoord[0]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[1]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[2]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, uint32(len(out))); err != nil {
return err
}
if written, err := w.Write(out); err != nil || written != int(len(out)) {
if err != nil {
dvid.Errorf("error writing value: %v\n", err)
return err
}
return fmt.Errorf("could not write %d bytes of block %s: only %d bytes written", len(out), bcoord, written)
}
return nil
}
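// Note (added): each block written above is framed as three little-endian int32 coordinates plus a
// uint32 payload length, i.e. exactly 16 header bytes, followed by the (possibly compressed) payload.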
type blockTiming struct {
readT, transcodeT, writeT time.Duration
readN, transcodeN int64
sync.RWMutex
}
func (bt *blockTiming) writeDone(t0 time.Time) {
bt.Lock()
bt.writeT += time.Since(t0)
bt.Unlock()
}
func (bt *blockTiming) readDone(t0 time.Time) {
bt.Lock()
bt.readT += time.Since(t0)
bt.readN++
bt.Unlock()
}
func (bt *blockTiming) transcodeDone(t0 time.Time) {
bt.Lock()
bt.transcodeT += time.Since(t0)
bt.transcodeN++
bt.Unlock()
}
func (bt *blockTiming) | () string {
var readAvgT, transcodeAvgT, writeAvgT time.Duration
if bt.readN == 0 {
readAvgT = 0
} else {
readAvgT = bt.readT / time.Duration(bt.readN)
}
if bt.transcodeN == 0 {
transcodeAvgT = 0
} else {
transcodeAvgT = bt.transcodeT / time.Duration(bt.transcodeN)
}
if bt.readN == 0 {
writeAvgT = 0
} else {
writeAvgT = bt.writeT / time.Duration(bt.readN)
}
return fmt.Sprintf("read %s (%s), transcode %s (%s), write %s (%s)", bt.readT, readAvgT, bt.transcodeT, transcodeAvgT, bt.writeT, writeAvgT)
}
type blockData struct {
bcoord dvid.ChunkPoint3d
compression string
supervoxels bool
v dvid.VersionID
data []byte
}
// transcodes a block of data by doing any data modifications necessary to meet requested
// compression compared to stored compression as well as raw supervoxels versus mapped labels.
func (d *Data) transcodeBlock(b blockData) (out []byte, err error) {
formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(b.data[0]))
var start int
if checksum == dvid.CRC32 {
start = 5
} else {
start = 1
}
var outsize uint32
switch formatIn {
case dvid.LZ4:
outsize = binary.LittleEndian.Uint32(b.data[start : start+4])
out = b.data[start+4:]
if len(out) != int(outsize) {
err = fmt.Errorf("block %s was corrupted lz4: supposed size %d but had %d bytes", b.bcoord, outsize, len(out))
return
}
case dvid.Uncompressed, dvid.Gzip:
outsize = uint32(len(b.data[start:]))
out = b.data[start:]
default:
err = fmt.Errorf("labelmap data was stored in unknown compressed format: %s", formatIn)
return
}
var formatOut dvid.CompressionFormat
switch b.compression {
case "", "lz4":
formatOut = dvid.LZ4
case "blocks":
formatOut = formatIn
case "gzip":
formatOut = dvid.Gzip
case "uncompressed":
formatOut = dvid.Uncompressed
default:
err = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// try to write a single block either by streaming (allows for termination) or by writing
// with a simplified pipeline compared to subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" {
return
}
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil
}
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// returns nil block if no block is at the given block coordinate
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int32(xloc), int32(yloc), int32(zloc)}, nil
}
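// Example (added): []string{"10", "20", "30"} maps to dvid.ChunkPoint3d{10, 20, 30}.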
// sendBlocksSpecific writes data to the blocks specified -- best for non-ordered backend
func (d *Data) sendBlocksSpecific(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, compression, blockstring string, scale uint8) (numBlocks int, err error) {
timedLog := dvid.NewTimeLog()
switch compression {
case "":
compression = "blocks"
case "lz4", "gzip", "blocks", "uncompressed":
break
default:
err = fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
return
}
w.Header().Set("Content-type", "application/octet-stream")
// extract query string
if blockstring == "" {
return
}
coordarray := strings.Split(blockstring, ",")
if len(coordarray)%3 != 0 {
return 0, fmt.Errorf("block query string should be three coordinates per block")
}
var store storage.KeyValueDB
if store, err = datastore.GetKeyValueDB(d); err != nil {
return
}
// launch goroutine that will stream blocks to client
numBlocks = len(coordarray) / 3
wg := new(sync.WaitGroup)
ch := make(chan blockSend, numBlocks)
var sendErr error
var startBlock dvid.ChunkPoint3d
var timing blockTiming
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else if len(data.value) > 0 {
t0 := time.Now()
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
timing.writeDone(t0)
}
wg.Done()
}
timedLog.Infof("labelmap %q specificblocks - finished sending %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
}()
// iterate through each block, get data from store, and transcode based on request parameters
for i := 0; i < len(coordarray); i += 3 {
var bcoord dvid.ChunkPoint3d
if bcoord, err = strArrayToBCoord(coordarray[i : i+3]); err != nil {
return
}
if i == 0 {
startBlock = bcoord
}
wg.Add(1)
t0 := time.Now()
indexBeg := dvid.IndexZYX(bcoord)
keyBeg := NewBlockTKey(scale, &indexBeg)
var value []byte
value, err = store.Get(ctx, keyBeg)
timing.readDone(t0)
if err != nil {
ch <- blockSend{err: err}
return
}
if len(value) > 0 {
go func(bcoord dvid.ChunkPoint3d, value []byte) {
b := blockData{
bcoord: bcoord,
v: ctx.VersionID(),
data: value,
compression: compression,
supervoxels: supervoxels,
}
t0 := time.Now()
out, err := d.transcodeBlock(b)
timing.transcodeDone(t0)
ch <- blockSend{bcoord: bcoord, value: out, err: err}
}(bcoord, value)
} else {
ch <- blockSend{value: nil}
}
}
timedLog.Infof("labelmap %q specificblocks - launched concurrent reads of %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
wg.Wait()
close(ch)
dvid.Infof("labelmap %q specificblocks - %d blocks starting with %s: %s\n", d.DataName(), numBlocks, startBlock, &timing)
return numBlocks, sendErr
}
// sendBlocksVolume writes a series of blocks covering the given block-aligned subvolume to a HTTP response.
func (d *Data) sendBlocksVolume(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, scale uint8, subvol *dvid.Subvolume, compression string) error {
w.Header().Set("Content-type", "application/octet-stream")
switch compression {
case "", "lz4", "gzip", "blocks", "uncompressed":
default:
return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
}
// convert x,y,z coordinates to block coordinates for this scale
blocksdims := subvol.Size().Div(d.BlockSize())
blocksoff := subvol.StartPoint().Div(d.BlockSize())
timedLog := dvid.NewTimeLog()
defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))
numBlocks := int(blocksdims.Prod())
wg := new(sync.WaitGroup)
// launch goroutine that will stream blocks to client
ch := make(chan blockSend, numBlocks)
var sendErr error
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else {
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
}
wg.Done()
}
}()
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return fmt.Errorf("Data type labelmap had error initializing store: %v", err)
}
okv := store.(storage.BufferableOps)
// extract buffer interface
req, hasbuffer := okv.(storage.KeyValueRequester)
if hasbuffer {
okv = req.NewBuffer(ctx)
}
for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
indexBeg := dvid.IndexZYX(beginPoint)
sx, sy, sz := indexBeg.Unpack()
begTKey := NewBlockTKey(scale, &indexBeg)
indexEnd := dvid.IndexZYX(endPoint)
endTKey := NewBlockTKey(scale, &indexEnd)
// Send the entire range of key-value pairs to chunk processor
err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
if c == nil || c.TKeyValue == nil {
return nil
}
kv := c.TKeyValue
if kv.V == nil {
return nil
}
// Determine which block this is.
_, indexZYX, err := DecodeBlockTKey(kv.K)
if err != nil {
return err
}
x, y, z := indexZYX.Unpack()
if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
return nil
}
b := blockData{
bcoord: dvid.ChunkPoint3d{x, y, z},
compression: compression,
supervoxels: supervoxels,
v: ctx.VersionID(),
data: kv.V,
}
wg.Add(1)
go func(b blockData) {
out, err := d.transcodeBlock(b)
ch <- blockSend{bcoord: b.bcoord, value: out, err: err}
}(b)
return nil
})
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
}
wg.Wait()
close(ch)
if hasbuffer {
// submit the entire buffer to the DB
err = okv.(storage.RequestBuffer).Flush()
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
return sendErr
}
// getSupervoxelBlock returns a compressed supervoxel Block of the given block coordinate.
func (d *Data) getSupervoxelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) {
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return nil, err
}
// Retrieve the block of labels
ctx := datastore.NewVersionedCtx(d, v)
index := dvid.IndexZYX(bcoord)
serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
if err != nil {
return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
}
if serialization == nil {
blockSize, ok := d.BlockSize().(dvid.Point3d)
if !ok {
return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
}
return labels.MakeSolidBlock(0, blockSize), nil
}
deserialization, _, err := dvid.DeserializeData(serialization, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize block %s in '%s': %v", bcoord, d.DataName(), err)
}
var block labels.Block
if err = block.UnmarshalBinary(deserialization); err != nil {
return nil, err
}
return &block, nil
}
// getBlockLabels returns a block of labels at the given scale in packed little-endian uint64 format.
func (d *Data) getBlockLabels(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) ([]byte, error) {
block, err := d.getSupervoxelBlock(v, bcoord, scale)
if err != nil {
return nil, err
}
var mapping *VCache
if !supervoxels {
if mapping, err = getMapping(d, v); err != nil {
return nil, err
}
}
if mapping != nil {
err = modifyBlockMapping(v, block, mapping)
if err != nil {
return nil, fmt.Errorf("unable to modify block %s mapping: %v", bcoord, err)
}
}
labelData, _ := block.MakeLabelVolume()
return labelData, nil
}
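// Note: because MakeLabelVolume expands the whole block, the slice returned by getBlockLabels holds
// one little-endian uint64 per voxel of the block, laid out in ZYX order (8 bytes per voxel).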
| String | identifier_name |
blocks.go | package labelmap
import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/janelia-flyem/dvid/datastore"
"github.com/janelia-flyem/dvid/datatype/common/labels"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
lz4 "github.com/janelia-flyem/go/golz4-updated"
)
func writeBlock(w http.ResponseWriter, bcoord dvid.ChunkPoint3d, out []byte) error {
if err := binary.Write(w, binary.LittleEndian, bcoord[0]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[1]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[2]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, uint32(len(out))); err != nil {
return err
}
if written, err := w.Write(out); err != nil || written != int(len(out)) {
if err != nil {
dvid.Errorf("error writing value: %v\n", err)
return err
}
return fmt.Errorf("could not write %d bytes of block %s: only %d bytes written", len(out), bcoord, written)
}
return nil
}
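// The wire format produced by writeBlock is, per block:
//
//	int32  x block coordinate (little endian)
//	int32  y block coordinate (little endian)
//	int32  z block coordinate (little endian)
//	uint32 n, the number of payload bytes (little endian)
//	n bytes of block payload in the requested compression
//
// Blocks are simply concatenated in the response stream with no trailing terminator.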
type blockTiming struct {
readT, transcodeT, writeT time.Duration
readN, transcodeN int64
sync.RWMutex
}
func (bt *blockTiming) writeDone(t0 time.Time) {
bt.Lock()
bt.writeT += time.Since(t0)
bt.Unlock()
}
func (bt *blockTiming) readDone(t0 time.Time) {
bt.Lock()
bt.readT += time.Since(t0)
bt.readN++
bt.Unlock()
}
func (bt *blockTiming) transcodeDone(t0 time.Time) {
bt.Lock()
bt.transcodeT += time.Since(t0)
bt.transcodeN++
bt.Unlock()
}
func (bt *blockTiming) String() string {
var readAvgT, transcodeAvgT, writeAvgT time.Duration
if bt.readN == 0 {
readAvgT = 0
} else {
readAvgT = bt.readT / time.Duration(bt.readN)
}
if bt.transcodeN == 0 {
transcodeAvgT = 0
} else {
transcodeAvgT = bt.transcodeT / time.Duration(bt.transcodeN)
}
if bt.readN == 0 {
writeAvgT = 0
} else {
writeAvgT = bt.writeT / time.Duration(bt.readN)
}
return fmt.Sprintf("read %s (%s), transcode %s (%s), write %s (%s)", bt.readT, readAvgT, bt.transcodeT, transcodeAvgT, bt.writeT, writeAvgT)
}
type blockData struct {
bcoord dvid.ChunkPoint3d
compression string
supervoxels bool
v dvid.VersionID
data []byte
}
// transcodeBlock transcodes a block of data, performing any modifications needed to convert the
// stored compression into the requested compression and to map raw supervoxels to labels when requested.
func (d *Data) transcodeBlock(b blockData) (out []byte, err error) {
formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(b.data[0]))
var start int
if checksum == dvid.CRC32 {
start = 5
} else {
start = 1
}
var outsize uint32
switch formatIn {
case dvid.LZ4:
outsize = binary.LittleEndian.Uint32(b.data[start : start+4])
out = b.data[start+4:]
if len(out) != int(outsize) {
err = fmt.Errorf("block %s was corrupted lz4: supposed size %d but had %d bytes", b.bcoord, outsize, len(out))
return
}
case dvid.Uncompressed, dvid.Gzip:
outsize = uint32(len(b.data[start:]))
out = b.data[start:]
default:
err = fmt.Errorf("labelmap data was stored in unknown compressed format: %s", formatIn)
return
}
var formatOut dvid.CompressionFormat
switch b.compression {
case "", "lz4":
formatOut = dvid.LZ4
case "blocks":
formatOut = formatIn
case "gzip":
formatOut = dvid.Gzip
case "uncompressed":
formatOut = dvid.Uncompressed
default:
err = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
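// As decoded above, a stored block value begins with a one-byte dvid.SerializationFormat that encodes
// both compression and checksum; when a CRC32 checksum is present the payload starts at offset 5,
// otherwise at offset 1, and LZ4 payloads carry an extra little-endian uint32 giving the uncompressed
// size. This note only restates what transcodeBlock already assumes about that layout; dvid.SerializeData
// remains the authoritative definition.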
// writeBlockToHTTP tries to write a single block either by streaming (which allows early termination)
// or through a pipeline that is simpler than the one used for subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" |
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil
}
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// getLabelBlock returns a nil block if no block exists at the given block coordinate.
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int32(xloc), int32(yloc), int32(zloc)}, nil
}
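// For example, a block query string of "1,2,3,10,11,12" is parsed by the loop below into the two
// block coordinates (1,2,3) and (10,11,12); any count of values that is not a multiple of three is
// rejected by sendBlocksSpecific.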
// sendBlocksSpecific writes the specified blocks to the HTTP response -- best for a non-ordered backend.
func (d *Data) sendBlocksSpecific(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, compression, blockstring string, scale uint8) (numBlocks int, err error) {
timedLog := dvid.NewTimeLog()
switch compression {
case "":
compression = "blocks"
case "lz4", "gzip", "blocks", "uncompressed":
break
default:
err = fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
return
}
w.Header().Set("Content-type", "application/octet-stream")
// extract query string
if blockstring == "" {
return
}
coordarray := strings.Split(blockstring, ",")
if len(coordarray)%3 != 0 {
return 0, fmt.Errorf("block query string should be three coordinates per block")
}
var store storage.KeyValueDB
if store, err = datastore.GetKeyValueDB(d); err != nil {
return
}
// launch goroutine that will stream blocks to client
numBlocks = len(coordarray) / 3
wg := new(sync.WaitGroup)
ch := make(chan blockSend, numBlocks)
var sendErr error
var startBlock dvid.ChunkPoint3d
var timing blockTiming
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else if len(data.value) > 0 {
t0 := time.Now()
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
timing.writeDone(t0)
}
wg.Done()
}
timedLog.Infof("labelmap %q specificblocks - finished sending %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
}()
// iterate through each block, get data from store, and transcode based on request parameters
for i := 0; i < len(coordarray); i += 3 {
var bcoord dvid.ChunkPoint3d
if bcoord, err = strArrayToBCoord(coordarray[i : i+3]); err != nil {
return
}
if i == 0 {
startBlock = bcoord
}
wg.Add(1)
t0 := time.Now()
indexBeg := dvid.IndexZYX(bcoord)
keyBeg := NewBlockTKey(scale, &indexBeg)
var value []byte
value, err = store.Get(ctx, keyBeg)
timing.readDone(t0)
if err != nil {
ch <- blockSend{err: err}
return
}
if len(value) > 0 {
go func(bcoord dvid.ChunkPoint3d, value []byte) {
b := blockData{
bcoord: bcoord,
v: ctx.VersionID(),
data: value,
compression: compression,
supervoxels: supervoxels,
}
t0 := time.Now()
out, err := d.transcodeBlock(b)
timing.transcodeDone(t0)
ch <- blockSend{bcoord: bcoord, value: out, err: err}
}(bcoord, value)
} else {
ch <- blockSend{value: nil}
}
}
timedLog.Infof("labelmap %q specificblocks - launched concurrent reads of %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
wg.Wait()
close(ch)
dvid.Infof("labelmap %q specificblocks - %d blocks starting with %s: %s\n", d.DataName(), numBlocks, startBlock, &timing)
return numBlocks, sendErr
}
// sendBlocksVolume writes a series of blocks covering the given block-aligned subvolume to an HTTP response.
func (d *Data) sendBlocksVolume(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, scale uint8, subvol *dvid.Subvolume, compression string) error {
w.Header().Set("Content-type", "application/octet-stream")
switch compression {
case "", "lz4", "gzip", "blocks", "uncompressed":
default:
return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
}
// convert x,y,z coordinates to block coordinates for this scale
blocksdims := subvol.Size().Div(d.BlockSize())
blocksoff := subvol.StartPoint().Div(d.BlockSize())
timedLog := dvid.NewTimeLog()
defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))
numBlocks := int(blocksdims.Prod())
wg := new(sync.WaitGroup)
// launch goroutine that will stream blocks to client
ch := make(chan blockSend, numBlocks)
var sendErr error
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else {
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
}
wg.Done()
}
}()
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return fmt.Errorf("Data type labelmap had error initializing store: %v", err)
}
okv := store.(storage.BufferableOps)
// extract buffer interface
req, hasbuffer := okv.(storage.KeyValueRequester)
if hasbuffer {
okv = req.NewBuffer(ctx)
}
for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
indexBeg := dvid.IndexZYX(beginPoint)
sx, sy, sz := indexBeg.Unpack()
begTKey := NewBlockTKey(scale, &indexBeg)
indexEnd := dvid.IndexZYX(endPoint)
endTKey := NewBlockTKey(scale, &indexEnd)
// Send the entire range of key-value pairs to chunk processor
err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
if c == nil || c.TKeyValue == nil {
return nil
}
kv := c.TKeyValue
if kv.V == nil {
return nil
}
// Determine which block this is.
_, indexZYX, err := DecodeBlockTKey(kv.K)
if err != nil {
return err
}
x, y, z := indexZYX.Unpack()
if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
return nil
}
b := blockData{
bcoord: dvid.ChunkPoint3d{x, y, z},
compression: compression,
supervoxels: supervoxels,
v: ctx.VersionID(),
data: kv.V,
}
wg.Add(1)
go func(b blockData) {
out, err := d.transcodeBlock(b)
ch <- blockSend{bcoord: b.bcoord, value: out, err: err}
}(b)
return nil
})
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
}
wg.Wait()
close(ch)
if hasbuffer {
// submit the entire buffer to the DB
err = okv.(storage.RequestBuffer).Flush()
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
return sendErr
}
// getSupervoxelBlock returns a compressed supervoxel Block at the given block coordinate.
func (d *Data) getSupervoxelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) {
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return nil, err
}
// Retrieve the block of labels
ctx := datastore.NewVersionedCtx(d, v)
index := dvid.IndexZYX(bcoord)
serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
if err != nil {
return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
}
if serialization == nil {
blockSize, ok := d.BlockSize().(dvid.Point3d)
if !ok {
return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
}
return labels.MakeSolidBlock(0, blockSize), nil
}
deserialization, _, err := dvid.DeserializeData(serialization, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize block %s in '%s': %v", bcoord, d.DataName(), err)
}
var block labels.Block
if err = block.UnmarshalBinary(deserialization); err != nil {
return nil, err
}
return &block, nil
}
// getBlockLabels returns a block of labels at the given scale in packed little-endian uint64 format.
func (d *Data) getBlockLabels(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) ([]byte, error) {
block, err := d.getSupervoxelBlock(v, bcoord, scale)
if err != nil {
return nil, err
}
var mapping *VCache
if !supervoxels {
if mapping, err = getMapping(d, v); err != nil {
return nil, err
}
}
if mapping != nil {
err = modifyBlockMapping(v, block, mapping)
if err != nil {
return nil, fmt.Errorf("unable to modify block %s mapping: %v", bcoord, err)
}
}
labelData, _ := block.MakeLabelVolume()
return labelData, nil
}
| {
return
} | conditional_block |
blocks.go | package labelmap
import (
"bytes"
"compress/gzip"
"encoding/binary"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/janelia-flyem/dvid/datastore"
"github.com/janelia-flyem/dvid/datatype/common/labels"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
lz4 "github.com/janelia-flyem/go/golz4-updated"
)
func writeBlock(w http.ResponseWriter, bcoord dvid.ChunkPoint3d, out []byte) error {
if err := binary.Write(w, binary.LittleEndian, bcoord[0]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[1]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, bcoord[2]); err != nil {
return err
}
if err := binary.Write(w, binary.LittleEndian, uint32(len(out))); err != nil {
return err
}
if written, err := w.Write(out); err != nil || written != int(len(out)) {
if err != nil {
dvid.Errorf("error writing value: %v\n", err)
return err
}
return fmt.Errorf("could not write %d bytes of block %s: only %d bytes written", len(out), bcoord, written)
}
return nil
}
type blockTiming struct {
readT, transcodeT, writeT time.Duration
readN, transcodeN int64
sync.RWMutex
}
func (bt *blockTiming) writeDone(t0 time.Time) {
bt.Lock()
bt.writeT += time.Since(t0)
bt.Unlock()
}
func (bt *blockTiming) readDone(t0 time.Time) {
bt.Lock()
bt.readT += time.Since(t0)
bt.readN++
bt.Unlock()
}
func (bt *blockTiming) transcodeDone(t0 time.Time) {
bt.Lock()
bt.transcodeT += time.Since(t0)
bt.transcodeN++
bt.Unlock()
}
func (bt *blockTiming) String() string {
var readAvgT, transcodeAvgT, writeAvgT time.Duration
if bt.readN == 0 {
readAvgT = 0
} else {
readAvgT = bt.readT / time.Duration(bt.readN)
}
if bt.transcodeN == 0 {
transcodeAvgT = 0
} else {
transcodeAvgT = bt.transcodeT / time.Duration(bt.transcodeN)
}
if bt.readN == 0 {
writeAvgT = 0
} else {
writeAvgT = bt.writeT / time.Duration(bt.readN)
}
return fmt.Sprintf("read %s (%s), transcode %s (%s), write %s (%s)", bt.readT, readAvgT, bt.transcodeT, transcodeAvgT, bt.writeT, writeAvgT)
}
type blockData struct {
bcoord dvid.ChunkPoint3d
compression string
supervoxels bool
v dvid.VersionID
data []byte
}
// transcodeBlock transcodes a block of data, performing any modifications needed to convert the
// stored compression into the requested compression and to map raw supervoxels to labels when requested.
func (d *Data) transcodeBlock(b blockData) (out []byte, err error) {
formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(b.data[0]))
var start int
if checksum == dvid.CRC32 {
start = 5
} else {
start = 1
}
var outsize uint32
switch formatIn {
case dvid.LZ4:
outsize = binary.LittleEndian.Uint32(b.data[start : start+4])
out = b.data[start+4:]
if len(out) != int(outsize) {
err = fmt.Errorf("block %s was corrupted lz4: supposed size %d but had %d bytes", b.bcoord, outsize, len(out))
return
}
case dvid.Uncompressed, dvid.Gzip:
outsize = uint32(len(b.data[start:]))
out = b.data[start:]
default:
err = fmt.Errorf("labelmap data was stored in unknown compressed format: %s", formatIn)
return
}
var formatOut dvid.CompressionFormat
switch b.compression {
case "", "lz4":
formatOut = dvid.LZ4
case "blocks":
formatOut = formatIn
case "gzip":
formatOut = dvid.Gzip
case "uncompressed":
formatOut = dvid.Uncompressed
default:
err = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// writeBlockToHTTP tries to write a single block either by streaming (which allows early termination)
// or through a pipeline that is simpler than the one used for subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" {
return
}
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
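// When writeBlockToHTTP returns done=false with a nil error, the request simply did not qualify for
// this single-block fast path (an ROI was supplied, the request was not 3d, it spanned more than one
// block, or it was not block-aligned), and callers presumably fall back to the general subvolume path.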
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil | bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// getLabelBlock returns a nil block if no block exists at the given block coordinate.
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int32(xloc), int32(yloc), int32(zloc)}, nil
}
// sendBlocksSpecific writes the specified blocks to the HTTP response -- best for a non-ordered backend.
func (d *Data) sendBlocksSpecific(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, compression, blockstring string, scale uint8) (numBlocks int, err error) {
timedLog := dvid.NewTimeLog()
switch compression {
case "":
compression = "blocks"
case "lz4", "gzip", "blocks", "uncompressed":
break
default:
err = fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
return
}
w.Header().Set("Content-type", "application/octet-stream")
// extract query string
if blockstring == "" {
return
}
coordarray := strings.Split(blockstring, ",")
if len(coordarray)%3 != 0 {
return 0, fmt.Errorf("block query string should be three coordinates per block")
}
var store storage.KeyValueDB
if store, err = datastore.GetKeyValueDB(d); err != nil {
return
}
// launch goroutine that will stream blocks to client
numBlocks = len(coordarray) / 3
wg := new(sync.WaitGroup)
ch := make(chan blockSend, numBlocks)
var sendErr error
var startBlock dvid.ChunkPoint3d
var timing blockTiming
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else if len(data.value) > 0 {
t0 := time.Now()
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
timing.writeDone(t0)
}
wg.Done()
}
timedLog.Infof("labelmap %q specificblocks - finished sending %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
}()
// iterate through each block, get data from store, and transcode based on request parameters
for i := 0; i < len(coordarray); i += 3 {
var bcoord dvid.ChunkPoint3d
if bcoord, err = strArrayToBCoord(coordarray[i : i+3]); err != nil {
return
}
if i == 0 {
startBlock = bcoord
}
wg.Add(1)
t0 := time.Now()
indexBeg := dvid.IndexZYX(bcoord)
keyBeg := NewBlockTKey(scale, &indexBeg)
var value []byte
value, err = store.Get(ctx, keyBeg)
timing.readDone(t0)
if err != nil {
ch <- blockSend{err: err}
return
}
if len(value) > 0 {
go func(bcoord dvid.ChunkPoint3d, value []byte) {
b := blockData{
bcoord: bcoord,
v: ctx.VersionID(),
data: value,
compression: compression,
supervoxels: supervoxels,
}
t0 := time.Now()
out, err := d.transcodeBlock(b)
timing.transcodeDone(t0)
ch <- blockSend{bcoord: bcoord, value: out, err: err}
}(bcoord, value)
} else {
ch <- blockSend{value: nil}
}
}
timedLog.Infof("labelmap %q specificblocks - launched concurrent reads of %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
wg.Wait()
close(ch)
dvid.Infof("labelmap %q specificblocks - %d blocks starting with %s: %s\n", d.DataName(), numBlocks, startBlock, &timing)
return numBlocks, sendErr
}
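// A minimal client-side decoder for the stream emitted above might look like the following
// (illustrative sketch only, not part of DVID; r is assumed to wrap the HTTP response body and the
// caller would import "encoding/binary" and "io"):
//
//	for {
//	    var coord [3]int32
//	    if err := binary.Read(r, binary.LittleEndian, &coord); err == io.EOF {
//	        break
//	    } else if err != nil {
//	        return err
//	    }
//	    var n uint32
//	    if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
//	        return err
//	    }
//	    buf := make([]byte, n)
//	    if _, err := io.ReadFull(r, buf); err != nil {
//	        return err
//	    }
//	    // buf now holds the block at coord in the requested compression
//	}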
// sendBlocksVolume writes a series of blocks covering the given block-aligned subvolume to an HTTP response.
func (d *Data) sendBlocksVolume(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, scale uint8, subvol *dvid.Subvolume, compression string) error {
w.Header().Set("Content-type", "application/octet-stream")
switch compression {
case "", "lz4", "gzip", "blocks", "uncompressed":
default:
return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
}
// convert x,y,z coordinates to block coordinates for this scale
blocksdims := subvol.Size().Div(d.BlockSize())
blocksoff := subvol.StartPoint().Div(d.BlockSize())
timedLog := dvid.NewTimeLog()
defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))
numBlocks := int(blocksdims.Prod())
wg := new(sync.WaitGroup)
// launch goroutine that will stream blocks to client
ch := make(chan blockSend, numBlocks)
var sendErr error
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else {
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
}
wg.Done()
}
}()
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return fmt.Errorf("Data type labelmap had error initializing store: %v", err)
}
okv := store.(storage.BufferableOps)
// extract buffer interface
req, hasbuffer := okv.(storage.KeyValueRequester)
if hasbuffer {
okv = req.NewBuffer(ctx)
}
for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
indexBeg := dvid.IndexZYX(beginPoint)
sx, sy, sz := indexBeg.Unpack()
begTKey := NewBlockTKey(scale, &indexBeg)
indexEnd := dvid.IndexZYX(endPoint)
endTKey := NewBlockTKey(scale, &indexEnd)
// Send the entire range of key-value pairs to chunk processor
err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
if c == nil || c.TKeyValue == nil {
return nil
}
kv := c.TKeyValue
if kv.V == nil {
return nil
}
// Determine which block this is.
_, indexZYX, err := DecodeBlockTKey(kv.K)
if err != nil {
return err
}
x, y, z := indexZYX.Unpack()
if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
return nil
}
b := blockData{
bcoord: dvid.ChunkPoint3d{x, y, z},
compression: compression,
supervoxels: supervoxels,
v: ctx.VersionID(),
data: kv.V,
}
wg.Add(1)
go func(b blockData) {
out, err := d.transcodeBlock(b)
ch <- blockSend{bcoord: b.bcoord, value: out, err: err}
}(b)
return nil
})
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
}
wg.Wait()
close(ch)
if hasbuffer {
// submit the entire buffer to the DB
err = okv.(storage.RequestBuffer).Flush()
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
return sendErr
}
// getSupervoxelBlock returns a compressed supervoxel Block at the given block coordinate.
func (d *Data) getSupervoxelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) {
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return nil, err
}
// Retrieve the block of labels
ctx := datastore.NewVersionedCtx(d, v)
index := dvid.IndexZYX(bcoord)
serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
if err != nil {
return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
}
if serialization == nil {
blockSize, ok := d.BlockSize().(dvid.Point3d)
if !ok {
return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
}
return labels.MakeSolidBlock(0, blockSize), nil
}
deserialization, _, err := dvid.DeserializeData(serialization, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize block %s in '%s': %v", bcoord, d.DataName(), err)
}
var block labels.Block
if err = block.UnmarshalBinary(deserialization); err != nil {
return nil, err
}
return &block, nil
}
// getBlockLabels returns a block of labels at the given scale in packed little-endian uint64 format.
func (d *Data) getBlockLabels(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) ([]byte, error) {
block, err := d.getSupervoxelBlock(v, bcoord, scale)
if err != nil {
return nil, err
}
var mapping *VCache
if !supervoxels {
if mapping, err = getMapping(d, v); err != nil {
return nil, err
}
}
if mapping != nil {
err = modifyBlockMapping(v, block, mapping)
if err != nil {
return nil, fmt.Errorf("unable to modify block %s mapping: %v", bcoord, err)
}
}
labelData, _ := block.MakeLabelVolume()
return labelData, nil
} | }
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error { | random_line_split |
external_accounts.go | package db
import (
"context"
"database/sql"
"fmt"
multierror "github.com/hashicorp/go-multierror"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db/dbconn"
"github.com/sourcegraph/sourcegraph/pkg/extsvc"
log15 "gopkg.in/inconshreveable/log15.v2"
)
// userExternalAccountNotFoundError is the error that is returned when a user external account is not found.
type userExternalAccountNotFoundError struct {
args []interface{}
}
func (err userExternalAccountNotFoundError) Error() string {
return fmt.Sprintf("user external account not found: %v", err.args)
}
func (err userExternalAccountNotFoundError) NotFound() bool {
return true
}
// userExternalAccounts provides access to the `user_external_accounts` table.
type userExternalAccounts struct{}
// Get gets information about the user external account.
func (s *userExternalAccounts) Get(ctx context.Context, id int32) (*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.Get != nil {
return Mocks.ExternalAccounts.Get(id)
}
return s.getBySQL(ctx, sqlf.Sprintf("WHERE id=%d AND deleted_at IS NULL LIMIT 1", id))
}
// LookupUserAndSave is used for authenticating a user (when both their Sourcegraph account and the
// association with the external account already exist).
//
// It looks up the existing user associated with the external account's ExternalAccountSpec. If
// found, it updates the account's data and returns the user. It NEVER creates a user; you must call
// CreateUserAndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
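// A typical sign-in flow built on the methods above might look like the following sketch
// (illustrative only; names such as errcode.IsNotFound and the package-level ExternalAccounts store
// are assumptions here, and the real call sites live in the auth packages):
//
//	userID, err := ExternalAccounts.LookupUserAndSave(ctx, spec, data)
//	if err != nil && errcode.IsNotFound(err) {
//	    // No Sourcegraph user is linked to this external account yet, so create one.
//	    userID, err = ExternalAccounts.CreateUserAndSave(ctx, newUser, spec, data)
//	}
//	if err != nil {
//	    return err
//	}
//	// userID now identifies the authenticated Sourcegraph user.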
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil |
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although it did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil {
return nil, err
}
if len(results) != 1 {
return nil, userExternalAccountNotFoundError{querySuffix.Args()}
}
return results[0], nil
}
func (*userExternalAccounts) listBySQL(ctx context.Context, querySuffix *sqlf.Query) ([]*extsvc.ExternalAccount, error) {
q := sqlf.Sprintf(`SELECT t.id, t.user_id, t.service_type, t.service_id, t.client_id, t.account_id, t.auth_data, t.account_data, t.created_at, t.updated_at FROM user_external_accounts t %s`, querySuffix)
rows, err := dbconn.Global.QueryContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...)
if err != nil {
return nil, err
}
var results []*extsvc.ExternalAccount
defer rows.Close()
for rows.Next() {
var o extsvc.ExternalAccount
if err := rows.Scan(&o.ID, &o.UserID, &o.ServiceType, &o.ServiceID, &o.ClientID, &o.AccountID, &o.AuthData, &o.AccountData, &o.CreatedAt, &o.UpdatedAt); err != nil {
return nil, err
}
results = append(results, &o)
}
return results, rows.Err()
}
func (*userExternalAccounts) listSQL(opt ExternalAccountsListOptions) (conds []*sqlf.Query) {
conds = []*sqlf.Query{sqlf.Sprintf("deleted_at IS NULL")}
if opt.UserID != 0 {
conds = append(conds, sqlf.Sprintf("user_id=%d", opt.UserID))
}
if opt.ServiceType != "" || opt.ServiceID != "" || opt.ClientID != "" {
conds = append(conds, sqlf.Sprintf("(service_type=%s AND service_id=%s AND client_id=%s)", opt.ServiceType, opt.ServiceID, opt.ClientID))
}
return conds
}
// MockExternalAccounts mocks the Stores.ExternalAccounts DB store.
type MockExternalAccounts struct {
Get func(id int32) (*extsvc.ExternalAccount, error)
LookupUserAndSave func(extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (userID int32, err error)
AssociateUserAndSave func(userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error
CreateUserAndSave func(NewUser, extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (createdUserID int32, err error)
Delete func(id int32) error
List func(ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error)
Count func(ExternalAccountsListOptions) (int, error)
}
| {
return Mocks.ExternalAccounts.Delete(id)
} | conditional_block |
external_accounts.go | package db
import (
"context"
"database/sql"
"fmt"
multierror "github.com/hashicorp/go-multierror"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db/dbconn"
"github.com/sourcegraph/sourcegraph/pkg/extsvc"
log15 "gopkg.in/inconshreveable/log15.v2"
)
// userExternalAccountNotFoundError is the error that is returned when a user external account is not found.
type userExternalAccountNotFoundError struct {
args []interface{}
}
func (err userExternalAccountNotFoundError) Error() string {
return fmt.Sprintf("user external account not found: %v", err.args)
}
func (err userExternalAccountNotFoundError) NotFound() bool {
return true
}
// userExternalAccounts provides access to the `user_external_accounts` table.
type userExternalAccounts struct{}
// Get gets information about the user external account.
func (s *userExternalAccounts) Get(ctx context.Context, id int32) (*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.Get != nil {
return Mocks.ExternalAccounts.Get(id)
}
return s.getBySQL(ctx, sqlf.Sprintf("WHERE id=%d AND deleted_at IS NULL LIMIT 1", id))
}
// LookupUserAndSave is used for authenticating a user (when both their Sourcegraph account and the
// association with the external account already exist).
//
// It looks up the existing user associated with the external account's ExternalAccountSpec. If
// found, it updates the account's data and returns the user. It NEVER creates a user; you must call
// CreateUserAndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) | (ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although it did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
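// Illustrative sketch (an assumption — the real caller lives in the bg
// package, which is not part of this file): the background migration is
// expected to pass the configured auth provider's service type, e.g.:
//
// if err := (&userExternalAccounts{}).TmpMigrate(ctx, "saml"); err != nil {
// log15.Error("external accounts migration failed", "err", err)
// }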
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil {
return nil, err
}
if len(results) != 1 {
return nil, userExternalAccountNotFoundError{querySuffix.Args()}
}
return results[0], nil
}
func (*userExternalAccounts) listBySQL(ctx context.Context, querySuffix *sqlf.Query) ([]*extsvc.ExternalAccount, error) {
q := sqlf.Sprintf(`SELECT t.id, t.user_id, t.service_type, t.service_id, t.client_id, t.account_id, t.auth_data, t.account_data, t.created_at, t.updated_at FROM user_external_accounts t %s`, querySuffix)
rows, err := dbconn.Global.QueryContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...)
if err != nil {
return nil, err
}
var results []*extsvc.ExternalAccount
defer rows.Close()
for rows.Next() {
var o extsvc.ExternalAccount
if err := rows.Scan(&o.ID, &o.UserID, &o.ServiceType, &o.ServiceID, &o.ClientID, &o.AccountID, &o.AuthData, &o.AccountData, &o.CreatedAt, &o.UpdatedAt); err != nil {
return nil, err
}
results = append(results, &o)
}
return results, rows.Err()
}
func (*userExternalAccounts) listSQL(opt ExternalAccountsListOptions) (conds []*sqlf.Query) {
conds = []*sqlf.Query{sqlf.Sprintf("deleted_at IS NULL")}
if opt.UserID != 0 {
conds = append(conds, sqlf.Sprintf("user_id=%d", opt.UserID))
}
if opt.ServiceType != "" || opt.ServiceID != "" || opt.ClientID != "" {
conds = append(conds, sqlf.Sprintf("(service_type=%s AND service_id=%s AND client_id=%s)", opt.ServiceType, opt.ServiceID, opt.ClientID))
}
return conds
}
// MockExternalAccounts mocks the Stores.ExternalAccounts DB store.
type MockExternalAccounts struct {
Get func(id int32) (*extsvc.ExternalAccount, error)
LookupUserAndSave func(extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (userID int32, err error)
AssociateUserAndSave func(userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error
CreateUserAndSave func(NewUser, extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (createdUserID int32, err error)
Delete func(id int32) error
List func(ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error)
Count func(ExternalAccountsListOptions) (int, error)
}
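// Illustrative test sketch (an assumption, not part of the original file):
// tests can stub the store through Mocks.ExternalAccounts and reset it when done.
//
// Mocks.ExternalAccounts.Get = func(id int32) (*extsvc.ExternalAccount, error) {
// return &extsvc.ExternalAccount{ID: id}, nil
// }
// defer func() { Mocks.ExternalAccounts = MockExternalAccounts{} }()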
| TmpMigrate | identifier_name |
external_accounts.go | package db
import (
"context"
"database/sql"
"fmt"
multierror "github.com/hashicorp/go-multierror"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db/dbconn"
"github.com/sourcegraph/sourcegraph/pkg/extsvc"
log15 "gopkg.in/inconshreveable/log15.v2"
)
// userExternalAccountNotFoundError is the error that is returned when a user external account is not found.
type userExternalAccountNotFoundError struct {
args []interface{}
}
func (err userExternalAccountNotFoundError) Error() string {
return fmt.Sprintf("user external account not found: %v", err.args)
}
func (err userExternalAccountNotFoundError) NotFound() bool {
return true
}
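// Illustrative sketch (an assumption, not part of the original file): callers
// typically detect the not-found case through the NotFound method rather than
// by asserting on the concrete error type.
func isUserExternalAccountNotFound(err error) bool {
type notFounder interface{ NotFound() bool }
nf, ok := err.(notFounder)
return ok && nf.NotFound()
}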
// userExternalAccounts provides access to the `user_external_accounts` table.
type userExternalAccounts struct{}
// Get gets information about the user external account.
func (s *userExternalAccounts) Get(ctx context.Context, id int32) (*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.Get != nil {
return Mocks.ExternalAccounts.Get(id)
}
return s.getBySQL(ctx, sqlf.Sprintf("WHERE id=%d AND deleted_at IS NULL LIMIT 1", id))
}
// LookupUserAndSave is used for authenticating a user (when both their Sourcegraph account and the
// association with the external account already exist).
//
// It looks up the existing user associated with the external account's ExternalAccountSpec. If
// found, it updates the account's data and returns the user. It NEVER creates a user; you must call
// CreateUserAndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
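// Illustrative usage sketch (the helper name below is an assumption, not part
// of the original file): an external-auth sign-in first tries
// LookupUserAndSave and only falls back to CreateUserAndSave when no
// association exists yet.
func getOrCreateUserFromExternalAccount(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (int32, error) {
s := &userExternalAccounts{}
userID, err := s.LookupUserAndSave(ctx, spec, data)
if err == nil {
return userID, nil
}
if _, notFound := err.(userExternalAccountNotFoundError); !notFound {
return 0, err
}
return s.CreateUserAndSave(ctx, newUser, spec, data)
}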
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
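// Illustrative usage sketch (not part of the original file): linking an extra
// external account to the currently signed-in user; a non-nil error covers,
// among other things, the account already being bound to a different user.
//
// if err := (&userExternalAccounts{}).AssociateUserAndSave(ctx, currentUserID, spec, data); err != nil {
// return err
// }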
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although it did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil { | return nil, err
}
if len(results) != 1 {
return nil, userExternalAccountNotFoundError{querySuffix.Args()}
}
return results[0], nil
}
func (*userExternalAccounts) listBySQL(ctx context.Context, querySuffix *sqlf.Query) ([]*extsvc.ExternalAccount, error) {
q := sqlf.Sprintf(`SELECT t.id, t.user_id, t.service_type, t.service_id, t.client_id, t.account_id, t.auth_data, t.account_data, t.created_at, t.updated_at FROM user_external_accounts t %s`, querySuffix)
rows, err := dbconn.Global.QueryContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...)
if err != nil {
return nil, err
}
var results []*extsvc.ExternalAccount
defer rows.Close()
for rows.Next() {
var o extsvc.ExternalAccount
if err := rows.Scan(&o.ID, &o.UserID, &o.ServiceType, &o.ServiceID, &o.ClientID, &o.AccountID, &o.AuthData, &o.AccountData, &o.CreatedAt, &o.UpdatedAt); err != nil {
return nil, err
}
results = append(results, &o)
}
return results, rows.Err()
}
func (*userExternalAccounts) listSQL(opt ExternalAccountsListOptions) (conds []*sqlf.Query) {
conds = []*sqlf.Query{sqlf.Sprintf("deleted_at IS NULL")}
if opt.UserID != 0 {
conds = append(conds, sqlf.Sprintf("user_id=%d", opt.UserID))
}
if opt.ServiceType != "" || opt.ServiceID != "" || opt.ClientID != "" {
conds = append(conds, sqlf.Sprintf("(service_type=%s AND service_id=%s AND client_id=%s)", opt.ServiceType, opt.ServiceID, opt.ClientID))
}
return conds
}
// MockExternalAccounts mocks the Stores.ExternalAccounts DB store.
type MockExternalAccounts struct {
Get func(id int32) (*extsvc.ExternalAccount, error)
LookupUserAndSave func(extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (userID int32, err error)
AssociateUserAndSave func(userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error
CreateUserAndSave func(NewUser, extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (createdUserID int32, err error)
Delete func(id int32) error
List func(ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error)
Count func(ExternalAccountsListOptions) (int, error)
} | random_line_split |
|
external_accounts.go | package db
import (
"context"
"database/sql"
"fmt"
multierror "github.com/hashicorp/go-multierror"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db/dbconn"
"github.com/sourcegraph/sourcegraph/pkg/extsvc"
log15 "gopkg.in/inconshreveable/log15.v2"
)
// userExternalAccountNotFoundError is the error that is returned when a user external account is not found.
type userExternalAccountNotFoundError struct {
args []interface{}
}
func (err userExternalAccountNotFoundError) Error() string {
return fmt.Sprintf("user external account not found: %v", err.args)
}
func (err userExternalAccountNotFoundError) NotFound() bool {
return true
}
// userExternalAccounts provides access to the `user_external_accounts` table.
type userExternalAccounts struct{}
// Get gets information about the user external account.
func (s *userExternalAccounts) Get(ctx context.Context, id int32) (*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.Get != nil {
return Mocks.ExternalAccounts.Get(id)
}
return s.getBySQL(ctx, sqlf.Sprintf("WHERE id=%d AND deleted_at IS NULL LIMIT 1", id))
}
// LookupUserAndSave is used for authenticating a user (when both their Sourcegraph account and the
// association with the external account already exist).
//
// It looks up the existing user associated with the external account's ExternalAccountSpec. If
// found, it updates the account's data and returns the user. It NEVER creates a user; you must call
// CreateUserAndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although it did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) |
func (*userExternalAccounts) listBySQL(ctx context.Context, querySuffix *sqlf.Query) ([]*extsvc.ExternalAccount, error) {
q := sqlf.Sprintf(`SELECT t.id, t.user_id, t.service_type, t.service_id, t.client_id, t.account_id, t.auth_data, t.account_data, t.created_at, t.updated_at FROM user_external_accounts t %s`, querySuffix)
rows, err := dbconn.Global.QueryContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...)
if err != nil {
return nil, err
}
var results []*extsvc.ExternalAccount
defer rows.Close()
for rows.Next() {
var o extsvc.ExternalAccount
if err := rows.Scan(&o.ID, &o.UserID, &o.ServiceType, &o.ServiceID, &o.ClientID, &o.AccountID, &o.AuthData, &o.AccountData, &o.CreatedAt, &o.UpdatedAt); err != nil {
return nil, err
}
results = append(results, &o)
}
return results, rows.Err()
}
func (*userExternalAccounts) listSQL(opt ExternalAccountsListOptions) (conds []*sqlf.Query) {
conds = []*sqlf.Query{sqlf.Sprintf("deleted_at IS NULL")}
if opt.UserID != 0 {
conds = append(conds, sqlf.Sprintf("user_id=%d", opt.UserID))
}
if opt.ServiceType != "" || opt.ServiceID != "" || opt.ClientID != "" {
conds = append(conds, sqlf.Sprintf("(service_type=%s AND service_id=%s AND client_id=%s)", opt.ServiceType, opt.ServiceID, opt.ClientID))
}
return conds
}
// MockExternalAccounts mocks the Stores.ExternalAccounts DB store.
type MockExternalAccounts struct {
Get func(id int32) (*extsvc.ExternalAccount, error)
LookupUserAndSave func(extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (userID int32, err error)
AssociateUserAndSave func(userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error
CreateUserAndSave func(NewUser, extsvc.ExternalAccountSpec, extsvc.ExternalAccountData) (createdUserID int32, err error)
Delete func(id int32) error
List func(ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error)
Count func(ExternalAccountsListOptions) (int, error)
}
| {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil {
return nil, err
}
if len(results) != 1 {
return nil, userExternalAccountNotFoundError{querySuffix.Args()}
}
return results[0], nil
} | identifier_body |
smt.rs | use std::sync::Arc;
use anyhow::Result;
use criterion::{criterion_group, BenchmarkId, Criterion, Throughput};
use gw_builtin_binaries::{file_checksum, Resource};
use gw_common::{
blake2b::new_blake2b,
builtins::{CKB_SUDT_ACCOUNT_ID, ETH_REGISTRY_ACCOUNT_ID},
registry_address::RegistryAddress,
state::State,
};
use gw_config::{BackendConfig, BackendForkConfig, GenesisConfig, StoreConfig};
use gw_generator::{
account_lock_manage::{always_success::AlwaysSuccess, AccountLockManage},
backend_manage::BackendManage,
genesis::build_genesis_from_store,
traits::StateExt,
Generator,
};
use gw_store::{
mem_pool_state::MemPoolState,
schema::COLUMNS,
state::{
history::history_state::{HistoryState, RWConfig},
state_db::StateDB,
traits::JournalDB,
MemStateDB,
},
traits::chain_store::ChainStore,
Store,
};
use gw_traits::{ChainView, CodeStore};
use gw_types::{
bytes::Bytes,
core::{AllowedEoaType, ScriptHashType, Status},
h256::*,
packed::{
AccountMerkleState, AllowedTypeHash, BlockInfo, BlockMerkleState, Fee, GlobalState,
L2Block, RawL2Block, RawL2Transaction, RollupConfig, SUDTArgs, SUDTTransfer, Script,
SubmitTransactions,
},
prelude::*,
U256,
};
use gw_utils::RollupContext;
use pprof::criterion::{Output, PProfProfiler};
// meta contract
const META_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn | (c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
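// Illustrative note (an assumption, not shown in this file): the crate's
// bench entry point is expected to register this group with Criterion, e.g.:
//
// criterion_main!(smt);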
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
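// Illustrative note derived from build_script above: for n = 1 the registry
// address is the 20-byte array 01 00 00 00 followed by sixteen zero bytes,
// and the script args are the fixed 32-byte 0x2a prefix followed by that
// address, 52 bytes in total.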
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfers from id to id + 1 until we reach the target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
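// Illustrative walk-through of accounts_transfer above: transfers proceed
// round-robin, so with start_account_id = 1, end_account_id = 3 and count = 5
// the pairs are 1->2, 2->3, 3->1, 1->2, 2->3, and the block producer collects
// one unit of fee per transfer, which is what the final assert_eq! checks.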
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
fn init_genesis(store: &Store, config: &GenesisConfig, accounts: u32) {
if store.has_genesis().unwrap() {
let chain_id = store.get_chain_id().unwrap();
if chain_id == ROLLUP_TYPE_HASH {
return;
} else {
panic!("store genesis already initialized");
}
}
let mut db = store.begin_transaction();
db.setup_chain_id(ROLLUP_TYPE_HASH).unwrap();
let (mut db, genesis_state) =
build_genesis_from_store(db, config, Default::default()).unwrap();
let smt = db
.state_smt_with_merkle_state(genesis_state.genesis.raw().post_account())
.unwrap();
let account_count = genesis_state.genesis.raw().post_account().count().unpack();
let mut state = {
let history_state = HistoryState::new(smt, account_count, RWConfig::attach_block(0));
StateDB::new(history_state)
};
Self::generate_accounts(&mut state, accounts + 1); // Plus block producer
state.finalise().unwrap();
let (genesis, global_state) = {
let prev_state_checkpoint: [u8; 32] = state.calculate_state_checkpoint().unwrap();
let submit_txs = SubmitTransactions::new_builder()
.prev_state_checkpoint(prev_state_checkpoint.pack())
.build();
// calculate post state
let post_account = {
let root = state.calculate_root().unwrap();
let count = state.get_account_count().unwrap();
AccountMerkleState::new_builder()
.merkle_root(root.pack())
.count(count.pack())
.build()
};
let raw_genesis = RawL2Block::new_builder()
.number(0u64.pack())
.parent_block_hash([0u8; 32].pack())
.timestamp(1.pack())
.post_account(post_account.clone())
.submit_transactions(submit_txs)
.build();
// generate block proof
let genesis_hash = raw_genesis.hash();
let (block_root, block_proof) = {
let block_key = RawL2Block::compute_smt_key(0);
let mut smt = db.block_smt().unwrap();
smt.update(block_key.into(), genesis_hash.into()).unwrap();
let block_proof = smt
.merkle_proof(vec![block_key.into()])
.unwrap()
.compile(vec![block_key.into()])
.unwrap();
let block_root = *smt.root();
(block_root, block_proof)
};
// build genesis
let genesis = L2Block::new_builder()
.raw(raw_genesis)
.block_proof(block_proof.0.pack())
.build();
let global_state = {
let post_block = BlockMerkleState::new_builder()
.merkle_root({
let root: [u8; 32] = block_root.into();
root.pack()
})
.count(1u64.pack())
.build();
let rollup_config_hash = {
let mut hasher = new_blake2b();
hasher.update(
Into::<RollupConfig>::into(config.rollup_config.clone()).as_slice(),
);
let mut hash = [0u8; 32];
hasher.finalize(&mut hash);
hash
};
GlobalState::new_builder()
.account(post_account)
.block(post_block)
.status((Status::Running as u8).into())
.rollup_config_hash(rollup_config_hash.pack())
.tip_block_hash(genesis.hash().pack())
.build()
};
db.set_block_smt_root(global_state.block().merkle_root().unpack())
.unwrap();
(genesis, global_state)
};
let prev_txs_state = genesis.as_reader().raw().post_account().to_entity();
db.insert_block(
genesis.clone(),
global_state,
prev_txs_state,
Vec::new(),
Default::default(),
Vec::new(),
)
.unwrap();
db.attach_block(genesis).unwrap();
db.commit().unwrap();
}
}
| bench_ckb_transfer | identifier_name |
smt.rs | use std::sync::Arc;
use anyhow::Result;
use criterion::{criterion_group, BenchmarkId, Criterion, Throughput};
use gw_builtin_binaries::{file_checksum, Resource};
use gw_common::{
blake2b::new_blake2b,
builtins::{CKB_SUDT_ACCOUNT_ID, ETH_REGISTRY_ACCOUNT_ID},
registry_address::RegistryAddress,
state::State,
};
use gw_config::{BackendConfig, BackendForkConfig, GenesisConfig, StoreConfig};
use gw_generator::{
account_lock_manage::{always_success::AlwaysSuccess, AccountLockManage},
backend_manage::BackendManage,
genesis::build_genesis_from_store,
traits::StateExt,
Generator,
};
use gw_store::{
mem_pool_state::MemPoolState,
schema::COLUMNS,
state::{
history::history_state::{HistoryState, RWConfig},
state_db::StateDB,
traits::JournalDB,
MemStateDB,
},
traits::chain_store::ChainStore,
Store,
};
use gw_traits::{ChainView, CodeStore};
use gw_types::{
bytes::Bytes,
core::{AllowedEoaType, ScriptHashType, Status},
h256::*,
packed::{
AccountMerkleState, AllowedTypeHash, BlockInfo, BlockMerkleState, Fee, GlobalState,
L2Block, RawL2Block, RawL2Transaction, RollupConfig, SUDTArgs, SUDTTransfer, Script,
SubmitTransactions,
},
prelude::*,
U256,
};
use gw_utils::RollupContext;
use pprof::criterion::{Output, PProfProfiler};
| // meta contract
const META_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfers from id to id + 1 until we reach the target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
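// Illustrative note derived from generate_accounts above: every generated
// account is mapped to an ETH registry address and pre-funded with
// CKB_BALANCE (100_000_000) units of the CKB sUDT, so benchmark transfers
// never fail for lack of balance.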
fn init_genesis(store: &Store, config: &GenesisConfig, accounts: u32) {
if store.has_genesis().unwrap() {
let chain_id = store.get_chain_id().unwrap();
if chain_id == ROLLUP_TYPE_HASH {
return;
} else {
panic!("store genesis already initialized");
}
}
let mut db = store.begin_transaction();
db.setup_chain_id(ROLLUP_TYPE_HASH).unwrap();
let (mut db, genesis_state) =
build_genesis_from_store(db, config, Default::default()).unwrap();
let smt = db
.state_smt_with_merkle_state(genesis_state.genesis.raw().post_account())
.unwrap();
let account_count = genesis_state.genesis.raw().post_account().count().unpack();
let mut state = {
let history_state = HistoryState::new(smt, account_count, RWConfig::attach_block(0));
StateDB::new(history_state)
};
Self::generate_accounts(&mut state, accounts + 1); // Plus block producer
state.finalise().unwrap();
let (genesis, global_state) = {
let prev_state_checkpoint: [u8; 32] = state.calculate_state_checkpoint().unwrap();
let submit_txs = SubmitTransactions::new_builder()
.prev_state_checkpoint(prev_state_checkpoint.pack())
.build();
// calculate post state
let post_account = {
let root = state.calculate_root().unwrap();
let count = state.get_account_count().unwrap();
AccountMerkleState::new_builder()
.merkle_root(root.pack())
.count(count.pack())
.build()
};
let raw_genesis = RawL2Block::new_builder()
.number(0u64.pack())
.parent_block_hash([0u8; 32].pack())
.timestamp(1.pack())
.post_account(post_account.clone())
.submit_transactions(submit_txs)
.build();
// generate block proof
let genesis_hash = raw_genesis.hash();
let (block_root, block_proof) = {
let block_key = RawL2Block::compute_smt_key(0);
let mut smt = db.block_smt().unwrap();
smt.update(block_key.into(), genesis_hash.into()).unwrap();
let block_proof = smt
.merkle_proof(vec![block_key.into()])
.unwrap()
.compile(vec![block_key.into()])
.unwrap();
let block_root = *smt.root();
(block_root, block_proof)
};
// build genesis
let genesis = L2Block::new_builder()
.raw(raw_genesis)
.block_proof(block_proof.0.pack())
.build();
let global_state = {
let post_block = BlockMerkleState::new_builder()
.merkle_root({
let root: [u8; 32] = block_root.into();
root.pack()
})
.count(1u64.pack())
.build();
let rollup_config_hash = {
let mut hasher = new_blake2b();
hasher.update(
Into::<RollupConfig>::into(config.rollup_config.clone()).as_slice(),
);
let mut hash = [0u8; 32];
hasher.finalize(&mut hash);
hash
};
GlobalState::new_builder()
.account(post_account)
.block(post_block)
.status((Status::Running as u8).into())
.rollup_config_hash(rollup_config_hash.pack())
.tip_block_hash(genesis.hash().pack())
.build()
};
db.set_block_smt_root(global_state.block().merkle_root().unpack())
.unwrap();
(genesis, global_state)
};
let prev_txs_state = genesis.as_reader().raw().post_account().to_entity();
db.insert_block(
genesis.clone(),
global_state,
prev_txs_state,
Vec::new(),
Default::default(),
Vec::new(),
)
.unwrap();
db.attach_block(genesis).unwrap();
db.commit().unwrap();
}
} | random_line_split |
|
smt.rs | use std::sync::Arc;
use anyhow::Result;
use criterion::{criterion_group, BenchmarkId, Criterion, Throughput};
use gw_builtin_binaries::{file_checksum, Resource};
use gw_common::{
blake2b::new_blake2b,
builtins::{CKB_SUDT_ACCOUNT_ID, ETH_REGISTRY_ACCOUNT_ID},
registry_address::RegistryAddress,
state::State,
};
use gw_config::{BackendConfig, BackendForkConfig, GenesisConfig, StoreConfig};
use gw_generator::{
account_lock_manage::{always_success::AlwaysSuccess, AccountLockManage},
backend_manage::BackendManage,
genesis::build_genesis_from_store,
traits::StateExt,
Generator,
};
use gw_store::{
mem_pool_state::MemPoolState,
schema::COLUMNS,
state::{
history::history_state::{HistoryState, RWConfig},
state_db::StateDB,
traits::JournalDB,
MemStateDB,
},
traits::chain_store::ChainStore,
Store,
};
use gw_traits::{ChainView, CodeStore};
use gw_types::{
bytes::Bytes,
core::{AllowedEoaType, ScriptHashType, Status},
h256::*,
packed::{
AccountMerkleState, AllowedTypeHash, BlockInfo, BlockMerkleState, Fee, GlobalState,
L2Block, RawL2Block, RawL2Transaction, RollupConfig, SUDTArgs, SUDTTransfer, Script,
SubmitTransactions,
},
prelude::*,
U256,
};
use gw_utils::RollupContext;
use pprof::criterion::{Output, PProfProfiler};
// meta contract
const META_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
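// Illustrative note (an interpretation of the constants above): the validator
// script type hashes, the always-success lock hash and the rollup type hash
// are fixed dummy values ([1u8; 32] through [4u8; 32]) rather than real
// on-chain hashes; the benchmark only needs them to be internally consistent.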
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> |
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfers from id to id + 1 until we reach the target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
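        // Each transfer above pays a 1-unit fee to the block producer, so after
        // `count` transfers its CKB-sUDT balance should have grown by exactly `count`.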
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
fn init_genesis(store: &Store, config: &GenesisConfig, accounts: u32) {
if store.has_genesis().unwrap() {
let chain_id = store.get_chain_id().unwrap();
if chain_id == ROLLUP_TYPE_HASH {
return;
} else {
panic!("store genesis already initialized");
}
}
let mut db = store.begin_transaction();
db.setup_chain_id(ROLLUP_TYPE_HASH).unwrap();
let (mut db, genesis_state) =
build_genesis_from_store(db, config, Default::default()).unwrap();
let smt = db
.state_smt_with_merkle_state(genesis_state.genesis.raw().post_account())
.unwrap();
let account_count = genesis_state.genesis.raw().post_account().count().unpack();
let mut state = {
let history_state = HistoryState::new(smt, account_count, RWConfig::attach_block(0));
StateDB::new(history_state)
};
Self::generate_accounts(&mut state, accounts + 1); // Plus block producer
state.finalise().unwrap();
let (genesis, global_state) = {
let prev_state_checkpoint: [u8; 32] = state.calculate_state_checkpoint().unwrap();
let submit_txs = SubmitTransactions::new_builder()
.prev_state_checkpoint(prev_state_checkpoint.pack())
.build();
// calculate post state
let post_account = {
let root = state.calculate_root().unwrap();
let count = state.get_account_count().unwrap();
AccountMerkleState::new_builder()
.merkle_root(root.pack())
.count(count.pack())
.build()
};
let raw_genesis = RawL2Block::new_builder()
.number(0u64.pack())
.parent_block_hash([0u8; 32].pack())
.timestamp(1.pack())
.post_account(post_account.clone())
.submit_transactions(submit_txs)
.build();
// generate block proof
let genesis_hash = raw_genesis.hash();
let (block_root, block_proof) = {
let block_key = RawL2Block::compute_smt_key(0);
let mut smt = db.block_smt().unwrap();
smt.update(block_key.into(), genesis_hash.into()).unwrap();
let block_proof = smt
.merkle_proof(vec![block_key.into()])
.unwrap()
.compile(vec![block_key.into()])
.unwrap();
let block_root = *smt.root();
(block_root, block_proof)
};
// build genesis
let genesis = L2Block::new_builder()
.raw(raw_genesis)
.block_proof(block_proof.0.pack())
.build();
let global_state = {
let post_block = BlockMerkleState::new_builder()
.merkle_root({
let root: [u8; 32] = block_root.into();
root.pack()
})
.count(1u64.pack())
.build();
let rollup_config_hash = {
let mut hasher = new_blake2b();
hasher.update(
Into::<RollupConfig>::into(config.rollup_config.clone()).as_slice(),
);
let mut hash = [0u8; 32];
hasher.finalize(&mut hash);
hash
};
GlobalState::new_builder()
.account(post_account)
.block(post_block)
.status((Status::Running as u8).into())
.rollup_config_hash(rollup_config_hash.pack())
.tip_block_hash(genesis.hash().pack())
.build()
};
db.set_block_smt_root(global_state.block().merkle_root().unpack())
.unwrap();
(genesis, global_state)
};
let prev_txs_state = genesis.as_reader().raw().post_account().to_entity();
db.insert_block(
genesis.clone(),
global_state,
prev_txs_state,
Vec::new(),
Default::default(),
Vec::new(),
)
.unwrap();
db.attach_block(genesis).unwrap();
db.commit().unwrap();
}
}
| {
unreachable!("bench chain store")
} | identifier_body |
smt.rs | use std::sync::Arc;
use anyhow::Result;
use criterion::{criterion_group, BenchmarkId, Criterion, Throughput};
use gw_builtin_binaries::{file_checksum, Resource};
use gw_common::{
blake2b::new_blake2b,
builtins::{CKB_SUDT_ACCOUNT_ID, ETH_REGISTRY_ACCOUNT_ID},
registry_address::RegistryAddress,
state::State,
};
use gw_config::{BackendConfig, BackendForkConfig, GenesisConfig, StoreConfig};
use gw_generator::{
account_lock_manage::{always_success::AlwaysSuccess, AccountLockManage},
backend_manage::BackendManage,
genesis::build_genesis_from_store,
traits::StateExt,
Generator,
};
use gw_store::{
mem_pool_state::MemPoolState,
schema::COLUMNS,
state::{
history::history_state::{HistoryState, RWConfig},
state_db::StateDB,
traits::JournalDB,
MemStateDB,
},
traits::chain_store::ChainStore,
Store,
};
use gw_traits::{ChainView, CodeStore};
use gw_types::{
bytes::Bytes,
core::{AllowedEoaType, ScriptHashType, Status},
h256::*,
packed::{
AccountMerkleState, AllowedTypeHash, BlockInfo, BlockMerkleState, Fee, GlobalState,
L2Block, RawL2Block, RawL2Transaction, RollupConfig, SUDTArgs, SUDTTransfer, Script,
SubmitTransactions,
},
prelude::*,
U256,
};
use gw_utils::RollupContext;
use pprof::criterion::{Output, PProfProfiler};
// meta contract
const META_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfer from id to id + 1, until we reach target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
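        // Each transfer above pays a 1-unit fee to the block producer, so after
        // `count` transfers its CKB-sUDT balance should have grown by exactly `count`.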
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
fn init_genesis(store: &Store, config: &GenesisConfig, accounts: u32) {
if store.has_genesis().unwrap() {
let chain_id = store.get_chain_id().unwrap();
if chain_id == ROLLUP_TYPE_HASH {
return;
} else |
}
let mut db = store.begin_transaction();
db.setup_chain_id(ROLLUP_TYPE_HASH).unwrap();
let (mut db, genesis_state) =
build_genesis_from_store(db, config, Default::default()).unwrap();
let smt = db
.state_smt_with_merkle_state(genesis_state.genesis.raw().post_account())
.unwrap();
let account_count = genesis_state.genesis.raw().post_account().count().unpack();
let mut state = {
let history_state = HistoryState::new(smt, account_count, RWConfig::attach_block(0));
StateDB::new(history_state)
};
Self::generate_accounts(&mut state, accounts + 1); // Plus block producer
state.finalise().unwrap();
let (genesis, global_state) = {
let prev_state_checkpoint: [u8; 32] = state.calculate_state_checkpoint().unwrap();
let submit_txs = SubmitTransactions::new_builder()
.prev_state_checkpoint(prev_state_checkpoint.pack())
.build();
// calculate post state
let post_account = {
let root = state.calculate_root().unwrap();
let count = state.get_account_count().unwrap();
AccountMerkleState::new_builder()
.merkle_root(root.pack())
.count(count.pack())
.build()
};
let raw_genesis = RawL2Block::new_builder()
.number(0u64.pack())
.parent_block_hash([0u8; 32].pack())
.timestamp(1.pack())
.post_account(post_account.clone())
.submit_transactions(submit_txs)
.build();
// generate block proof
let genesis_hash = raw_genesis.hash();
let (block_root, block_proof) = {
let block_key = RawL2Block::compute_smt_key(0);
let mut smt = db.block_smt().unwrap();
smt.update(block_key.into(), genesis_hash.into()).unwrap();
let block_proof = smt
.merkle_proof(vec![block_key.into()])
.unwrap()
.compile(vec![block_key.into()])
.unwrap();
let block_root = *smt.root();
(block_root, block_proof)
};
// build genesis
let genesis = L2Block::new_builder()
.raw(raw_genesis)
.block_proof(block_proof.0.pack())
.build();
let global_state = {
let post_block = BlockMerkleState::new_builder()
.merkle_root({
let root: [u8; 32] = block_root.into();
root.pack()
})
.count(1u64.pack())
.build();
let rollup_config_hash = {
let mut hasher = new_blake2b();
hasher.update(
Into::<RollupConfig>::into(config.rollup_config.clone()).as_slice(),
);
let mut hash = [0u8; 32];
hasher.finalize(&mut hash);
hash
};
GlobalState::new_builder()
.account(post_account)
.block(post_block)
.status((Status::Running as u8).into())
.rollup_config_hash(rollup_config_hash.pack())
.tip_block_hash(genesis.hash().pack())
.build()
};
db.set_block_smt_root(global_state.block().merkle_root().unpack())
.unwrap();
(genesis, global_state)
};
let prev_txs_state = genesis.as_reader().raw().post_account().to_entity();
db.insert_block(
genesis.clone(),
global_state,
prev_txs_state,
Vec::new(),
Default::default(),
Vec::new(),
)
.unwrap();
db.attach_block(genesis).unwrap();
db.commit().unwrap();
}
}
| {
panic!("store genesis already initialized");
} | conditional_block |
run.py | from typing import Any, Callable, ClassVar, Collection, Dict, FrozenSet, \
Hashable, IO, Iterable, Iterator, List, Literal, NewType, Optional, \
    Protocol, Sequence, Set, Tuple, Type, TypeGuard, TypeVar, Union, \
runtime_checkable, TYPE_CHECKING, no_type_check
import sys
import argparse
import matplotlib.pyplot as plt
from Model import *
from Log import lo, trace, indent_log, set_log_level
from util import newline, nf, reseed, pts, short
m: Model
last_args: Dict[str, Any] = {}
rngseed: Optional[int] = None
# NEXT At the end of a run, print out all the global parameters in a
# concise form.
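# A possible sketch (hypothetical helper, not part of the model code; assumes
# global_params is an object whose public attributes are the parameters):
#
#   def print_global_params() -> None:
#       for name in sorted(vars(global_params)):
#           if not name.startswith('_'):
#               print(f'{name}={getattr(global_params, name)}')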
def run(
seed: str='a ',
ltm: List[str]=['ajaqb'],
asteps: int=100, #40,
rsteps: int=60, #60,
fresh: bool=True, # Create a new Model in global variable 'm'?
lla: int=0, # logging level during absorption
llr: int=2, # logging level during regeneration
auto_annotate: Iterable[Annotation]=default_auto_annotations,
ab: List[Painter]=default_initial_painters,
abr: bool=True, # allow ab initio painters during regeneration?
rng: Optional[int]=None, # random-number seed, or None to randomize
exc: bool=False, # exclude absolute painters from lts?
ccl: bool=True, # allow source/target cell clarity to affect probability?
pcl: bool=False, # allow painter clarity to affect probability?
pun: bool=False # allow punishing of painters for overwriting given letters?
) -> None:
global m, last_args
last_args = dict(
seed=seed,
asteps=asteps,
rsteps=rsteps,
lla=lla,
llr=llr,
auto_annotate=auto_annotate,
ab=ab,
exc=exc
)
if fresh:
set_rngseed(rng)
set_global_param('allow_ab_initio_painters', True)
set_log_level(lla)
m = Model(
lts=Soup.make_from(ab),
auto_annotate=auto_annotate,
exclude_abs=exc
)
lo(1, 'INITS', '\n' + m.lts.state_str())
if asteps:
for s in ltm:
m.absorb(s, timesteps=asteps)
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
|
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a = rel1a | dict(
ab=[ab4],
rsteps=120,
rng=682026381905476632
)
rel3a = rel2a | dict(
seed='c s '
)
# Can we solve 'ajaqb' without relative indirect painters?
quest1 = dict(
ltm=['ajaqb'],
seed='a ',
asteps=40,
rsteps=40,
ab=[ab1, ab3],
pun=False,
exc=True
)
# No.
# seed='a a ' also fails, because without absolute or relative indirect
# painters, the LTS has no record of the a_a and a_b relationships.
# A relative indirect painter can recreate that relationship wherever it
# sees an 'a' or 'b'.
# Does adding relative indirect solve that problem?
quest2 = quest1 | dict(
ab=[ab1, ab2, ab3]
)
# Does adding relative painters get hoplike_long to regenerate the memories
# reliably?
quest3 = hoplike_long | dict(
ab=[ab1, ab4]
)
# Is clarity needed to settle down on an attractor?
quest4 = hoplike_long | dict(
ccl=False
)
cdecb = dict(
seed=' e ',
ltm=['cdecb'],
ab=[ab1a],
asteps=30,
ccl=False,
pcl=False,
pun=False
)
# hoplike_few
# pcl=False => ignore painter clarity
# ccl=False => ignore cell clarity
# TODO rsteps=None => run until all clarities >= 4
# ann=False => no cell annotations
# TODO Collect together some named experiments that show each point in
# sequence. Start without cell clarity.
ri1 = dict(
seed='m ',
ltm=['ajaqb'],
ab=[ab1a, ab4],
abr=False,
ccl=True,
pcl=False,
pun=False,
)
# NEXT Who solves ajaqb with m____? What is the minimum set of ab initio
# painters?
# Answer: r(ri1, seed='m ', ab=[ab1a, ab3])
# THEN Find the bug that makes the betw painter match a painter in the LTS.
# r(ri1, ltm=['aaajaqb'], seed='mj ', ab=[ab1a, ab3, ab4])
# Could we set up a problem where the presence of absolute painters
# interferes with a good solution?
# Can we see any use for the relative indirect painter at all, which we can
# illustrate with an example?
# Yes. This fails:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab3])
# This succeeds:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab2, ab3])
# To illustrate Process A and Process B:
# Need abr=True to be necessary to solve problem with some set of
# ab initio painters.
# ((I, I+2, same), ws, (I, I+4, succ)) <-- this would do it, even on m____
src = Painter(I, Plus(I, 2), same)
fun = Painter(I, Plus(I, 4), succ)
p1 = Painter(src, SR.WorkingSoup, fun)
# Fails:
# r(ri1, seed='m m ', exc=True, abr=False, ab=[ab1a, ab3, p1])
# Succeeds:
# r(ri1, seed='m m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
pab1 = ri1 | dict(
seed='m m ',
exc=True,
abr=False,
ab=[ab1a, ab3, p1],
rng=6324482767183163296
)
pab2 = pab1 | dict(
abr=True
)
# Succeeds:
# r(ri1, seed='mmm m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
# Shows changing both position and letter.
# New ab initio painter:
# See two abs painters with one overlapping index, make the above painter.
# DECISION: Omit relative indirect painter, include only this painter above.
if __name__ == '__main__':
#parse_and_run() # Uncomment this to get normal command line
#run_ad(pun=True, llr=5, rsteps=2)
#r(hoplike, seed='a de') # erratic
#r(hoplike_long)
#'ghijk '
#r(hoplike_long_easy)
#r(example1)
#r(rel1)
#r(ri1)
r(pab1)
#r(cdecb, llr=2, rsteps=0, lla=2)
#set_rngseed(1)
#run_ajaqb()
#run_ajaqb('a ', ['wxyaaaa'], 120)
#run('abc ')
#run()
#run_bad()
#run_test()
#run_pons()
#run(ltm=['ajaqb'], asteps=100, lla=6, rsteps=0)
# WANT Run an experiment with same params as example2, but with hundreds of
# different randomly chosen 4-letter seeds (with 4 randomly placed blanks),
# and see if there are only a small number of attractors. Run to completion
# rather than running a set number of timesteps.
| run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q | identifier_body |
run.py | from typing import Any, Callable, ClassVar, Collection, Dict, FrozenSet, \
Hashable, IO, Iterable, Iterator, List, Literal, NewType, Optional, \
    Protocol, Sequence, Set, Tuple, Type, TypeGuard, TypeVar, Union, \
runtime_checkable, TYPE_CHECKING, no_type_check
import sys
import argparse
import matplotlib.pyplot as plt
from Model import *
from Log import lo, trace, indent_log, set_log_level
from util import newline, nf, reseed, pts, short
m: Model
last_args: Dict[str, Any] = {}
rngseed: Optional[int] = None
# NEXT At the end of a run, print out all the global parameters in a
# concise form.
def run(
seed: str='a ',
ltm: List[str]=['ajaqb'],
asteps: int=100, #40,
rsteps: int=60, #60,
fresh: bool=True, # Create a new Model in global variable 'm'?
lla: int=0, # logging level during absorption
llr: int=2, # logging level during regeneration
auto_annotate: Iterable[Annotation]=default_auto_annotations,
ab: List[Painter]=default_initial_painters,
abr: bool=True, # allow ab initio painters during regeneration?
rng: Optional[int]=None, # random-number seed, or None to randomize
exc: bool=False, # exclude absolute painters from lts?
ccl: bool=True, # allow source/target cell clarity to affect probability?
pcl: bool=False, # allow painter clarity to affect probability?
pun: bool=False # allow punishing of painters for overwriting given letters?
) -> None:
global m, last_args
last_args = dict(
seed=seed,
asteps=asteps,
rsteps=rsteps,
lla=lla,
llr=llr,
auto_annotate=auto_annotate,
ab=ab,
exc=exc
)
if fresh:
set_rngseed(rng)
set_global_param('allow_ab_initio_painters', True)
set_log_level(lla)
m = Model(
lts=Soup.make_from(ab),
auto_annotate=auto_annotate,
exclude_abs=exc
)
lo(1, 'INITS', '\n' + m.lts.state_str())
if asteps:
|
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a = rel1a | dict(
ab=[ab4],
rsteps=120,
rng=682026381905476632
)
rel3a = rel2a | dict(
seed='c s '
)
# Can we solve 'ajaqb' without relative indirect painters?
quest1 = dict(
ltm=['ajaqb'],
seed='a ',
asteps=40,
rsteps=40,
ab=[ab1, ab3],
pun=False,
exc=True
)
# No.
# seed='a a ' also fails, because without absolute or relative indirect
# painters, the LTS has no record of the a_a and a_b relationships.
# A relative indirect painter can recreate that relationship wherever it
# sees an 'a' or 'b'.
# Does adding relative indirect solve that problem?
quest2 = quest1 | dict(
ab=[ab1, ab2, ab3]
)
# Does adding relative painters get hoplike_long to regenerate the memories
# reliably?
quest3 = hoplike_long | dict(
ab=[ab1, ab4]
)
# Is clarity needed to settle down on an attractor?
quest4 = hoplike_long | dict(
ccl=False
)
cdecb = dict(
seed=' e ',
ltm=['cdecb'],
ab=[ab1a],
asteps=30,
ccl=False,
pcl=False,
pun=False
)
# hoplike_few
# pcl=False => ignore painter clarity
# ccl=False => ignore cell clarity
# TODO rsteps=None => run until all clarities >= 4
# ann=False => no cell annotations
# TODO Collect together some named experiments that show each point in
# sequence. Start without cell clarity.
ri1 = dict(
seed='m ',
ltm=['ajaqb'],
ab=[ab1a, ab4],
abr=False,
ccl=True,
pcl=False,
pun=False,
)
# NEXT Who solves ajaqb with m____? What is the minimum set of ab initio
# painters?
# Answer: r(ri1, seed='m ', ab=[ab1a, ab3])
# THEN Find the bug that makes the betw painter match a painter in the LTS.
# r(ri1, ltm=['aaajaqb'], seed='mj ', ab=[ab1a, ab3, ab4])
# Could we set up a problem where the presence of absolute painters
# interferes with a good solution?
# Can we see any use for the relative indirect painter at all, which we can
# illustrate with an example?
# Yes. This fails:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab3])
# This succeeds:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab2, ab3])
# To illustrate Process A and Process B:
# Need abr=True to be necessary to solve problem with some set of
# ab initio painters.
# ((I, I+2, same), ws, (I, I+4, succ)) <-- this would do it, even on m____
src = Painter(I, Plus(I, 2), same)
fun = Painter(I, Plus(I, 4), succ)
p1 = Painter(src, SR.WorkingSoup, fun)
# Fails:
# r(ri1, seed='m m ', exc=True, abr=False, ab=[ab1a, ab3, p1])
# Succeeds:
# r(ri1, seed='m m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
pab1 = ri1 | dict(
seed='m m ',
exc=True,
abr=False,
ab=[ab1a, ab3, p1],
rng=6324482767183163296
)
pab2 = pab1 | dict(
abr=True
)
# Succeeds:
# r(ri1, seed='mmm m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
# Shows changing both position and letter.
# New ab initio painter:
# See two abs painters with one overlapping index, make the above painter.
# DECISION: Omit relative indirect painter, include only this painter above.
if __name__ == '__main__':
#parse_and_run() # Uncomment this to get normal command line
#run_ad(pun=True, llr=5, rsteps=2)
#r(hoplike, seed='a de') # erratic
#r(hoplike_long)
#'ghijk '
#r(hoplike_long_easy)
#r(example1)
#r(rel1)
#r(ri1)
r(pab1)
#r(cdecb, llr=2, rsteps=0, lla=2)
#set_rngseed(1)
#run_ajaqb()
#run_ajaqb('a ', ['wxyaaaa'], 120)
#run('abc ')
#run()
#run_bad()
#run_test()
#run_pons()
#run(ltm=['ajaqb'], asteps=100, lla=6, rsteps=0)
# WANT Run an experiment with same params as example2, but with hundreds of
# different randomly chosen 4-letter seeds (with 4 randomly placed blanks),
# and see if there are only a small number of attractors. Run to completion
# rather than running a set number of timesteps.
| for s in ltm:
m.absorb(s, timesteps=asteps) | conditional_block |
run.py | from typing import Any, Callable, ClassVar, Collection, Dict, FrozenSet, \
Hashable, IO, Iterable, Iterator, List, Literal, NewType, Optional, \
    Protocol, Sequence, Set, Tuple, Type, TypeGuard, TypeVar, Union, \
runtime_checkable, TYPE_CHECKING, no_type_check
import sys
import argparse
import matplotlib.pyplot as plt
from Model import *
from Log import lo, trace, indent_log, set_log_level
from util import newline, nf, reseed, pts, short
m: Model
last_args: Dict[str, Any] = {}
rngseed: Optional[int] = None
# NEXT At the end of a run, print out all the global parameters in a
# concise form.
def run(
seed: str='a ',
ltm: List[str]=['ajaqb'],
asteps: int=100, #40,
rsteps: int=60, #60,
fresh: bool=True, # Create a new Model in global variable 'm'?
lla: int=0, # logging level during absorption
llr: int=2, # logging level during regeneration
auto_annotate: Iterable[Annotation]=default_auto_annotations,
ab: List[Painter]=default_initial_painters,
abr: bool=True, # allow ab initio painters during regeneration?
rng: Optional[int]=None, # random-number seed, or None to randomize
exc: bool=False, # exclude absolute painters from lts?
ccl: bool=True, # allow source/target cell clarity to affect probability?
pcl: bool=False, # allow painter clarity to affect probability?
pun: bool=False # allow punishing of painters for overwriting given letters?
) -> None:
global m, last_args
last_args = dict(
seed=seed,
asteps=asteps,
rsteps=rsteps,
lla=lla,
llr=llr,
auto_annotate=auto_annotate,
ab=ab,
exc=exc
)
if fresh:
set_rngseed(rng)
set_global_param('allow_ab_initio_painters', True)
set_log_level(lla)
m = Model(
lts=Soup.make_from(ab),
auto_annotate=auto_annotate,
exclude_abs=exc
)
lo(1, 'INITS', '\n' + m.lts.state_str())
if asteps:
for s in ltm:
m.absorb(s, timesteps=asteps)
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def | ():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a = rel1a | dict(
ab=[ab4],
rsteps=120,
rng=682026381905476632
)
rel3a = rel2a | dict(
seed='c s '
)
# Can we solve 'ajaqb' without relative indirect painters?
quest1 = dict(
ltm=['ajaqb'],
seed='a ',
asteps=40,
rsteps=40,
ab=[ab1, ab3],
pun=False,
exc=True
)
# No.
# seed='a a ' also fails, because without absolute or relative indirect
# painters, the LTS has no record of the a_a and a_b relationships.
# A relative indirect painter can recreate that relationship wherever it
# sees an 'a' or 'b'.
# Does adding relative indirect solve that problem?
quest2 = quest1 | dict(
ab=[ab1, ab2, ab3]
)
# Does adding relative painters get hoplike_long to regenerate the memories
# reliably?
quest3 = hoplike_long | dict(
ab=[ab1, ab4]
)
# Is clarity needed to settle down on an attractor?
quest4 = hoplike_long | dict(
ccl=False
)
cdecb = dict(
seed=' e ',
ltm=['cdecb'],
ab=[ab1a],
asteps=30,
ccl=False,
pcl=False,
pun=False
)
# hoplike_few
# pcl=False => ignore painter clarity
# ccl=False => ignore cell clarity
# TODO rsteps=None => run until all clarities >= 4
# ann=False => no cell annotations
# TODO Collect together some named experiments that show each point in
# sequence. Start without cell clarity.
ri1 = dict(
seed='m ',
ltm=['ajaqb'],
ab=[ab1a, ab4],
abr=False,
ccl=True,
pcl=False,
pun=False,
)
# NEXT Who solves ajaqb with m____? What is the minimum set of ab initio
# painters?
# Answer: r(ri1, seed='m ', ab=[ab1a, ab3])
# THEN Find the bug that makes the betw painter match a painter in the LTS.
# r(ri1, ltm=['aaajaqb'], seed='mj ', ab=[ab1a, ab3, ab4])
# Could we set up a problem where the presence of absolute painters
# interferes with a good solution?
# Can we see any use for the relative indirect painter at all, which we can
# illustrate with an example?
# Yes. This fails:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab3])
# This succeeds:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab2, ab3])
# To illustrate Process A and Process B:
# Need abr=True to be necessary to solve problem with some set of
# ab initio painters.
# ((I, I+2, same), ws, (I, I+4, succ)) <-- this would do it, even on m____
src = Painter(I, Plus(I, 2), same)
fun = Painter(I, Plus(I, 4), succ)
p1 = Painter(src, SR.WorkingSoup, fun)
# Fails:
# r(ri1, seed='m m ', exc=True, abr=False, ab=[ab1a, ab3, p1])
# Succeeds:
# r(ri1, seed='m m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
pab1 = ri1 | dict(
seed='m m ',
exc=True,
abr=False,
ab=[ab1a, ab3, p1],
rng=6324482767183163296
)
pab2 = pab1 | dict(
abr=True
)
# Succeeds:
# r(ri1, seed='mmm m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
# Shows changing both position and letter.
# New ab initio painter:
# See two abs painters with one overlapping index, make the above painter.
# DECISION: Omit relative indirect painter, include only this painter above.
if __name__ == '__main__':
#parse_and_run() # Uncomment this to get normal command line
#run_ad(pun=True, llr=5, rsteps=2)
#r(hoplike, seed='a de') # erratic
#r(hoplike_long)
#'ghijk '
#r(hoplike_long_easy)
#r(example1)
#r(rel1)
#r(ri1)
r(pab1)
#r(cdecb, llr=2, rsteps=0, lla=2)
#set_rngseed(1)
#run_ajaqb()
#run_ajaqb('a ', ['wxyaaaa'], 120)
#run('abc ')
#run()
#run_bad()
#run_test()
#run_pons()
#run(ltm=['ajaqb'], asteps=100, lla=6, rsteps=0)
# WANT Run an experiment with same params as example2, but with hundreds of
# different randomly chosen 4-letter seeds (with 4 randomly placed blanks),
# and see if there are only a small number of attractors. Run to completion
# rather than running a set number of timesteps.
| runabs2 | identifier_name |
run.py | from typing import Any, Callable, ClassVar, Collection, Dict, FrozenSet, \
Hashable, IO, Iterable, Iterator, List, Literal, NewType, Optional, \
    Protocol, Sequence, Set, Tuple, Type, TypeGuard, TypeVar, Union, \
runtime_checkable, TYPE_CHECKING, no_type_check
import sys
import argparse
import matplotlib.pyplot as plt
from Model import *
from Log import lo, trace, indent_log, set_log_level
from util import newline, nf, reseed, pts, short
m: Model
last_args: Dict[str, Any] = {}
rngseed: Optional[int] = None
# NEXT At the end of a run, print out all the global parameters in a
# concise form.
def run(
seed: str='a ',
ltm: List[str]=['ajaqb'],
asteps: int=100, #40,
rsteps: int=60, #60,
fresh: bool=True, # Create a new Model in global variable 'm'?
lla: int=0, # logging level during absorption
llr: int=2, # logging level during regeneration
auto_annotate: Iterable[Annotation]=default_auto_annotations,
ab: List[Painter]=default_initial_painters,
abr: bool=True, # allow ab initio painters during regeneration?
rng: Optional[int]=None, # random-number seed, or None to randomize
exc: bool=False, # exclude absolute painters from lts?
ccl: bool=True, # allow source/target cell clarity to affect probability?
pcl: bool=False, # allow painter clarity to affect probability?
pun: bool=False # allow punishing of painters for overwriting given letters?
) -> None:
global m, last_args
last_args = dict(
seed=seed,
asteps=asteps,
rsteps=rsteps,
lla=lla,
llr=llr,
auto_annotate=auto_annotate,
ab=ab,
exc=exc
)
if fresh:
set_rngseed(rng)
set_global_param('allow_ab_initio_painters', True)
set_log_level(lla)
m = Model(
lts=Soup.make_from(ab),
auto_annotate=auto_annotate,
exclude_abs=exc
)
lo(1, 'INITS', '\n' + m.lts.state_str())
if asteps:
for s in ltm:
m.absorb(s, timesteps=asteps)
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a = rel1a | dict(
ab=[ab4],
rsteps=120,
rng=682026381905476632
)
rel3a = rel2a | dict(
seed='c s '
)
# Can we solve 'ajaqb' without relative indirect painters?
quest1 = dict(
ltm=['ajaqb'],
seed='a ',
asteps=40,
rsteps=40,
ab=[ab1, ab3],
pun=False,
exc=True
)
# No.
# seed='a a ' also fails, because without absolute or relative indirect
# painters, the LTS has no record of the a_a and a_b relationships.
# A relative indirect painter can recreate that relationship wherever it
# sees an 'a' or 'b'.
# Does adding relative indirect solve that problem?
quest2 = quest1 | dict(
ab=[ab1, ab2, ab3]
)
# Does adding relative painters get hoplike_long to regenerate the memories
# reliably?
quest3 = hoplike_long | dict(
ab=[ab1, ab4]
)
# Is clarity needed to settle down on an attractor?
quest4 = hoplike_long | dict(
ccl=False
)
cdecb = dict(
seed=' e ',
ltm=['cdecb'], | ccl=False,
pcl=False,
pun=False
)
# hoplike_few
# pcl=False => ignore painter clarity
# ccl=False => ignore cell clarity
# TODO rsteps=None => run until all clarities >= 4
# ann=False => no cell annotations
# TODO Collect together some named experiments that show each point in
# sequence. Start without cell clarity.
ri1 = dict(
seed='m ',
ltm=['ajaqb'],
ab=[ab1a, ab4],
abr=False,
ccl=True,
pcl=False,
pun=False,
)
# NEXT Who solves ajaqb with m____? What is the minimum set of ab initio
# painters?
# Answer: r(ri1, seed='m ', ab=[ab1a, ab3])
# THEN Find the bug that makes the betw painter match a painter in the LTS.
# r(ri1, ltm=['aaajaqb'], seed='mj ', ab=[ab1a, ab3, ab4])
# Could we set up a problem where the presence of absolute painters
# interferes with a good solution?
# Can we see any use for the relative indirect painter at all, which we can
# illustrate with an example?
# Yes. This fails:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab3])
# This succeeds:
# r(ri1, seed='a ', exc=True, abr=True, ab=[ab1a, ab2, ab3])
# To illustrate Process A and Process B:
# Need abr=True to be necessary to solve problem with some set of
# ab initio painters.
# ((I, I+2, same), ws, (I, I+4, succ)) <-- this would do it, even on m____
src = Painter(I, Plus(I, 2), same)
fun = Painter(I, Plus(I, 4), succ)
p1 = Painter(src, SR.WorkingSoup, fun)
# Fails:
# r(ri1, seed='m m ', exc=True, abr=False, ab=[ab1a, ab3, p1])
# Succeeds:
# r(ri1, seed='m m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
pab1 = ri1 | dict(
seed='m m ',
exc=True,
abr=False,
ab=[ab1a, ab3, p1],
rng=6324482767183163296
)
pab2 = pab1 | dict(
abr=True
)
# Succeeds:
# r(ri1, seed='mmm m ', exc=True, abr=True, ab=[ab1a, ab3, p1])
# Shows changing both position and letter.
# New ab initio painter:
# See two abs painters with one overlapping index, make the above painter.
# DECISION: Omit relative indirect painter, include only this painter above.
if __name__ == '__main__':
#parse_and_run() # Uncomment this to get normal command line
#run_ad(pun=True, llr=5, rsteps=2)
#r(hoplike, seed='a de') # erratic
#r(hoplike_long)
#'ghijk '
#r(hoplike_long_easy)
#r(example1)
#r(rel1)
#r(ri1)
r(pab1)
#r(cdecb, llr=2, rsteps=0, lla=2)
#set_rngseed(1)
#run_ajaqb()
#run_ajaqb('a ', ['wxyaaaa'], 120)
#run('abc ')
#run()
#run_bad()
#run_test()
#run_pons()
#run(ltm=['ajaqb'], asteps=100, lla=6, rsteps=0)
# WANT Run an experiment with same params as example2, but with hundreds of
# different randomly chosen 4-letter seeds (with 4 randomly placed blanks),
# and see if there are only a small number of attractors. Run to completion
# rather than running a set number of timesteps. | ab=[ab1a],
asteps=30, | random_line_split |
rfc7539_test.go | // test vectors from https://tools.ietf.org/html/rfc7539
package rfc7539_test
import (
"bytes"
"github.com/ascottqqq/rfc7539"
"testing"
)
func TestChaCha20BlockFunction(t *testing.T) {
type testCase struct {
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
plaintext := make([]uint8, 64)
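// Since the 64-byte plaintext is all zeros, the expected "ciphertext" below is
// just the raw ChaCha20 keystream for each (key, counter, nonce) triple,
// matching the block-function outputs given in RFC 7539.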
testCases := [...]testCase{{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x76, 0xb8, 0xe0, 0xad,
0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 0xef, 0xcc,
0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4,
0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x9f, 0x07, 0xe7, 0xbe,
0x55, 0x51, 0x38, 0x7a, 0x98, 0xba, 0x97, 0x7c, 0x73, 0x2d, 0x08, 0x0d,
0xcb, 0x0f, 0x29, 0xa0, 0x48, 0xe3, 0x65, 0x69, 0x12, 0xc6, 0x53, 0x3e,
0x32, 0xee, 0x7a, 0xed, 0x29, 0xb7, 0x21, 0x76, 0x9c, 0xe6, 0x4e, 0x43,
0xd5, 0x71, 0x33, 0xb0, 0x74, 0xd8, 0x39, 0xd5, 0x31, 0xed, 0x1f, 0x28,
0x51, 0x0a, 0xfb, 0x45, 0xac, 0xe1, 0x0a, 0x1f, 0x4b, 0x79, 0x4d, 0x6f}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x3a, 0xeb, 0x52, 0x24, 0xec,
0xf8, 0x49, 0x92, 0x9b, 0x9d, 0x82, 0x8d, 0xb1, 0xce, 0xd4, 0xdd, 0x83,
0x20, 0x25, 0xe8, 0x01, 0x8b, 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
}
}
}
func TestChaCha20Encryption(t *testing.T) {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
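// Note: the initial block counter is part of each test vector; RFC 7539 starts
// encryption at counter 1 in its examples, reserving counter 0 for deriving the
// one-time Poly1305 key in the AEAD construction (RFC 7539, Section 2.8).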
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x2e, 0x35, 0x9a, 0x25,
0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 0xe9,
0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd,
0x9f, 0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f,
0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6,
0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8, 0x07,
0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e, 0x08, 0x8a,
0x22, 0xb6, 0x5e, 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81,
0x8c, 0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, 0x5a, 0xf9, 0x0b, 0xbf, 0x74,
0xa3, 0x5b, 0xe6, 0xb4, 0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, 0x87,
0x4d}},
{"Any submission to the IETF intended by the Contributor for " +
"publication as all or part of an IETF Internet-Draft or RFC and any " +
"statement made within the context of an IETF activity is considered " +
"an \"IETF Contribution\". Such statements include oral statements " +
"in IETF sessions, as well as written and electronic communications " +
"made at any time or place, which are addressed to",
[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, uint32(1),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0xa3, 0xfb, 0xf0, 0x7d,
0xf3, 0xfa, 0x2f, 0xde, 0x4f, 0x37, 0x6c, 0xa2, 0x3e, 0x82, 0x73, 0x70,
0x41, 0x60, 0x5d, 0x9f, 0x4f, 0x4f, 0x57, 0xbd, 0x8c, 0xff, 0x2c, 0x1d,
0x4b, 0x79, 0x55, 0xec, 0x2a, 0x97, 0x94, 0x8b, 0xd3, 0x72, 0x29, 0x15,
0xc8, 0xf3, 0xd3, 0x37, 0xf7, 0xd3, 0x70, 0x05, 0x0e, 0x9e, 0x96, 0xd6,
0x47, 0xb7, 0xc3, 0x9f, 0x56, 0xe0, 0x31, 0xca, 0x5e, 0xb6, 0x25, 0x0d,
0x40, 0x42, 0xe0, 0x27, 0x85, 0xec, 0xec, 0xfa, 0x4b, 0x4b, 0xb5, 0xe8,
0xea, 0xd0, 0x44, 0x0e, 0x20, 0xb6, 0xe8, 0xdb, 0x09, 0xd8, 0x81, 0xa7,
0xc6, 0x13, 0x2f, 0x42, 0x0e, 0x52, 0x79, 0x50, 0x42, 0xbd, 0xfa, 0x77,
0x73, 0xd8, 0xa9, 0x05, 0x14, 0x47, 0xb3, 0x29, 0x1c, 0xe1, 0x41, 0x1c,
0x68, 0x04, 0x65, 0x55, 0x2a, 0xa6, 0xc4, 0x05, 0xb7, 0x76, 0x4d, 0x5e,
0x87, 0xbe, 0xa8, 0x5a, 0xd0, 0x0f, 0x84, 0x49, 0xed, 0x8f, 0x72, 0xd0,
0xd6, 0x62, 0xab, 0x05, 0x26, 0x91, 0xca, 0x66, 0x42, 0x4b, 0xc8, 0x6d,
0x2d, 0xf8, 0x0e, 0xa4, 0x1f, 0x43, 0xab, 0xf9, 0x37, 0xd3, 0x25, 0x9d,
0xc4, 0xb2, 0xd0, 0xdf, 0xb4, 0x8a, 0x6c, 0x91, 0x39, 0xdd, 0xd7, 0xf7,
0x69, 0x66, 0xe9, 0x28, 0xe6, 0x35, 0x55, 0x3b, 0xa7, 0x6c, 0x5c, 0x87,
0x9d, 0x7b, 0x35, 0xd4, 0x9e, 0xb2, 0xe6, 0x2b, 0x08, 0x71, 0xcd, 0xac,
0x63, 0x89, 0x39, 0xe2, 0x5e, 0x8a, 0x1e, 0x0e, 0xf9, 0xd5, 0x28, 0x0f,
0xa8, 0xca, 0x32, 0x8b, 0x35, 0x1c, 0x3c, 0x76, 0x59, 0x89, 0xcb, 0xcf,
0x3d, 0xaa, 0x8b, 0x6c, 0xcc, 0x3a, 0xaf, 0x9f, 0x39, 0x79, 0xc9, 0x2b,
0x37, 0x20, 0xfc, 0x88, 0xdc, 0x95, 0xed, 0x84, 0xa1, 0xbe, 0x05, 0x9c,
0x64, 0x99, 0xb9, 0xfd, 0xa2, 0x36, 0xe7, 0xe8, 0x18, 0xb0, 0x4b, 0x0b,
0xc3, 0x9c, 0x1e, 0x87, 0x6b, 0x19, 0x3b, 0xfe, 0x55, 0x69, 0x75, 0x3f,
0x88, 0x12, 0x8c, 0xc0, 0x8a, 0xaa, 0x9b, 0x63, 0xd1, 0xa1, 0x6f, 0x80,
0xef, 0x25, 0x54, 0xd7, 0x18, 0x9c, 0x41, 0x1f, 0x58, 0x69, 0xca, 0x52,
0xc5, 0xb8, 0x3f, 0xa3, 0x6f, 0xf2, 0x16, 0xb9, 0xc1, 0xd3, 0x00, 0x62,
0xbe, 0xbc, 0xfd, 0x2d, 0xc5, 0xbc, 0xe0, 0x91, 0x19, 0x34, 0xfd, 0xa7,
0x9a, 0x86, 0xf6, 0xe6, 0x98, 0xce, 0xd7, 0x59, 0xc3, 0xff, 0x9b, 0x64,
0x77, 0x33, 0x8f, 0x3d, 0xa4, 0xf9, 0xcd, 0x85, 0x14, 0xea, 0x99, 0x82,
0xcc, 0xaf, 0xb3, 0x41, 0xb2, 0x38, 0x4d, 0xd9, 0x02, 0xf3, 0xd1, 0xab,
0x7a, 0xc6, 0x1d, 0xd2, 0x9c, 0x6f, 0x21, 0xba, 0x5b, 0x86, 0x2f, 0x37,
0x30, 0xe3, 0x7c, 0xfd, 0xc4, 0xfd, 0x80, 0x6c, 0x22, 0xf2, 0x21}},
{"'Twas brillig, and the slithy toves\nDid gyre and gimble in the " + | uint32(42),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0x62, 0xe6, 0x34, 0x7f, 0x95, 0xed, 0x87, 0xa4, 0x5f, 0xfa,
0xe7, 0x42, 0x6f, 0x27, 0xa1, 0xdf, 0x5f, 0xb6, 0x91, 0x10, 0x04,
0x4c, 0x0d, 0x73, 0x11, 0x8e, 0xff, 0xa9, 0x5b, 0x01, 0xe5, 0xcf,
0x16, 0x6d, 0x3d, 0xf2, 0xd7, 0x21, 0xca, 0xf9, 0xb2, 0x1e, 0x5f,
0xb1, 0x4c, 0x61, 0x68, 0x71, 0xfd, 0x84, 0xc5, 0x4f, 0x9d, 0x65,
0xb2, 0x83, 0x19, 0x6c, 0x7f, 0xe4, 0xf6, 0x05, 0x53, 0xeb, 0xf3,
0x9c, 0x64, 0x02, 0xc4, 0x22, 0x34, 0xe3, 0x2a, 0x35, 0x6b, 0x3e,
0x76, 0x43, 0x12, 0xa6, 0x1a, 0x55, 0x32, 0x05, 0x57, 0x16, 0xea,
0xd6, 0x96, 0x25, 0x68, 0xf8, 0x7d, 0x3f, 0x3f, 0x77, 0x04, 0xc6,
0xa8, 0xd1, 0xbc, 0xd1, 0xbf, 0x4d, 0x50, 0xd6, 0x15, 0x4b, 0x6d,
0xa7, 0x31, 0xb1, 0x87, 0xb5, 0x8d, 0xfd, 0x72, 0x8a, 0xfa, 0x36,
0x75, 0x7a, 0x79, 0x7a, 0xc1, 0x88, 0xd1}}}
for _, test := range testCases {
byteArray := []byte(test.plaintext)
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, byteArray}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("%s\nencrypted to: % x\nexpected: % x", test.plaintext,
encrypt, test.ciphertext)
}
}
} | "wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.",
[32]uint8{0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, 0xf3, 0x33,
0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, 0x47, 0x39, 0x17, 0xc1, 0x40,
0x2b, 0x80, 0x09, 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0}, | random_line_split |
rfc7539_test.go | // test vectors from https://tools.ietf.org/html/rfc7539
package rfc7539_test
import (
"bytes"
"github.com/ascottqqq/rfc7539"
"testing"
)
func TestChaCha20BlockFunction(t *testing.T) {
type testCase struct {
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
plaintext := make([]uint8, 64)
testCases := [...]testCase{{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x76, 0xb8, 0xe0, 0xad,
0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 0xef, 0xcc,
0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4,
0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x9f, 0x07, 0xe7, 0xbe,
0x55, 0x51, 0x38, 0x7a, 0x98, 0xba, 0x97, 0x7c, 0x73, 0x2d, 0x08, 0x0d,
0xcb, 0x0f, 0x29, 0xa0, 0x48, 0xe3, 0x65, 0x69, 0x12, 0xc6, 0x53, 0x3e,
0x32, 0xee, 0x7a, 0xed, 0x29, 0xb7, 0x21, 0x76, 0x9c, 0xe6, 0x4e, 0x43,
0xd5, 0x71, 0x33, 0xb0, 0x74, 0xd8, 0x39, 0xd5, 0x31, 0xed, 0x1f, 0x28,
0x51, 0x0a, 0xfb, 0x45, 0xac, 0xe1, 0x0a, 0x1f, 0x4b, 0x79, 0x4d, 0x6f}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x3a, 0xeb, 0x52, 0x24, 0xec,
0xf8, 0x49, 0x92, 0x9b, 0x9d, 0x82, 0x8d, 0xb1, 0xce, 0xd4, 0xdd, 0x83,
0x20, 0x25, 0xe8, 0x01, 0x8b, 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
}
}
}
func TestChaCha20Encryption(t *testing.T) | {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x2e, 0x35, 0x9a, 0x25,
0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 0xe9,
0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd,
0x9f, 0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f,
0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6,
0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8, 0x07,
0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e, 0x08, 0x8a,
0x22, 0xb6, 0x5e, 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81,
0x8c, 0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, 0x5a, 0xf9, 0x0b, 0xbf, 0x74,
0xa3, 0x5b, 0xe6, 0xb4, 0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, 0x87,
0x4d}},
{"Any submission to the IETF intended by the Contributor for " +
"publication as all or part of an IETF Internet-Draft or RFC and any " +
"statement made within the context of an IETF activity is considered " +
"an \"IETF Contribution\". Such statements include oral statements " +
"in IETF sessions, as well as written and electronic communications " +
"made at any time or place, which are addressed to",
[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, uint32(1),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0xa3, 0xfb, 0xf0, 0x7d,
0xf3, 0xfa, 0x2f, 0xde, 0x4f, 0x37, 0x6c, 0xa2, 0x3e, 0x82, 0x73, 0x70,
0x41, 0x60, 0x5d, 0x9f, 0x4f, 0x4f, 0x57, 0xbd, 0x8c, 0xff, 0x2c, 0x1d,
0x4b, 0x79, 0x55, 0xec, 0x2a, 0x97, 0x94, 0x8b, 0xd3, 0x72, 0x29, 0x15,
0xc8, 0xf3, 0xd3, 0x37, 0xf7, 0xd3, 0x70, 0x05, 0x0e, 0x9e, 0x96, 0xd6,
0x47, 0xb7, 0xc3, 0x9f, 0x56, 0xe0, 0x31, 0xca, 0x5e, 0xb6, 0x25, 0x0d,
0x40, 0x42, 0xe0, 0x27, 0x85, 0xec, 0xec, 0xfa, 0x4b, 0x4b, 0xb5, 0xe8,
0xea, 0xd0, 0x44, 0x0e, 0x20, 0xb6, 0xe8, 0xdb, 0x09, 0xd8, 0x81, 0xa7,
0xc6, 0x13, 0x2f, 0x42, 0x0e, 0x52, 0x79, 0x50, 0x42, 0xbd, 0xfa, 0x77,
0x73, 0xd8, 0xa9, 0x05, 0x14, 0x47, 0xb3, 0x29, 0x1c, 0xe1, 0x41, 0x1c,
0x68, 0x04, 0x65, 0x55, 0x2a, 0xa6, 0xc4, 0x05, 0xb7, 0x76, 0x4d, 0x5e,
0x87, 0xbe, 0xa8, 0x5a, 0xd0, 0x0f, 0x84, 0x49, 0xed, 0x8f, 0x72, 0xd0,
0xd6, 0x62, 0xab, 0x05, 0x26, 0x91, 0xca, 0x66, 0x42, 0x4b, 0xc8, 0x6d,
0x2d, 0xf8, 0x0e, 0xa4, 0x1f, 0x43, 0xab, 0xf9, 0x37, 0xd3, 0x25, 0x9d,
0xc4, 0xb2, 0xd0, 0xdf, 0xb4, 0x8a, 0x6c, 0x91, 0x39, 0xdd, 0xd7, 0xf7,
0x69, 0x66, 0xe9, 0x28, 0xe6, 0x35, 0x55, 0x3b, 0xa7, 0x6c, 0x5c, 0x87,
0x9d, 0x7b, 0x35, 0xd4, 0x9e, 0xb2, 0xe6, 0x2b, 0x08, 0x71, 0xcd, 0xac,
0x63, 0x89, 0x39, 0xe2, 0x5e, 0x8a, 0x1e, 0x0e, 0xf9, 0xd5, 0x28, 0x0f,
0xa8, 0xca, 0x32, 0x8b, 0x35, 0x1c, 0x3c, 0x76, 0x59, 0x89, 0xcb, 0xcf,
0x3d, 0xaa, 0x8b, 0x6c, 0xcc, 0x3a, 0xaf, 0x9f, 0x39, 0x79, 0xc9, 0x2b,
0x37, 0x20, 0xfc, 0x88, 0xdc, 0x95, 0xed, 0x84, 0xa1, 0xbe, 0x05, 0x9c,
0x64, 0x99, 0xb9, 0xfd, 0xa2, 0x36, 0xe7, 0xe8, 0x18, 0xb0, 0x4b, 0x0b,
0xc3, 0x9c, 0x1e, 0x87, 0x6b, 0x19, 0x3b, 0xfe, 0x55, 0x69, 0x75, 0x3f,
0x88, 0x12, 0x8c, 0xc0, 0x8a, 0xaa, 0x9b, 0x63, 0xd1, 0xa1, 0x6f, 0x80,
0xef, 0x25, 0x54, 0xd7, 0x18, 0x9c, 0x41, 0x1f, 0x58, 0x69, 0xca, 0x52,
0xc5, 0xb8, 0x3f, 0xa3, 0x6f, 0xf2, 0x16, 0xb9, 0xc1, 0xd3, 0x00, 0x62,
0xbe, 0xbc, 0xfd, 0x2d, 0xc5, 0xbc, 0xe0, 0x91, 0x19, 0x34, 0xfd, 0xa7,
0x9a, 0x86, 0xf6, 0xe6, 0x98, 0xce, 0xd7, 0x59, 0xc3, 0xff, 0x9b, 0x64,
0x77, 0x33, 0x8f, 0x3d, 0xa4, 0xf9, 0xcd, 0x85, 0x14, 0xea, 0x99, 0x82,
0xcc, 0xaf, 0xb3, 0x41, 0xb2, 0x38, 0x4d, 0xd9, 0x02, 0xf3, 0xd1, 0xab,
0x7a, 0xc6, 0x1d, 0xd2, 0x9c, 0x6f, 0x21, 0xba, 0x5b, 0x86, 0x2f, 0x37,
0x30, 0xe3, 0x7c, 0xfd, 0xc4, 0xfd, 0x80, 0x6c, 0x22, 0xf2, 0x21}},
{"'Twas brillig, and the slithy toves\nDid gyre and gimble in the " +
"wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.",
[32]uint8{0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, 0xf3, 0x33,
0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, 0x47, 0x39, 0x17, 0xc1, 0x40,
0x2b, 0x80, 0x09, 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0},
uint32(42),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0x62, 0xe6, 0x34, 0x7f, 0x95, 0xed, 0x87, 0xa4, 0x5f, 0xfa,
0xe7, 0x42, 0x6f, 0x27, 0xa1, 0xdf, 0x5f, 0xb6, 0x91, 0x10, 0x04,
0x4c, 0x0d, 0x73, 0x11, 0x8e, 0xff, 0xa9, 0x5b, 0x01, 0xe5, 0xcf,
0x16, 0x6d, 0x3d, 0xf2, 0xd7, 0x21, 0xca, 0xf9, 0xb2, 0x1e, 0x5f,
0xb1, 0x4c, 0x61, 0x68, 0x71, 0xfd, 0x84, 0xc5, 0x4f, 0x9d, 0x65,
0xb2, 0x83, 0x19, 0x6c, 0x7f, 0xe4, 0xf6, 0x05, 0x53, 0xeb, 0xf3,
0x9c, 0x64, 0x02, 0xc4, 0x22, 0x34, 0xe3, 0x2a, 0x35, 0x6b, 0x3e,
0x76, 0x43, 0x12, 0xa6, 0x1a, 0x55, 0x32, 0x05, 0x57, 0x16, 0xea,
0xd6, 0x96, 0x25, 0x68, 0xf8, 0x7d, 0x3f, 0x3f, 0x77, 0x04, 0xc6,
0xa8, 0xd1, 0xbc, 0xd1, 0xbf, 0x4d, 0x50, 0xd6, 0x15, 0x4b, 0x6d,
0xa7, 0x31, 0xb1, 0x87, 0xb5, 0x8d, 0xfd, 0x72, 0x8a, 0xfa, 0x36,
0x75, 0x7a, 0x79, 0x7a, 0xc1, 0x88, 0xd1}}}
for _, test := range testCases {
byteArray := []byte(test.plaintext)
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, byteArray}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("%s\nencrypted to: % x\nexpected: % x", test.plaintext,
encrypt, test.ciphertext)
}
}
} | identifier_body |
|
rfc7539_test.go | // test vectors from https://tools.ietf.org/html/rfc7539
package rfc7539_test
import (
"bytes"
"github.com/ascottqqq/rfc7539"
"testing"
)
func TestChaCha20BlockFunction(t *testing.T) {
type testCase struct {
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
plaintext := make([]uint8, 64)
testCases := [...]testCase{{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x76, 0xb8, 0xe0, 0xad,
0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 0xef, 0xcc,
0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4,
0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x9f, 0x07, 0xe7, 0xbe,
0x55, 0x51, 0x38, 0x7a, 0x98, 0xba, 0x97, 0x7c, 0x73, 0x2d, 0x08, 0x0d,
0xcb, 0x0f, 0x29, 0xa0, 0x48, 0xe3, 0x65, 0x69, 0x12, 0xc6, 0x53, 0x3e,
0x32, 0xee, 0x7a, 0xed, 0x29, 0xb7, 0x21, 0x76, 0x9c, 0xe6, 0x4e, 0x43,
0xd5, 0x71, 0x33, 0xb0, 0x74, 0xd8, 0x39, 0xd5, 0x31, 0xed, 0x1f, 0x28,
0x51, 0x0a, 0xfb, 0x45, 0xac, 0xe1, 0x0a, 0x1f, 0x4b, 0x79, 0x4d, 0x6f}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x3a, 0xeb, 0x52, 0x24, 0xec,
0xf8, 0x49, 0x92, 0x9b, 0x9d, 0x82, 0x8d, 0xb1, 0xce, 0xd4, 0xdd, 0x83,
0x20, 0x25, 0xe8, 0x01, 0x8b, 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) |
}
}
func TestChaCha20Encryption(t *testing.T) {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x2e, 0x35, 0x9a, 0x25,
0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 0xe9,
0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd,
0x9f, 0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f,
0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6,
0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8, 0x07,
0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e, 0x08, 0x8a,
0x22, 0xb6, 0x5e, 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81,
0x8c, 0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, 0x5a, 0xf9, 0x0b, 0xbf, 0x74,
0xa3, 0x5b, 0xe6, 0xb4, 0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, 0x87,
0x4d}},
{"Any submission to the IETF intended by the Contributor for " +
"publication as all or part of an IETF Internet-Draft or RFC and any " +
"statement made within the context of an IETF activity is considered " +
"an \"IETF Contribution\". Such statements include oral statements " +
"in IETF sessions, as well as written and electronic communications " +
"made at any time or place, which are addressed to",
[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, uint32(1),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0xa3, 0xfb, 0xf0, 0x7d,
0xf3, 0xfa, 0x2f, 0xde, 0x4f, 0x37, 0x6c, 0xa2, 0x3e, 0x82, 0x73, 0x70,
0x41, 0x60, 0x5d, 0x9f, 0x4f, 0x4f, 0x57, 0xbd, 0x8c, 0xff, 0x2c, 0x1d,
0x4b, 0x79, 0x55, 0xec, 0x2a, 0x97, 0x94, 0x8b, 0xd3, 0x72, 0x29, 0x15,
0xc8, 0xf3, 0xd3, 0x37, 0xf7, 0xd3, 0x70, 0x05, 0x0e, 0x9e, 0x96, 0xd6,
0x47, 0xb7, 0xc3, 0x9f, 0x56, 0xe0, 0x31, 0xca, 0x5e, 0xb6, 0x25, 0x0d,
0x40, 0x42, 0xe0, 0x27, 0x85, 0xec, 0xec, 0xfa, 0x4b, 0x4b, 0xb5, 0xe8,
0xea, 0xd0, 0x44, 0x0e, 0x20, 0xb6, 0xe8, 0xdb, 0x09, 0xd8, 0x81, 0xa7,
0xc6, 0x13, 0x2f, 0x42, 0x0e, 0x52, 0x79, 0x50, 0x42, 0xbd, 0xfa, 0x77,
0x73, 0xd8, 0xa9, 0x05, 0x14, 0x47, 0xb3, 0x29, 0x1c, 0xe1, 0x41, 0x1c,
0x68, 0x04, 0x65, 0x55, 0x2a, 0xa6, 0xc4, 0x05, 0xb7, 0x76, 0x4d, 0x5e,
0x87, 0xbe, 0xa8, 0x5a, 0xd0, 0x0f, 0x84, 0x49, 0xed, 0x8f, 0x72, 0xd0,
0xd6, 0x62, 0xab, 0x05, 0x26, 0x91, 0xca, 0x66, 0x42, 0x4b, 0xc8, 0x6d,
0x2d, 0xf8, 0x0e, 0xa4, 0x1f, 0x43, 0xab, 0xf9, 0x37, 0xd3, 0x25, 0x9d,
0xc4, 0xb2, 0xd0, 0xdf, 0xb4, 0x8a, 0x6c, 0x91, 0x39, 0xdd, 0xd7, 0xf7,
0x69, 0x66, 0xe9, 0x28, 0xe6, 0x35, 0x55, 0x3b, 0xa7, 0x6c, 0x5c, 0x87,
0x9d, 0x7b, 0x35, 0xd4, 0x9e, 0xb2, 0xe6, 0x2b, 0x08, 0x71, 0xcd, 0xac,
0x63, 0x89, 0x39, 0xe2, 0x5e, 0x8a, 0x1e, 0x0e, 0xf9, 0xd5, 0x28, 0x0f,
0xa8, 0xca, 0x32, 0x8b, 0x35, 0x1c, 0x3c, 0x76, 0x59, 0x89, 0xcb, 0xcf,
0x3d, 0xaa, 0x8b, 0x6c, 0xcc, 0x3a, 0xaf, 0x9f, 0x39, 0x79, 0xc9, 0x2b,
0x37, 0x20, 0xfc, 0x88, 0xdc, 0x95, 0xed, 0x84, 0xa1, 0xbe, 0x05, 0x9c,
0x64, 0x99, 0xb9, 0xfd, 0xa2, 0x36, 0xe7, 0xe8, 0x18, 0xb0, 0x4b, 0x0b,
0xc3, 0x9c, 0x1e, 0x87, 0x6b, 0x19, 0x3b, 0xfe, 0x55, 0x69, 0x75, 0x3f,
0x88, 0x12, 0x8c, 0xc0, 0x8a, 0xaa, 0x9b, 0x63, 0xd1, 0xa1, 0x6f, 0x80,
0xef, 0x25, 0x54, 0xd7, 0x18, 0x9c, 0x41, 0x1f, 0x58, 0x69, 0xca, 0x52,
0xc5, 0xb8, 0x3f, 0xa3, 0x6f, 0xf2, 0x16, 0xb9, 0xc1, 0xd3, 0x00, 0x62,
0xbe, 0xbc, 0xfd, 0x2d, 0xc5, 0xbc, 0xe0, 0x91, 0x19, 0x34, 0xfd, 0xa7,
0x9a, 0x86, 0xf6, 0xe6, 0x98, 0xce, 0xd7, 0x59, 0xc3, 0xff, 0x9b, 0x64,
0x77, 0x33, 0x8f, 0x3d, 0xa4, 0xf9, 0xcd, 0x85, 0x14, 0xea, 0x99, 0x82,
0xcc, 0xaf, 0xb3, 0x41, 0xb2, 0x38, 0x4d, 0xd9, 0x02, 0xf3, 0xd1, 0xab,
0x7a, 0xc6, 0x1d, 0xd2, 0x9c, 0x6f, 0x21, 0xba, 0x5b, 0x86, 0x2f, 0x37,
0x30, 0xe3, 0x7c, 0xfd, 0xc4, 0xfd, 0x80, 0x6c, 0x22, 0xf2, 0x21}},
{"'Twas brillig, and the slithy toves\nDid gyre and gimble in the " +
"wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.",
[32]uint8{0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, 0xf3, 0x33,
0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, 0x47, 0x39, 0x17, 0xc1, 0x40,
0x2b, 0x80, 0x09, 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0},
uint32(42),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0x62, 0xe6, 0x34, 0x7f, 0x95, 0xed, 0x87, 0xa4, 0x5f, 0xfa,
0xe7, 0x42, 0x6f, 0x27, 0xa1, 0xdf, 0x5f, 0xb6, 0x91, 0x10, 0x04,
0x4c, 0x0d, 0x73, 0x11, 0x8e, 0xff, 0xa9, 0x5b, 0x01, 0xe5, 0xcf,
0x16, 0x6d, 0x3d, 0xf2, 0xd7, 0x21, 0xca, 0xf9, 0xb2, 0x1e, 0x5f,
0xb1, 0x4c, 0x61, 0x68, 0x71, 0xfd, 0x84, 0xc5, 0x4f, 0x9d, 0x65,
0xb2, 0x83, 0x19, 0x6c, 0x7f, 0xe4, 0xf6, 0x05, 0x53, 0xeb, 0xf3,
0x9c, 0x64, 0x02, 0xc4, 0x22, 0x34, 0xe3, 0x2a, 0x35, 0x6b, 0x3e,
0x76, 0x43, 0x12, 0xa6, 0x1a, 0x55, 0x32, 0x05, 0x57, 0x16, 0xea,
0xd6, 0x96, 0x25, 0x68, 0xf8, 0x7d, 0x3f, 0x3f, 0x77, 0x04, 0xc6,
0xa8, 0xd1, 0xbc, 0xd1, 0xbf, 0x4d, 0x50, 0xd6, 0x15, 0x4b, 0x6d,
0xa7, 0x31, 0xb1, 0x87, 0xb5, 0x8d, 0xfd, 0x72, 0x8a, 0xfa, 0x36,
0x75, 0x7a, 0x79, 0x7a, 0xc1, 0x88, 0xd1}}}
for _, test := range testCases {
byteArray := []byte(test.plaintext)
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, byteArray}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("%s\nencrypted to: % x\nexpected: % x", test.plaintext,
encrypt, test.ciphertext)
}
}
}
| {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
} | conditional_block |
rfc7539_test.go | // test vectors from https://tools.ietf.org/html/rfc7539
package rfc7539_test
import (
"bytes"
"github.com/ascottqqq/rfc7539"
"testing"
)
func TestChaCha20BlockFunction(t *testing.T) {
type testCase struct {
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
plaintext := make([]uint8, 64)
testCases := [...]testCase{{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x76, 0xb8, 0xe0, 0xad,
0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 0xef, 0xcc,
0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4,
0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x9f, 0x07, 0xe7, 0xbe,
0x55, 0x51, 0x38, 0x7a, 0x98, 0xba, 0x97, 0x7c, 0x73, 0x2d, 0x08, 0x0d,
0xcb, 0x0f, 0x29, 0xa0, 0x48, 0xe3, 0x65, 0x69, 0x12, 0xc6, 0x53, 0x3e,
0x32, 0xee, 0x7a, 0xed, 0x29, 0xb7, 0x21, 0x76, 0x9c, 0xe6, 0x4e, 0x43,
0xd5, 0x71, 0x33, 0xb0, 0x74, 0xd8, 0x39, 0xd5, 0x31, 0xed, 0x1f, 0x28,
0x51, 0x0a, 0xfb, 0x45, 0xac, 0xe1, 0x0a, 0x1f, 0x4b, 0x79, 0x4d, 0x6f}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x3a, 0xeb, 0x52, 0x24, 0xec,
0xf8, 0x49, 0x92, 0x9b, 0x9d, 0x82, 0x8d, 0xb1, 0xce, 0xd4, 0xdd, 0x83,
0x20, 0x25, 0xe8, 0x01, 0x8b, 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
}
}
}
func | (t *testing.T) {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x2e, 0x35, 0x9a, 0x25,
0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 0xe9,
0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd,
0x9f, 0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f,
0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6,
0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8, 0x07,
0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e, 0x08, 0x8a,
0x22, 0xb6, 0x5e, 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81,
0x8c, 0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, 0x5a, 0xf9, 0x0b, 0xbf, 0x74,
0xa3, 0x5b, 0xe6, 0xb4, 0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, 0x87,
0x4d}},
{"Any submission to the IETF intended by the Contributor for " +
"publication as all or part of an IETF Internet-Draft or RFC and any " +
"statement made within the context of an IETF activity is considered " +
"an \"IETF Contribution\". Such statements include oral statements " +
"in IETF sessions, as well as written and electronic communications " +
"made at any time or place, which are addressed to",
[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, uint32(1),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0xa3, 0xfb, 0xf0, 0x7d,
0xf3, 0xfa, 0x2f, 0xde, 0x4f, 0x37, 0x6c, 0xa2, 0x3e, 0x82, 0x73, 0x70,
0x41, 0x60, 0x5d, 0x9f, 0x4f, 0x4f, 0x57, 0xbd, 0x8c, 0xff, 0x2c, 0x1d,
0x4b, 0x79, 0x55, 0xec, 0x2a, 0x97, 0x94, 0x8b, 0xd3, 0x72, 0x29, 0x15,
0xc8, 0xf3, 0xd3, 0x37, 0xf7, 0xd3, 0x70, 0x05, 0x0e, 0x9e, 0x96, 0xd6,
0x47, 0xb7, 0xc3, 0x9f, 0x56, 0xe0, 0x31, 0xca, 0x5e, 0xb6, 0x25, 0x0d,
0x40, 0x42, 0xe0, 0x27, 0x85, 0xec, 0xec, 0xfa, 0x4b, 0x4b, 0xb5, 0xe8,
0xea, 0xd0, 0x44, 0x0e, 0x20, 0xb6, 0xe8, 0xdb, 0x09, 0xd8, 0x81, 0xa7,
0xc6, 0x13, 0x2f, 0x42, 0x0e, 0x52, 0x79, 0x50, 0x42, 0xbd, 0xfa, 0x77,
0x73, 0xd8, 0xa9, 0x05, 0x14, 0x47, 0xb3, 0x29, 0x1c, 0xe1, 0x41, 0x1c,
0x68, 0x04, 0x65, 0x55, 0x2a, 0xa6, 0xc4, 0x05, 0xb7, 0x76, 0x4d, 0x5e,
0x87, 0xbe, 0xa8, 0x5a, 0xd0, 0x0f, 0x84, 0x49, 0xed, 0x8f, 0x72, 0xd0,
0xd6, 0x62, 0xab, 0x05, 0x26, 0x91, 0xca, 0x66, 0x42, 0x4b, 0xc8, 0x6d,
0x2d, 0xf8, 0x0e, 0xa4, 0x1f, 0x43, 0xab, 0xf9, 0x37, 0xd3, 0x25, 0x9d,
0xc4, 0xb2, 0xd0, 0xdf, 0xb4, 0x8a, 0x6c, 0x91, 0x39, 0xdd, 0xd7, 0xf7,
0x69, 0x66, 0xe9, 0x28, 0xe6, 0x35, 0x55, 0x3b, 0xa7, 0x6c, 0x5c, 0x87,
0x9d, 0x7b, 0x35, 0xd4, 0x9e, 0xb2, 0xe6, 0x2b, 0x08, 0x71, 0xcd, 0xac,
0x63, 0x89, 0x39, 0xe2, 0x5e, 0x8a, 0x1e, 0x0e, 0xf9, 0xd5, 0x28, 0x0f,
0xa8, 0xca, 0x32, 0x8b, 0x35, 0x1c, 0x3c, 0x76, 0x59, 0x89, 0xcb, 0xcf,
0x3d, 0xaa, 0x8b, 0x6c, 0xcc, 0x3a, 0xaf, 0x9f, 0x39, 0x79, 0xc9, 0x2b,
0x37, 0x20, 0xfc, 0x88, 0xdc, 0x95, 0xed, 0x84, 0xa1, 0xbe, 0x05, 0x9c,
0x64, 0x99, 0xb9, 0xfd, 0xa2, 0x36, 0xe7, 0xe8, 0x18, 0xb0, 0x4b, 0x0b,
0xc3, 0x9c, 0x1e, 0x87, 0x6b, 0x19, 0x3b, 0xfe, 0x55, 0x69, 0x75, 0x3f,
0x88, 0x12, 0x8c, 0xc0, 0x8a, 0xaa, 0x9b, 0x63, 0xd1, 0xa1, 0x6f, 0x80,
0xef, 0x25, 0x54, 0xd7, 0x18, 0x9c, 0x41, 0x1f, 0x58, 0x69, 0xca, 0x52,
0xc5, 0xb8, 0x3f, 0xa3, 0x6f, 0xf2, 0x16, 0xb9, 0xc1, 0xd3, 0x00, 0x62,
0xbe, 0xbc, 0xfd, 0x2d, 0xc5, 0xbc, 0xe0, 0x91, 0x19, 0x34, 0xfd, 0xa7,
0x9a, 0x86, 0xf6, 0xe6, 0x98, 0xce, 0xd7, 0x59, 0xc3, 0xff, 0x9b, 0x64,
0x77, 0x33, 0x8f, 0x3d, 0xa4, 0xf9, 0xcd, 0x85, 0x14, 0xea, 0x99, 0x82,
0xcc, 0xaf, 0xb3, 0x41, 0xb2, 0x38, 0x4d, 0xd9, 0x02, 0xf3, 0xd1, 0xab,
0x7a, 0xc6, 0x1d, 0xd2, 0x9c, 0x6f, 0x21, 0xba, 0x5b, 0x86, 0x2f, 0x37,
0x30, 0xe3, 0x7c, 0xfd, 0xc4, 0xfd, 0x80, 0x6c, 0x22, 0xf2, 0x21}},
{"'Twas brillig, and the slithy toves\nDid gyre and gimble in the " +
"wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.",
[32]uint8{0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, 0xf3, 0x33,
0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, 0x47, 0x39, 0x17, 0xc1, 0x40,
0x2b, 0x80, 0x09, 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0},
uint32(42),
[12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02},
[]byte{0x62, 0xe6, 0x34, 0x7f, 0x95, 0xed, 0x87, 0xa4, 0x5f, 0xfa,
0xe7, 0x42, 0x6f, 0x27, 0xa1, 0xdf, 0x5f, 0xb6, 0x91, 0x10, 0x04,
0x4c, 0x0d, 0x73, 0x11, 0x8e, 0xff, 0xa9, 0x5b, 0x01, 0xe5, 0xcf,
0x16, 0x6d, 0x3d, 0xf2, 0xd7, 0x21, 0xca, 0xf9, 0xb2, 0x1e, 0x5f,
0xb1, 0x4c, 0x61, 0x68, 0x71, 0xfd, 0x84, 0xc5, 0x4f, 0x9d, 0x65,
0xb2, 0x83, 0x19, 0x6c, 0x7f, 0xe4, 0xf6, 0x05, 0x53, 0xeb, 0xf3,
0x9c, 0x64, 0x02, 0xc4, 0x22, 0x34, 0xe3, 0x2a, 0x35, 0x6b, 0x3e,
0x76, 0x43, 0x12, 0xa6, 0x1a, 0x55, 0x32, 0x05, 0x57, 0x16, 0xea,
0xd6, 0x96, 0x25, 0x68, 0xf8, 0x7d, 0x3f, 0x3f, 0x77, 0x04, 0xc6,
0xa8, 0xd1, 0xbc, 0xd1, 0xbf, 0x4d, 0x50, 0xd6, 0x15, 0x4b, 0x6d,
0xa7, 0x31, 0xb1, 0x87, 0xb5, 0x8d, 0xfd, 0x72, 0x8a, 0xfa, 0x36,
0x75, 0x7a, 0x79, 0x7a, 0xc1, 0x88, 0xd1}}}
for _, test := range testCases {
byteArray := []byte(test.plaintext)
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, byteArray}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("%s\nencrypted to: % x\nexpected: % x", test.plaintext,
encrypt, test.ciphertext)
}
}
}
| TestChaCha20Encryption | identifier_name |
stream.rs | // Copyright 2016 Jeffrey Burdges.
//! Sphinx header symmetric cryptographic routines
//!
//! ...
use std::fmt;
use std::ops::Range;
use std::marker::PhantomData;
// use clear_on_drop::ClearOnDrop;
use crypto::mac::Mac;
use crypto::poly1305::Poly1305;
use chacha::ChaCha as ChaCha20;
use keystream::{KeyStream,SeekableKeyStream};
use keystream::Error as KeystreamError;
impl From<KeystreamError> for SphinxError {
fn from(ke: KeystreamError) -> SphinxError {
match ke {
KeystreamError::EndReached => {
// We verify the maximum key stream length is not
// exceeded inside `SphinxParams::stream_chunks`.
// panic!("Failed to unwrap ChaCha call!");
SphinxError::InternalError("XChaCha20 stream exceeded!")
},
}
}
}
use super::layout::{Params};
use super::body::{BodyCipher,BODY_CIPHER_KEY_SIZE};
use super::replay::*;
use super::error::*;
use super::*;
// /// Sphinx onion encrypted routing information
// pub type BetaBytes = [u8];
pub const GAMMA_LENGTH : usize = 16;
/// Unwrapped Sphinx poly1305 MAC
pub type GammaBytes = [u8; GAMMA_LENGTH];
/// Wrapped Sphinx poly1305 MAC
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Gamma(pub GammaBytes);
/// Sphinx poly1305 MAC key
#[derive(Debug,Clone,Copy,Default)]
struct GammaKey(pub [u8; 32]);
/// IETF Chacha20 stream cipher key and nonce.
#[derive(Clone)]
pub struct ChaChaKnN {
/// IETF ChaCha20 32 byte key
pub key: [u8; 32],
/// IETF ChaCha20 12 byte nonce
pub nonce: [u8; 12],
}
impl ChaChaKnN {
    /// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
    /// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
    /// replay code and gamma key.  We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
    /// Initialize our IETF ChaCha20 stream cipher by invoking
    /// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
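// Minimal usage sketch (hypothetical caller code; `ss`, `rn` and the concrete
// `P: Params` come from the surrounding key-exchange and routing layers):
//
//     let key = SphinxKey::<P>::new_kdf(&ss, &rn);
//     let mut hop = key.header_cipher()?;
//     hop.verify_gamma(beta, &gamma)?;
//     hop.xor_beta(beta, 0, 0)?;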
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
        // safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
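        // (The bound is 2^38 bytes: IETF ChaCha20's 32-bit block counter
        // allows 2^32 blocks of 64 bytes each.)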
        if offset as u64 > (1u64 << 38) {
            Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
    /// IETF ChaCha20 stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initalized after `ClearOnDrop` zeros it so
// that it may be dropped normally.  Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
    // TODO: Can we abstract the length checks?  Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn | (&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
    /// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> SphinxResult<()> {
replayer.replay_check(&self.replay_code)
}
/// Returns full key schedule for the lioness cipher for the body.
pub fn lioness_key(&mut self) -> [u8; BODY_CIPHER_KEY_SIZE] {
let lioness_key = &mut [0u8; BODY_CIPHER_KEY_SIZE];
self.stream.seek_to(self.chunks.lioness_key.start as u64).unwrap();
self.stream.xor_read(lioness_key).unwrap();
*lioness_key
}
pub fn body_cipher(&mut self) -> BodyCipher<P> {
BodyCipher {
params: PhantomData,
cipher: ::lioness::LionessDefault::new_raw(& self.lioness_key())
}
}
/// Returns the curve25519 scalar for blinding alpha in Sphinx.
pub fn blinding(&mut self) -> ::curve::Scalar {
let b = &mut [0u8; 64];
self.stream.seek_to(self.chunks.blinding.start as u64).unwrap();
self.stream.xor_read(b).unwrap();
::curve::Scalar::make(b)
}
/// Returns our name for the packet for insertion into the SURB log
/// if the packet gets reforwarded.
pub fn packet_name(&mut self) -> &PacketName {
&self.packet_name
}
pub fn xor_beta(&mut self, beta: &mut [u8], offset: usize, tail: usize)
-> SphinxResult<()> {
let len = P::BETA_LENGTH as usize - offset;
if beta.len() < len {
return Err( SphinxError::InternalError("Beta too short to encrypt!") );
}
if tail > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Excessive tail length requested!") );
}
if beta.len() > len+tail {
return Err( SphinxError::InternalError("Beta too long to encrypt!") );
}
self.stream.seek_to((self.chunks.beta.start + offset) as u64).unwrap();
self.stream.xor_read(beta).unwrap();
Ok(())
}
pub fn set_beta_tail(&mut self, beta_tail: &mut [u8]) -> SphinxResult<()> {
if beta_tail.len() > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Beta's tail is too long!") );
}
for i in beta_tail.iter_mut() { *i = 0; }
self.stream.seek_to(self.chunks.beta_tail.start as u64).unwrap();
self.stream.xor_read(beta_tail).unwrap();
Ok(())
}
pub fn xor_surb_log(&mut self, surb_log: &mut [u8]) -> SphinxResult<()> {
if surb_log.len() > P::SURB_LOG_LENGTH as usize {
return Err( SphinxError::InternalError("SURB log too long!") );
}
self.stream.seek_to(self.chunks.surb_log.start as u64).unwrap();
self.stream.xor_read(surb_log).unwrap();
Ok(())
}
    /// Sender's suggested delay for this packet.
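    /// The delay is sampled from an exponential distribution seeded by this
    /// hop's key stream, so the sender, who shares that key material, can
    /// derive the same value.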
pub fn delay(&mut self) -> ::std::time::Duration {
use rand::{ChaChaRng, SeedableRng}; // Rng, Rand
let mut rng = {
let mut seed = [0u32; 8];
fn as_bytes_mut(t: &mut [u32; 8]) -> &mut [u8; 32] {
unsafe { ::std::mem::transmute(t) }
}
self.stream.seek_to(self.chunks.delay.start as u64).unwrap();
self.stream.xor_read(as_bytes_mut(&mut seed)).unwrap();
for i in seed.iter_mut() { *i = u32::from_le(*i); }
ChaChaRng::from_seed(&seed)
};
use rand::distributions::{Exp, IndependentSample};
let exp = Exp::new(P::DELAY_LAMBDA);
let delay = exp.ind_sample(&mut rng);
debug_assert!( delay.is_finite() && delay.is_sign_positive() );
::std::time::Duration::from_secs( delay.round() as u64 )
// ::std::time::Duration::new(
// delay.trunc() as u64,
// (1000*delay.fract()).round() as u32
// )
}
// /// Approximate time when mix node should forward this packet
// pub fn time(&mut self) -> ::std::time::SystemTime {
// ::std::time::SystemTime::now() + self.delay()
// }
}
| create_gamma | identifier_name |
stream.rs | // Copyright 2016 Jeffrey Burdges.
//! Sphinx header symmetric cryptographic routines
//!
//! ...
use std::fmt;
use std::ops::Range;
use std::marker::PhantomData;
// use clear_on_drop::ClearOnDrop;
use crypto::mac::Mac;
use crypto::poly1305::Poly1305;
use chacha::ChaCha as ChaCha20;
use keystream::{KeyStream,SeekableKeyStream};
use keystream::Error as KeystreamError;
impl From<KeystreamError> for SphinxError {
fn from(ke: KeystreamError) -> SphinxError {
match ke {
KeystreamError::EndReached => {
// We verify the maximum key stream length is not
// exceeded inside `SphinxParams::stream_chunks`.
// panic!("Failed to unwrap ChaCha call!");
SphinxError::InternalError("XChaCha20 stream exceeded!")
},
}
}
}
use super::layout::{Params};
use super::body::{BodyCipher,BODY_CIPHER_KEY_SIZE};
use super::replay::*;
use super::error::*;
use super::*;
// /// Sphinx onion encrypted routing information
// pub type BetaBytes = [u8];
pub const GAMMA_LENGTH : usize = 16;
/// Unwrapped Sphinx poly1305 MAC
pub type GammaBytes = [u8; GAMMA_LENGTH];
/// Wrapped Sphinx poly1305 MAC
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Gamma(pub GammaBytes);
/// Sphinx poly1305 MAC key
#[derive(Debug,Clone,Copy,Default)]
struct GammaKey(pub [u8; 32]);
/// IETF Chacha20 stream cipher key and nonce.
#[derive(Clone)]
pub struct ChaChaKnN {
/// IETF ChaCha20 32 byte key
pub key: [u8; 32],
/// IETF ChaCha20 12 byte nonce
pub nonce: [u8; 12],
}
impl ChaChaKnN {
    /// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
    /// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
    /// replay code and gamma key.  We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
    /// Initialize our IETF ChaCha20 stream cipher by invoking
    /// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if (offset as u64) > (1u64 << 38) { // 2^38 byte limit of the IETF ChaCha20 key stream
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
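// --- Editor's sketch (not in the original source): the alignment rule used by
// the `reserve` closure above, extracted for clarity. With `block == true` the
// offset is rounded up to the next 64-byte ChaCha20 block before a range is
// handed out, so e.g. 64 -> 128 and 70 -> 128.
#[allow(dead_code)]
fn round_up_to_next_block(offset: usize) -> usize {
offset + (64 - offset % 64)
}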
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// XChaCha20 Stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> SphinxResult<()> {
replayer.replay_check(&self.replay_code)
}
/// Returns full key schedule for the lioness cipher for the body.
pub fn lioness_key(&mut self) -> [u8; BODY_CIPHER_KEY_SIZE] {
let lioness_key = &mut [0u8; BODY_CIPHER_KEY_SIZE];
self.stream.seek_to(self.chunks.lioness_key.start as u64).unwrap();
self.stream.xor_read(lioness_key).unwrap();
*lioness_key
}
pub fn body_cipher(&mut self) -> BodyCipher<P> {
BodyCipher {
params: PhantomData,
cipher: ::lioness::LionessDefault::new_raw(& self.lioness_key())
}
}
/// Returns the curve25519 scalar for blinding alpha in Sphinx.
pub fn blinding(&mut self) -> ::curve::Scalar {
let b = &mut [0u8; 64];
self.stream.seek_to(self.chunks.blinding.start as u64).unwrap();
self.stream.xor_read(b).unwrap();
::curve::Scalar::make(b)
}
/// Returns our name for the packet for insertion into the SURB log
/// if the packet gets reforwarded.
pub fn packet_name(&mut self) -> &PacketName {
&self.packet_name
}
pub fn xor_beta(&mut self, beta: &mut [u8], offset: usize, tail: usize)
-> SphinxResult<()> {
let len = P::BETA_LENGTH as usize - offset;
if beta.len() < len {
return Err( SphinxError::InternalError("Beta too short to encrypt!") );
}
if tail > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Excessive tail length requested!") );
}
if beta.len() > len+tail {
return Err( SphinxError::InternalError("Beta too long to encrypt!") );
}
self.stream.seek_to((self.chunks.beta.start + offset) as u64).unwrap();
self.stream.xor_read(beta).unwrap();
Ok(())
}
pub fn set_beta_tail(&mut self, beta_tail: &mut [u8]) -> SphinxResult<()> {
if beta_tail.len() > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Beta's tail is too long!") );
}
for i in beta_tail.iter_mut() { *i = 0; }
self.stream.seek_to(self.chunks.beta_tail.start as u64).unwrap();
self.stream.xor_read(beta_tail).unwrap();
Ok(())
}
pub fn xor_surb_log(&mut self, surb_log: &mut [u8]) -> SphinxResult<()> {
if surb_log.len() > P::SURB_LOG_LENGTH as usize |
self.stream.seek_to(self.chunks.surb_log.start as u64).unwrap();
self.stream.xor_read(surb_log).unwrap();
Ok(())
}
/// Sender's suggested delay for this packet.
pub fn delay(&mut self) -> ::std::time::Duration {
use rand::{ChaChaRng, SeedableRng}; // Rng, Rand
let mut rng = {
let mut seed = [0u32; 8];
fn as_bytes_mut(t: &mut [u32; 8]) -> &mut [u8; 32] {
unsafe { ::std::mem::transmute(t) }
}
self.stream.seek_to(self.chunks.delay.start as u64).unwrap();
self.stream.xor_read(as_bytes_mut(&mut seed)).unwrap();
for i in seed.iter_mut() { *i = u32::from_le(*i); }
ChaChaRng::from_seed(&seed)
};
use rand::distributions::{Exp, IndependentSample};
let exp = Exp::new(P::DELAY_LAMBDA);
let delay = exp.ind_sample(&mut rng);
debug_assert!( delay.is_finite() && delay.is_sign_positive() );
::std::time::Duration::from_secs( delay.round() as u64 )
// ::std::time::Duration::new(
// delay.trunc() as u64,
// (1000*delay.fract()).round() as u32
// )
}
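// --- Editor's note (not in the original source): the delay above is an
// exponential sample with rate `P::DELAY_LAMBDA`, seeded purely from the
// shared ChaCha20 key stream, so the sender can predict the mix's delay.
// In inverse-CDF form the same draw is: delay_secs = -ln(u) / lambda for a
// uniform u in (0, 1].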
// /// Approximate time when mix node should forward this packet
// pub fn time(&mut self) -> ::std::time::SystemTime {
// ::std::time::SystemTime::now() + self.delay()
// }
}
| {
return Err( SphinxError::InternalError("SURB log too long!") );
} | conditional_block |
stream.rs | // Copyright 2016 Jeffrey Burdges.
//! Sphinx header symmetric cryptographic routines
//!
//! ...
use std::fmt;
use std::ops::Range;
use std::marker::PhantomData;
// use clear_on_drop::ClearOnDrop;
use crypto::mac::Mac;
use crypto::poly1305::Poly1305;
use chacha::ChaCha as ChaCha20;
use keystream::{KeyStream,SeekableKeyStream};
use keystream::Error as KeystreamError;
impl<'a> From<KeystreamError> for SphinxError {
fn from(ke: KeystreamError) -> SphinxError {
match ke {
KeystreamError::EndReached => {
// We verify the maximum key stream length is not
// exceeded inside `SphinxParams::stream_chunks`.
// panic!("Failed to unwrap ChaCha call!");
SphinxError::InternalError("XChaCha20 stream exceeded!")
},
}
}
}
use super::layout::{Params};
use super::body::{BodyCipher,BODY_CIPHER_KEY_SIZE};
use super::replay::*;
use super::error::*;
use super::*;
// /// Sphinx onion encrypted routing information
// pub type BetaBytes = [u8];
pub const GAMMA_LENGTH : usize = 16;
/// Unwrapped Sphinx poly1305 MAC
pub type GammaBytes = [u8; GAMMA_LENGTH];
/// Wrapped Sphinx poly1305 MAC
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Gamma(pub GammaBytes);
/// Sphinx poly1305 MAC key
#[derive(Debug,Clone,Copy,Default)]
struct GammaKey(pub [u8; 32]);
/// IETF Chacha20 stream cipher key and nonce.
#[derive(Clone)]
pub struct ChaChaKnN {
/// IETF ChaCha20 32 byte key
pub key: [u8; 32],
/// IETF ChaCha20 12 byte nonce
pub nonce: [u8; 12],
}
impl ChaChaKnN {
/// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
/// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
/// replay code and gamma key. We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
/// Initialize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if (offset as u64) > (1u64 << 38) { // 2^38 byte limit of the IETF ChaCha20 key stream
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// XChaCha20 Stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> |
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> SphinxResult<()> {
replayer.replay_check(&self.replay_code)
}
/// Returns full key schedule for the lioness cipher for the body.
pub fn lioness_key(&mut self) -> [u8; BODY_CIPHER_KEY_SIZE] {
let lioness_key = &mut [0u8; BODY_CIPHER_KEY_SIZE];
self.stream.seek_to(self.chunks.lioness_key.start as u64).unwrap();
self.stream.xor_read(lioness_key).unwrap();
*lioness_key
}
pub fn body_cipher(&mut self) -> BodyCipher<P> {
BodyCipher {
params: PhantomData,
cipher: ::lioness::LionessDefault::new_raw(& self.lioness_key())
}
}
/// Returns the curve25519 scalar for blinding alpha in Sphinx.
pub fn blinding(&mut self) -> ::curve::Scalar {
let b = &mut [0u8; 64];
self.stream.seek_to(self.chunks.blinding.start as u64).unwrap();
self.stream.xor_read(b).unwrap();
::curve::Scalar::make(b)
}
/// Returns our name for the packet for insertion into the SURB log
/// if the packet gets reforwarded.
pub fn packet_name(&mut self) -> &PacketName {
&self.packet_name
}
pub fn xor_beta(&mut self, beta: &mut [u8], offset: usize, tail: usize)
-> SphinxResult<()> {
let len = P::BETA_LENGTH as usize - offset;
if beta.len() < len {
return Err( SphinxError::InternalError("Beta too short to encrypt!") );
}
if tail > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Excessive tail length requested!") );
}
if beta.len() > len+tail {
return Err( SphinxError::InternalError("Beta too long to encrypt!") );
}
self.stream.seek_to((self.chunks.beta.start + offset) as u64).unwrap();
self.stream.xor_read(beta).unwrap();
Ok(())
}
pub fn set_beta_tail(&mut self, beta_tail: &mut [u8]) -> SphinxResult<()> {
if beta_tail.len() > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Beta's tail is too long!") );
}
for i in beta_tail.iter_mut() { *i = 0; }
self.stream.seek_to(self.chunks.beta_tail.start as u64).unwrap();
self.stream.xor_read(beta_tail).unwrap();
Ok(())
}
pub fn xor_surb_log(&mut self, surb_log: &mut [u8]) -> SphinxResult<()> {
if surb_log.len() > P::SURB_LOG_LENGTH as usize {
return Err( SphinxError::InternalError("SURB log too long!") );
}
self.stream.seek_to(self.chunks.surb_log.start as u64).unwrap();
self.stream.xor_read(surb_log).unwrap();
Ok(())
}
/// Sender's suggested delay for this packet.
pub fn delay(&mut self) -> ::std::time::Duration {
use rand::{ChaChaRng, SeedableRng}; // Rng, Rand
let mut rng = {
let mut seed = [0u32; 8];
fn as_bytes_mut(t: &mut [u32; 8]) -> &mut [u8; 32] {
unsafe { ::std::mem::transmute(t) }
}
self.stream.seek_to(self.chunks.delay.start as u64).unwrap();
self.stream.xor_read(as_bytes_mut(&mut seed)).unwrap();
for i in seed.iter_mut() { *i = u32::from_le(*i); }
ChaChaRng::from_seed(&seed)
};
use rand::distributions::{Exp, IndependentSample};
let exp = Exp::new(P::DELAY_LAMBDA);
let delay = exp.ind_sample(&mut rng);
debug_assert!( delay.is_finite() && delay.is_sign_positive() );
::std::time::Duration::from_secs( delay.round() as u64 )
// ::std::time::Duration::new(
// delay.trunc() as u64,
// (1000*delay.fract()).round() as u32
// )
}
// /// Approximate time when mix node should forward this packet
// pub fn time(&mut self) -> ::std::time::SystemTime {
// ::std::time::SystemTime::now() + self.delay()
// }
}
| {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
} | identifier_body |
stream.rs | // Copyright 2016 Jeffrey Burdges.
//! Sphinx header symmetric cryptographic routines
//!
//! ...
use std::fmt;
use std::ops::Range;
use std::marker::PhantomData;
// use clear_on_drop::ClearOnDrop;
use crypto::mac::Mac;
use crypto::poly1305::Poly1305;
use chacha::ChaCha as ChaCha20;
use keystream::{KeyStream,SeekableKeyStream};
use keystream::Error as KeystreamError;
impl<'a> From<KeystreamError> for SphinxError {
fn from(ke: KeystreamError) -> SphinxError {
match ke {
KeystreamError::EndReached => {
// We verify the maximum key stream length is not
// exceeded inside `SphinxParams::stream_chunks`.
// panic!("Failed to unwrap ChaCha call!");
SphinxError::InternalError("XChaCha20 stream exceeded!")
},
}
}
}
use super::layout::{Params};
use super::body::{BodyCipher,BODY_CIPHER_KEY_SIZE};
use super::replay::*;
use super::error::*;
use super::*;
// /// Sphinx onion encrypted routing information
// pub type BetaBytes = [u8];
pub const GAMMA_LENGTH : usize = 16;
/// Unwrapped Sphinx poly1305 MAC
pub type GammaBytes = [u8; GAMMA_LENGTH];
/// Wrapped Sphinx poly1305 MAC
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Gamma(pub GammaBytes);
/// Sphinx poly1305 MAC key
#[derive(Debug,Clone,Copy,Default)]
struct GammaKey(pub [u8; 32]);
/// IETF Chacha20 stream cipher key and nonce.
#[derive(Clone)]
pub struct ChaChaKnN {
/// IETF ChaCha20 32 byte key
pub key: [u8; 32],
/// IETF ChaCha20 12 byte nonce
pub nonce: [u8; 12],
}
impl ChaChaKnN {
/// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
/// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
/// replay code and gamma key. We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key }, | /// Initalize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if (offset as u64) > (1u64 << 38) { // 2^38 byte limit of the IETF ChaCha20 key stream
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// XChaCha20 Stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
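// --- Editor's note (not in the original source): the MAC comparison above
// deliberately goes through `consistenttime::ct_u8_slice_eq` rather than `==`
// so that the time taken does not depend on how many leading bytes of a forged
// gamma happen to match.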
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> SphinxResult<()> {
replayer.replay_check(&self.replay_code)
}
/// Returns full key schedule for the lioness cipher for the body.
pub fn lioness_key(&mut self) -> [u8; BODY_CIPHER_KEY_SIZE] {
let lioness_key = &mut [0u8; BODY_CIPHER_KEY_SIZE];
self.stream.seek_to(self.chunks.lioness_key.start as u64).unwrap();
self.stream.xor_read(lioness_key).unwrap();
*lioness_key
}
pub fn body_cipher(&mut self) -> BodyCipher<P> {
BodyCipher {
params: PhantomData,
cipher: ::lioness::LionessDefault::new_raw(& self.lioness_key())
}
}
/// Returns the curve25519 scalar for blinding alpha in Sphinx.
pub fn blinding(&mut self) -> ::curve::Scalar {
let b = &mut [0u8; 64];
self.stream.seek_to(self.chunks.blinding.start as u64).unwrap();
self.stream.xor_read(b).unwrap();
::curve::Scalar::make(b)
}
/// Returns our name for the packet for insertion into the SURB log
/// if the packet gets reforwarded.
pub fn packet_name(&mut self) -> &PacketName {
&self.packet_name
}
pub fn xor_beta(&mut self, beta: &mut [u8], offset: usize, tail: usize)
-> SphinxResult<()> {
let len = P::BETA_LENGTH as usize - offset;
if beta.len() < len {
return Err( SphinxError::InternalError("Beta too short to encrypt!") );
}
if tail > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Excessive tail length requested!") );
}
if beta.len() > len+tail {
return Err( SphinxError::InternalError("Beta too long to encrypt!") );
}
self.stream.seek_to((self.chunks.beta.start + offset) as u64).unwrap();
self.stream.xor_read(beta).unwrap();
Ok(())
}
pub fn set_beta_tail(&mut self, beta_tail: &mut [u8]) -> SphinxResult<()> {
if beta_tail.len() > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Beta's tail is too long!") );
}
for i in beta_tail.iter_mut() { *i = 0; }
self.stream.seek_to(self.chunks.beta_tail.start as u64).unwrap();
self.stream.xor_read(beta_tail).unwrap();
Ok(())
}
pub fn xor_surb_log(&mut self, surb_log: &mut [u8]) -> SphinxResult<()> {
if surb_log.len() > P::SURB_LOG_LENGTH as usize {
return Err( SphinxError::InternalError("SURB log too long!") );
}
self.stream.seek_to(self.chunks.surb_log.start as u64).unwrap();
self.stream.xor_read(surb_log).unwrap();
Ok(())
}
/// Sender's suggested delay for this packet.
pub fn delay(&mut self) -> ::std::time::Duration {
use rand::{ChaChaRng, SeedableRng}; // Rng, Rand
let mut rng = {
let mut seed = [0u32; 8];
fn as_bytes_mut(t: &mut [u32; 8]) -> &mut [u8; 32] {
unsafe { ::std::mem::transmute(t) }
}
self.stream.seek_to(self.chunks.delay.start as u64).unwrap();
self.stream.xor_read(as_bytes_mut(&mut seed)).unwrap();
for i in seed.iter_mut() { *i = u32::from_le(*i); }
ChaChaRng::from_seed(&seed)
};
use rand::distributions::{Exp, IndependentSample};
let exp = Exp::new(P::DELAY_LAMBDA);
let delay = exp.ind_sample(&mut rng);
debug_assert!( delay.is_finite() && delay.is_sign_positive() );
::std::time::Duration::from_secs( delay.round() as u64 )
// ::std::time::Duration::new(
// delay.trunc() as u64,
// (1000*delay.fract()).round() as u32
// )
}
// /// Approximate time when mix node should forward this packet
// pub fn time(&mut self) -> ::std::time::SystemTime {
// ::std::time::SystemTime::now() + self.delay()
// }
} | }
}
| random_line_split |
client_darwin.go | package debugapi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"github.com/ks888/tgo/log"
"golang.org/x/sys/unix"
)
// Assumes the packet size is not larger than this.
const (
maxPacketSize = 4096
excBadAccess = syscall.Signal(0x91) // EXC_BAD_ACCESS
)
// Client is the debug api client which depends on lldb's debugserver.
// See the gdb's doc for the reference: https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
// Some commands use the lldb extension: https://github.com/llvm-mirror/lldb/blob/master/docs/lldb-gdb-remote.txt
type Client struct {
conn net.Conn
pid int
killOnDetach bool
noAckMode bool
registerMetadataList []registerMetadata
buffer []byte
// outputWriter is the writer to which the output of the debugee process will be written.
outputWriter io.Writer
readTLSFuncAddr uint64
currentTLSOffset uint32
pendingSignal int
}
// NewClient returns the new debug api client which depends on OS API.
func NewClient() *Client {
return &Client{buffer: make([]byte, maxPacketSize), outputWriter: os.Stdout}
}
// LaunchProcess lets the debugserver launch the new process.
func (c *Client) LaunchProcess(name string, arg ...string) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), "--", name}
debugServerArgs = append(debugServerArgs, arg...)
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
c.killOnDetach = true
return c.initialize()
}
func (c *Client) waitConnectOrExit(listener net.Listener, cmd *exec.Cmd) (net.Conn, error) {
waitCh := make(chan error)
go func(ch chan error) {
ch <- cmd.Wait()
}(waitCh)
connCh := make(chan net.Conn)
go func(ch chan net.Conn) {
conn, err := listener.Accept()
if err != nil {
connCh <- nil
return
}
connCh <- conn
}(connCh)
select {
case <-waitCh:
return nil, errors.New("the command exits immediately")
case conn := <-connCh:
if conn == nil {
return nil, errors.New("failed to accept the connection")
}
return conn, nil
}
}
func (c *Client) initialize() error {
if err := c.setNoAckMode(); err != nil {
return err
}
if err := c.qSupported(); err != nil {
return err
}
if err := c.qThreadSuffixSupported(); err != nil {
return err
}
var err error
c.registerMetadataList, err = c.collectRegisterMetadata()
if err != nil {
return err
}
if err := c.qListThreadsInStopReply(); err != nil {
return err
}
readTLSFunction := c.buildReadTLSFunction(0) // need the function length here. So the offset doesn't matter.
c.readTLSFuncAddr, err = c.allocateMemory(len(readTLSFunction))
return err
}
func (c *Client) setNoAckMode() error {
const command = "QStartNoAckMode"
if err := c.send(command); err != nil {
return err
}
if err := c.receiveAndCheck(); err != nil {
return err
}
c.noAckMode = true
return nil
}
func (c *Client) qSupported() error {
var supportedFeatures = []string{"swbreak+", "hwbreak+", "no-resumed+"}
command := fmt.Sprintf("qSupported:%s", strings.Join(supportedFeatures, ";"))
if err := c.send(command); err != nil {
return err
}
// TODO: adjust the buffer size so that it doesn't exceed the PacketSize in the response.
_, err := c.receive()
return err
}
func (c *Client) qThreadSuffixSupported() error {
const command = "QThreadSuffixSupported"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
var errEndOfList = errors.New("the end of list")
type registerMetadata struct {
name string
id, offset, size int
}
func (c *Client) collectRegisterMetadata() ([]registerMetadata, error) {
var regs []registerMetadata
for i := 0; ; i++ {
reg, err := c.qRegisterInfo(i)
if err != nil {
if err == errEndOfList {
break
}
return nil, err
}
regs = append(regs, reg)
}
return regs, nil
}
func (c *Client) qRegisterInfo(registerID int) (registerMetadata, error) {
command := fmt.Sprintf("qRegisterInfo%x", registerID)
if err := c.send(command); err != nil {
return registerMetadata{}, err
}
data, err := c.receive()
if err != nil {
return registerMetadata{}, err
}
if strings.HasPrefix(data, "E") {
if data == "E45" {
return registerMetadata{}, errEndOfList
}
return registerMetadata{}, fmt.Errorf("error response: %s", data)
}
return c.parseRegisterMetaData(registerID, data)
}
func (c *Client) parseRegisterMetaData(registerID int, data string) (registerMetadata, error) {
reg := registerMetadata{id: registerID}
for _, chunk := range strings.Split(data, ";") {
keyValue := strings.SplitN(chunk, ":", 2)
if len(keyValue) < 2 {
continue
}
key, value := keyValue[0], keyValue[1]
if key == "name" {
reg.name = value
} else if key == "bitsize" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.size = num / 8
} else if key == "offset" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.offset = num
}
}
return reg, nil
}
func (c *Client) qListThreadsInStopReply() error {
const command = "QListThreadsInStopReply"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
func (c *Client) allocateMemory(size int) (uint64, error) {
command := fmt.Sprintf("_M%x,rwx", size)
if err := c.send(command); err != nil {
return 0, err
}
data, err := c.receive()
if err != nil {
return 0, err
} else if data == "" || strings.HasPrefix(data, "E") {
return 0, fmt.Errorf("error response: %s", data)
}
return hexToUint64(data, false)
}
func (c *Client) deallocateMemory(addr uint64) error {
command := fmt.Sprintf("_m%x", addr)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ThreadIDs returns all the thread ids.
func (c *Client) ThreadIDs() ([]int, error) {
rawThreadIDs, err := c.qfThreadInfo()
if err != nil {
return nil, err
}
// TODO: call qsThreadInfo
var threadIDs []int
for _, rawThreadID := range strings.Split(rawThreadIDs, ",") {
threadID, err := hexToUint64(rawThreadID, false)
if err != nil {
return nil, err
}
threadIDs = append(threadIDs, int(threadID))
}
return threadIDs, nil
}
func (c *Client) qfThreadInfo() (string, error) {
const command = "qfThreadInfo"
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if !strings.HasPrefix(data, "m") {
return "", fmt.Errorf("unexpected response: %s", data)
}
return data[1:], nil
}
// AttachProcess lets the debugserver attach to the process.
func (c *Client) AttachProcess(pid int) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), fmt.Sprintf("--attach=%d", pid)}
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
return c.initialize()
}
// DetachProcess detaches from the process.
func (c *Client) DetachProcess() error {
defer c.close()
if c.killOnDetach {
return c.killProcess()
}
if err := c.send("D"); err != nil {
return err
}
return c.receiveAndCheck()
}
func (c *Client) close() error {
return c.conn.Close()
}
func (c *Client) killProcess() error {
if err := c.send("k"); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if !strings.HasPrefix(data, "X09") {
return fmt.Errorf("unexpected reply: %s", data)
}
// debugserver automatically exits. So don't explicitly detach here.
return nil
}
// ReadRegisters reads the target threadID's registers.
func (c *Client) ReadRegisters(threadID int) (Registers, error) {
data, err := c.readRegisters(threadID)
if err != nil {
return Registers{}, err
}
return c.parseRegisterData(data)
}
func (c *Client) readRegisters(threadID int) (string, error) {
command := fmt.Sprintf("g;thread:%x;", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) parseRegisterData(data string) (Registers, error) {
var regs Registers
for _, metadata := range c.registerMetadataList {
rawValue := data[metadata.offset*2 : (metadata.offset+metadata.size)*2]
var err error
switch metadata.name {
case "rip":
regs.Rip, err = hexToUint64(rawValue, true)
case "rsp":
regs.Rsp, err = hexToUint64(rawValue, true)
case "rcx":
regs.Rcx, err = hexToUint64(rawValue, true)
}
if err != nil {
return Registers{}, err
}
}
return regs, nil
}
// WriteRegisters updates the registers' value.
func (c *Client) WriteRegisters(threadID int, regs Registers) error {
data, err := c.readRegisters(threadID)
if err != nil {
return err
}
// The 'P' command is not used due to the bug explained here: https://github.com/llvm-mirror/lldb/commit/d8d7a40ca5377aa777e3840f3e9b6a63c6b09445
for _, metadata := range c.registerMetadataList {
prefix := data[0 : metadata.offset*2]
suffix := data[(metadata.offset+metadata.size)*2:]
var err error
switch metadata.name {
case "rip":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rip, true), suffix)
case "rsp":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rsp, true), suffix)
case "rcx":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rcx, true), suffix)
}
if err != nil {
return err
}
}
command := fmt.Sprintf("G%s;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory does not match the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory write the data to the specified region
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the offset from the beginning of the TLS block.
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
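// Editor's note (not part of the original file): ReadTLS works by pointing
// RIP at the injected `mov rcx, gs:[offset]` stub, single-stepping once,
// reading the loaded value back out of RCX, and then restoring the saved
// registers via the deferred WriteRegisters call above.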
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mac OS X uses gs_base
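// Editor's note (not in the original file): these bytes encode
// `mov rcx, gs:[disp32]` - 0x65 is the GS segment override, 0x48 the REX.W
// prefix, 0x8b MOV r64,r/m64, and 0x0c 0x25 the ModRM/SIB pair selecting RCX
// with a 32-bit absolute displacement; the offset appended below fills in
// that displacement.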
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes the one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Though the signal number is specified, the debugserver does not seem to pass signals like SIGTERM and SIGINT to the debugee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// process O packet beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once. Consider only first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess |
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) handleWPacket(packet string) (Event, error) {
exitStatus, err := hexToUint64(packet[1:3], false)
return Event{Type: EventTypeExited, Data: int(exitStatus)}, err
}
func (c *Client) handleXPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
// TODO: signalNumber here always looks like 0. The number in the description looks correct, so it may be better to use that instead.
return Event{Type: EventTypeTerminated, Data: int(signalNumber)}, err
}
func (c *Client) send(command string) error {
packet := fmt.Sprintf("$%s#00", command)
if !c.noAckMode {
packet = fmt.Sprintf("$%s#%02x", command, calcChecksum([]byte(command)))
}
if n, err := c.conn.Write([]byte(packet)); err != nil {
return err
} else if n != len(packet) {
return fmt.Errorf("only part of the buffer is sent: %d / %d", n, len(packet))
}
if !c.noAckMode {
return c.receiveAck()
}
return nil
}
func (c *Client) receiveAndCheck() error {
if data, err := c.receive(); err != nil {
return err
} else if data != "OK" {
return fmt.Errorf("the error response is returned: %s", data)
}
return nil
}
func (c *Client) receive() (string, error) {
var rawPacket []byte
for {
n, err := c.conn.Read(c.buffer)
if err != nil {
return "", err
}
rawPacket = append(rawPacket, c.buffer[0:n]...)
if len(rawPacket) < 4 {
// there should be at least 4 bytes
continue
} else if rawPacket[len(rawPacket)-3] == '#' {
// received at least 1 packet.
// TODO: handle multiple packets case
break
}
}
packet := string(rawPacket)
data := string(rawPacket[1 : len(rawPacket)-3])
if !c.noAckMode {
if err := verifyPacket(packet); err != nil {
return "", err
}
return data, c.sendAck()
}
return data, nil
}
func (c *Client) receiveWithTimeout(timeout time.Duration) (string, error) {
c.conn.SetReadDeadline(time.Now().Add(timeout))
defer c.conn.SetReadDeadline(time.Time{})
return c.receive()
}
func (c *Client) sendAck() error {
_, err := c.conn.Write([]byte("+"))
return err
}
func (c *Client) receiveAck() error {
if _, err := c.conn.Read(c.buffer[0:1]); err != nil {
return err
} else if c.buffer[0] != '+' {
return errors.New("failed to receive ack")
}
return nil
}
func verifyPacket(packet string) error {
if packet[0:1] != "$" {
return fmt.Errorf("invalid head data: %v", packet[0])
}
if packet[len(packet)-3:len(packet)-2] != "#" {
return fmt.Errorf("invalid tail data: %v", packet[len(packet)-3])
}
body := packet[1 : len(packet)-3]
bodyChecksum := strconv.FormatUint(uint64(calcChecksum([]byte(body))), 16)
tailChecksum := packet[len(packet)-2:]
if tailChecksum != bodyChecksum {
return fmt.Errorf("invalid checksum: %s", tailChecksum)
}
return nil
}
func hexToUint64(hex string, littleEndian bool) (uint64, error) {
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return strconv.ParseUint(hex, 16, 64)
}
func hexToByteArray(hex string) ([]byte, error) {
out := make([]byte, len(hex)/2)
for i := 0; i < len(hex); i += 2 {
value, err := strconv.ParseUint(hex[i:i+2], 16, 8)
if err != nil {
return nil, err
}
out[i/2] = uint8(value)
}
return out, nil
}
func uint64ToHex(input uint64, littleEndian bool) string {
hex := fmt.Sprintf("%016x", input)
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return hex
}
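// Editor's note (not in the original file): the littleEndian forms above
// reverse the hex string two characters (one byte) at a time, matching the
// byte order debugserver uses inside register packets. For example,
// uint64ToHex(0x1122334455667788, true) yields "8877665544332211", and
// hexToUint64("8877665544332211", true) yields 0x1122334455667788.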
func calcChecksum(buff []byte) uint8 {
var sum uint8
for _, b := range buff {
sum += b
}
return sum
}
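// Editor's sketch (not part of the original file): how a command body is
// framed into a remote-protocol packet, mirroring send and calcChecksum
// above. The checksum is the modulo-256 sum of the body bytes in hex, so
// for example framePacket("vCont;c") yields "$vCont;c#a8".
func framePacket(body string) string {
return fmt.Sprintf("$%s#%02x", body, calcChecksum([]byte(body)))
}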
var debugServerPathList = []string{
"/Library/Developer/CommandLineTools/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/debugserver",
"/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/debugserver",
}
func debugServerPath() (string, error) {
for _, path := range debugServerPathList {
if _, err := os.Stat(path); !os.IsNotExist(err) {
return path, nil
}
}
return "", fmt.Errorf("debugserver is not found in these paths: %v", debugServerPathList)
}
| {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
} | conditional_block |
client_darwin.go | package debugapi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"github.com/ks888/tgo/log"
"golang.org/x/sys/unix"
)
// Assumes the packet size is not larger than this.
const (
maxPacketSize = 4096
excBadAccess = syscall.Signal(0x91) // EXC_BAD_ACCESS
)
// Client is the debug api client which depends on lldb's debugserver.
// See the gdb's doc for the reference: https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
// Some commands use the lldb extension: https://github.com/llvm-mirror/lldb/blob/master/docs/lldb-gdb-remote.txt
type Client struct {
conn net.Conn
pid int
killOnDetach bool
noAckMode bool
registerMetadataList []registerMetadata
buffer []byte
// outputWriter is the writer to which the output of the debugee process will be written.
outputWriter io.Writer
readTLSFuncAddr uint64
currentTLSOffset uint32
pendingSignal int
}
// NewClient returns the new debug api client which depends on OS API.
func NewClient() *Client {
return &Client{buffer: make([]byte, maxPacketSize), outputWriter: os.Stdout}
}
// LaunchProcess lets the debugserver launch the new process.
func (c *Client) LaunchProcess(name string, arg ...string) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), "--", name}
debugServerArgs = append(debugServerArgs, arg...)
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
c.killOnDetach = true
return c.initialize()
}
func (c *Client) waitConnectOrExit(listener net.Listener, cmd *exec.Cmd) (net.Conn, error) {
waitCh := make(chan error)
go func(ch chan error) {
ch <- cmd.Wait()
}(waitCh)
connCh := make(chan net.Conn)
go func(ch chan net.Conn) {
conn, err := listener.Accept()
if err != nil {
connCh <- nil
return
}
connCh <- conn
}(connCh)
select {
case <-waitCh:
return nil, errors.New("the command exits immediately")
case conn := <-connCh:
if conn == nil {
return nil, errors.New("failed to accept the connection")
}
return conn, nil
}
}
func (c *Client) initialize() error {
if err := c.setNoAckMode(); err != nil {
return err
}
if err := c.qSupported(); err != nil {
return err
}
if err := c.qThreadSuffixSupported(); err != nil {
return err
}
var err error
c.registerMetadataList, err = c.collectRegisterMetadata()
if err != nil {
return err
}
if err := c.qListThreadsInStopReply(); err != nil {
return err
}
readTLSFunction := c.buildReadTLSFunction(0) // need the function length here. So the offset doesn't matter.
c.readTLSFuncAddr, err = c.allocateMemory(len(readTLSFunction))
return err
}
func (c *Client) setNoAckMode() error {
const command = "QStartNoAckMode"
if err := c.send(command); err != nil {
return err
}
if err := c.receiveAndCheck(); err != nil {
return err
}
c.noAckMode = true
return nil
}
func (c *Client) qSupported() error {
var supportedFeatures = []string{"swbreak+", "hwbreak+", "no-resumed+"}
command := fmt.Sprintf("qSupported:%s", strings.Join(supportedFeatures, ";"))
if err := c.send(command); err != nil {
return err
}
// TODO: adjust the buffer size so that it doesn't exceed the PacketSize in the response.
_, err := c.receive()
return err
}
func (c *Client) qThreadSuffixSupported() error {
const command = "QThreadSuffixSupported"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
var errEndOfList = errors.New("the end of list")
type registerMetadata struct {
name string
id, offset, size int
}
func (c *Client) collectRegisterMetadata() ([]registerMetadata, error) {
var regs []registerMetadata
for i := 0; ; i++ {
reg, err := c.qRegisterInfo(i)
if err != nil {
if err == errEndOfList {
break
}
return nil, err
}
regs = append(regs, reg)
}
return regs, nil
}
func (c *Client) qRegisterInfo(registerID int) (registerMetadata, error) {
command := fmt.Sprintf("qRegisterInfo%x", registerID)
if err := c.send(command); err != nil {
return registerMetadata{}, err
}
data, err := c.receive()
if err != nil {
return registerMetadata{}, err
}
if strings.HasPrefix(data, "E") {
if data == "E45" {
return registerMetadata{}, errEndOfList
}
return registerMetadata{}, fmt.Errorf("error response: %s", data)
}
return c.parseRegisterMetaData(registerID, data)
}
func (c *Client) parseRegisterMetaData(registerID int, data string) (registerMetadata, error) {
reg := registerMetadata{id: registerID}
for _, chunk := range strings.Split(data, ";") {
keyValue := strings.SplitN(chunk, ":", 2)
if len(keyValue) < 2 {
continue
}
key, value := keyValue[0], keyValue[1]
if key == "name" {
reg.name = value
} else if key == "bitsize" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.size = num / 8
} else if key == "offset" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.offset = num
}
}
return reg, nil
}
func (c *Client) qListThreadsInStopReply() error {
const command = "QListThreadsInStopReply"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
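// allocateMemory and deallocateMemory use the lldb extensions _M<size>,<permissions> and
// _m<address> (see lldb-gdb-remote.txt), which ask debugserver to allocate and free memory
// inside the debuggee; the reply to _M is the allocated address in hex.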
func (c *Client) allocateMemory(size int) (uint64, error) {
command := fmt.Sprintf("_M%x,rwx", size)
if err := c.send(command); err != nil {
return 0, err
}
data, err := c.receive()
if err != nil {
return 0, err
} else if data == "" || strings.HasPrefix(data, "E") {
return 0, fmt.Errorf("error response: %s", data)
}
return hexToUint64(data, false)
}
func (c *Client) deallocateMemory(addr uint64) error {
command := fmt.Sprintf("_m%x", addr)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ThreadIDs returns all the thread ids.
func (c *Client) ThreadIDs() ([]int, error) {
rawThreadIDs, err := c.qfThreadInfo()
if err != nil {
return nil, err
}
// TODO: call qsThreadInfo
var threadIDs []int
for _, rawThreadID := range strings.Split(rawThreadIDs, ",") {
threadID, err := hexToUint64(rawThreadID, false)
if err != nil {
return nil, err
}
threadIDs = append(threadIDs, int(threadID))
}
return threadIDs, nil
}
func (c *Client) qfThreadInfo() (string, error) {
const command = "qfThreadInfo"
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if !strings.HasPrefix(data, "m") {
return "", fmt.Errorf("unexpected response: %s", data)
}
return data[1:], nil
}
// AttachProcess lets the debugserver attach to the existing process.
func (c *Client) AttachProcess(pid int) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), fmt.Sprintf("--attach=%d", pid)}
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
return c.initialize()
}
// DetachProcess detaches from the process.
func (c *Client) DetachProcess() error {
defer c.close()
if c.killOnDetach {
return c.killProcess()
}
if err := c.send("D"); err != nil {
return err
}
return c.receiveAndCheck()
}
func (c *Client) close() error {
return c.conn.Close()
}
func (c *Client) killProcess() error {
if err := c.send("k"); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if !strings.HasPrefix(data, "X09") {
return fmt.Errorf("unexpected reply: %s", data)
}
// debugserver automatically exits. So don't explicitly detach here.
return nil
}
// ReadRegisters reads the target threadID's registers.
func (c *Client) ReadRegisters(threadID int) (Registers, error) {
data, err := c.readRegisters(threadID)
if err != nil {
return Registers{}, err
}
return c.parseRegisterData(data)
}
func (c *Client) readRegisters(threadID int) (string, error) {
command := fmt.Sprintf("g;thread:%x;", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) parseRegisterData(data string) (Registers, error) {
var regs Registers
for _, metadata := range c.registerMetadataList {
rawValue := data[metadata.offset*2 : (metadata.offset+metadata.size)*2]
var err error
switch metadata.name {
case "rip":
regs.Rip, err = hexToUint64(rawValue, true)
case "rsp":
regs.Rsp, err = hexToUint64(rawValue, true)
case "rcx":
regs.Rcx, err = hexToUint64(rawValue, true)
}
if err != nil {
return Registers{}, err
}
}
return regs, nil
}
// WriteRegisters updates the registers' value.
func (c *Client) WriteRegisters(threadID int, regs Registers) error {
data, err := c.readRegisters(threadID)
if err != nil {
return err
}
// The 'P' command is not used due to the bug explained here: https://github.com/llvm-mirror/lldb/commit/d8d7a40ca5377aa777e3840f3e9b6a63c6b09445
for _, metadata := range c.registerMetadataList {
prefix := data[0 : metadata.offset*2]
suffix := data[(metadata.offset+metadata.size)*2:]
var err error
switch metadata.name {
case "rip":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rip, true), suffix)
case "rsp":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rsp, true), suffix)
case "rcx":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rcx, true), suffix)
}
if err != nil {
return err
}
}
command := fmt.Sprintf("G%s;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory differs from the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory writes the data to the specified memory region.
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the value stored at the given offset from the beginning of the TLS block of the specified thread.
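// It works by pointing RIP at the small `mov rcx, gs:[offset]` stub written into the scratch
// memory allocated in initialize(), single-stepping the thread once, and reading the result
// from RCX; the original register values are restored afterwards.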
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mov rcx, gs:[offset]; macOS keeps the TLS base in gs_base
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Even when the signal number is specified, the debugserver does not seem to pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// Process O (output) packets beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once; considering only the first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
}
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) handleWPacket(packet string) (Event, error) {
exitStatus, err := hexToUint64(packet[1:3], false)
return Event{Type: EventTypeExited, Data: int(exitStatus)}, err
}
func (c *Client) handleXPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
// TODO: signalNumber here always looks like 0. The number in the description looks correct, so it may be better to use that instead.
return Event{Type: EventTypeTerminated, Data: int(signalNumber)}, err
}
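// send wraps the command in the remote protocol framing "$<payload>#<checksum>", where the
// checksum is the modulo-256 sum of the payload bytes rendered as two hex digits. Once
// no-ack mode is enabled the checksum is not verified, so a fixed "00" placeholder is sent.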
func (c *Client) send(command string) error {
packet := fmt.Sprintf("$%s#00", command)
if !c.noAckMode {
packet = fmt.Sprintf("$%s#%02x", command, calcChecksum([]byte(command)))
}
if n, err := c.conn.Write([]byte(packet)); err != nil {
return err
} else if n != len(packet) {
return fmt.Errorf("only part of the buffer is sent: %d / %d", n, len(packet))
}
if !c.noAckMode {
return c.receiveAck()
}
return nil
}
func (c *Client) receiveAndCheck() error {
if data, err := c.receive(); err != nil {
return err
} else if data != "OK" {
return fmt.Errorf("unexpected response: %s", data)
}
return nil
}
func (c *Client) receive() (string, error) {
var rawPacket []byte
for {
n, err := c.conn.Read(c.buffer)
if err != nil {
return "", err
}
rawPacket = append(rawPacket, c.buffer[0:n]...)
if len(rawPacket) < 4 {
// there should be at least 4 bytes
continue
} else if rawPacket[len(rawPacket)-3] == '#' {
// received at least 1 packet.
// TODO: handle multiple packets case
break
}
}
packet := string(rawPacket)
data := string(rawPacket[1 : len(rawPacket)-3])
if !c.noAckMode {
if err := verifyPacket(packet); err != nil {
return "", err
}
return data, c.sendAck()
}
return data, nil
}
func (c *Client) receiveWithTimeout(timeout time.Duration) (string, error) {
c.conn.SetReadDeadline(time.Now().Add(timeout))
defer c.conn.SetReadDeadline(time.Time{})
return c.receive()
}
func (c *Client) sendAck() error {
_, err := c.conn.Write([]byte("+"))
return err
}
func (c *Client) receiveAck() error {
if _, err := c.conn.Read(c.buffer[0:1]); err != nil {
return err
} else if c.buffer[0] != '+' {
return errors.New("failed to receive ack")
}
return nil
}
func verifyPacket(packet string) error {
if packet[0:1] != "$" {
return fmt.Errorf("invalid head data: %v", packet[0])
}
if packet[len(packet)-3:len(packet)-2] != "#" {
return fmt.Errorf("invalid tail data: %v", packet[len(packet)-3])
}
body := packet[1 : len(packet)-3]
bodyChecksum := fmt.Sprintf("%02x", calcChecksum([]byte(body))) // zero-pad so that single-digit checksums match the 2-character tail
tailChecksum := packet[len(packet)-2:]
if tailChecksum != bodyChecksum {
return fmt.Errorf("invalid checksum: %s", tailChecksum)
}
return nil
}
func hexToUint64(hex string, littleEndian bool) (uint64, error) {
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return strconv.ParseUint(hex, 16, 64)
}
func hexToByteArray(hex string) ([]byte, error) {
out := make([]byte, len(hex)/2)
for i := 0; i < len(hex); i += 2 {
value, err := strconv.ParseUint(hex[i:i+2], 16, 8)
if err != nil {
return nil, err
}
out[i/2] = uint8(value)
}
return out, nil
}
func uint64ToHex(input uint64, littleEndian bool) string {
hex := fmt.Sprintf("%016x", input)
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return hex
}
func calcChecksum(buff []byte) uint8 {
var sum uint8
for _, b := range buff {
sum += b
}
return sum
}
var debugServerPathList = []string{
"/Library/Developer/CommandLineTools/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/debugserver",
"/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/debugserver",
}
func debugServerPath() (string, error) {
for _, path := range debugServerPathList {
if _, err := os.Stat(path); !os.IsNotExist(err) {
return path, nil
}
}
return "", fmt.Errorf("debugserver is not found in these paths: %v", debugServerPathList)
}
// client_darwin.go
package debugapi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"github.com/ks888/tgo/log"
"golang.org/x/sys/unix"
)
// Assumes the packet size is not larger than this.
const (
maxPacketSize = 4096
excBadAccess = syscall.Signal(0x91) // EXC_BAD_ACCESS
)
// Client is the debug api client which depends on lldb's debugserver.
// See the gdb's doc for the reference: https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
// Some commands use the lldb extension: https://github.com/llvm-mirror/lldb/blob/master/docs/lldb-gdb-remote.txt
type Client struct {
conn net.Conn
pid int
killOnDetach bool
noAckMode bool
registerMetadataList []registerMetadata
buffer []byte
// outputWriter is the writer to which the output of the debuggee process will be written.
outputWriter io.Writer
readTLSFuncAddr uint64
currentTLSOffset uint32
pendingSignal int
}
// NewClient returns the new debug api client which depends on OS API.
func NewClient() *Client {
return &Client{buffer: make([]byte, maxPacketSize), outputWriter: os.Stdout}
}
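// A typical session is sketched below (illustrative only; error handling is omitted and
// "./a.out" is a placeholder binary):
//
//	c := NewClient()
//	if err := c.LaunchProcess("./a.out"); err != nil {
//		// handle the launch error
//	}
//	event, err := c.ContinueAndWait() // e.g. EventTypeTrapped with the stopped thread IDs
//	_, _ = event, err
//	_ = c.DetachProcess()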
// LaunchProcess lets the debugserver launch the new process.
func (c *Client) LaunchProcess(name string, arg ...string) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), "--", name}
debugServerArgs = append(debugServerArgs, arg...)
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
c.killOnDetach = true
return c.initialize()
}
func (c *Client) waitConnectOrExit(listener net.Listener, cmd *exec.Cmd) (net.Conn, error) {
waitCh := make(chan error)
go func(ch chan error) {
ch <- cmd.Wait()
}(waitCh)
connCh := make(chan net.Conn)
go func(ch chan net.Conn) {
conn, err := listener.Accept()
if err != nil {
connCh <- nil
return
}
connCh <- conn
}(connCh)
select {
case <-waitCh:
return nil, errors.New("the command exits immediately")
case conn := <-connCh:
if conn == nil {
return nil, errors.New("failed to accept the connection")
}
return conn, nil
}
}
func (c *Client) initialize() error {
if err := c.setNoAckMode(); err != nil {
return err
}
if err := c.qSupported(); err != nil {
return err
}
if err := c.qThreadSuffixSupported(); err != nil {
return err
}
var err error
c.registerMetadataList, err = c.collectRegisterMetadata()
if err != nil {
return err
}
if err := c.qListThreadsInStopReply(); err != nil {
return err
}
readTLSFunction := c.buildReadTLSFunction(0) // only the function length is needed here, so the offset doesn't matter.
c.readTLSFuncAddr, err = c.allocateMemory(len(readTLSFunction))
return err
}
func (c *Client) setNoAckMode() error {
const command = "QStartNoAckMode"
if err := c.send(command); err != nil {
return err
}
if err := c.receiveAndCheck(); err != nil {
return err
}
c.noAckMode = true
return nil
}
func (c *Client) qSupported() error {
var supportedFeatures = []string{"swbreak+", "hwbreak+", "no-resumed+"}
command := fmt.Sprintf("qSupported:%s", strings.Join(supportedFeatures, ";"))
if err := c.send(command); err != nil {
return err
}
// TODO: adjust the buffer size so that it doesn't exceed the PacketSize in the response.
_, err := c.receive()
return err
}
func (c *Client) qThreadSuffixSupported() error {
const command = "QThreadSuffixSupported"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
var errEndOfList = errors.New("the end of list")
type registerMetadata struct {
name string
id, offset, size int
}
func (c *Client) collectRegisterMetadata() ([]registerMetadata, error) {
var regs []registerMetadata
for i := 0; ; i++ {
reg, err := c.qRegisterInfo(i)
if err != nil {
if err == errEndOfList {
break
}
return nil, err
}
regs = append(regs, reg)
}
return regs, nil
}
func (c *Client) qRegisterInfo(registerID int) (registerMetadata, error) {
command := fmt.Sprintf("qRegisterInfo%x", registerID)
if err := c.send(command); err != nil {
return registerMetadata{}, err
}
data, err := c.receive()
if err != nil {
return registerMetadata{}, err
}
if strings.HasPrefix(data, "E") {
if data == "E45" {
return registerMetadata{}, errEndOfList
}
return registerMetadata{}, fmt.Errorf("error response: %s", data)
}
return c.parseRegisterMetaData(registerID, data)
}
func (c *Client) parseRegisterMetaData(registerID int, data string) (registerMetadata, error) {
reg := registerMetadata{id: registerID}
for _, chunk := range strings.Split(data, ";") {
keyValue := strings.SplitN(chunk, ":", 2)
if len(keyValue) < 2 {
continue
}
key, value := keyValue[0], keyValue[1]
if key == "name" {
reg.name = value
} else if key == "bitsize" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.size = num / 8
} else if key == "offset" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.offset = num
}
}
return reg, nil
}
func (c *Client) qListThreadsInStopReply() error {
const command = "QListThreadsInStopReply"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
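// allocateMemory and deallocateMemory use the lldb extensions _M<size>,<permissions> and
// _m<address> (see lldb-gdb-remote.txt), which ask debugserver to allocate and free memory
// inside the debuggee; the reply to _M is the allocated address in hex.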
func (c *Client) allocateMemory(size int) (uint64, error) {
command := fmt.Sprintf("_M%x,rwx", size)
if err := c.send(command); err != nil {
return 0, err
}
data, err := c.receive()
if err != nil {
return 0, err
} else if data == "" || strings.HasPrefix(data, "E") {
return 0, fmt.Errorf("error response: %s", data)
}
return hexToUint64(data, false)
}
func (c *Client) deallocateMemory(addr uint64) error {
command := fmt.Sprintf("_m%x", addr)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ThreadIDs returns all the thread ids.
func (c *Client) ThreadIDs() ([]int, error) {
rawThreadIDs, err := c.qfThreadInfo()
if err != nil {
return nil, err
}
// TODO: call qsThreadInfo
var threadIDs []int
for _, rawThreadID := range strings.Split(rawThreadIDs, ",") {
threadID, err := hexToUint64(rawThreadID, false)
if err != nil {
return nil, err
}
threadIDs = append(threadIDs, int(threadID))
}
return threadIDs, nil
}
func (c *Client) qfThreadInfo() (string, error) {
const command = "qfThreadInfo"
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if !strings.HasPrefix(data, "m") {
return "", fmt.Errorf("unexpected response: %s", data)
}
return data[1:], nil
}
// AttachProcess lets the debugserver attach to the existing process.
func (c *Client) AttachProcess(pid int) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), fmt.Sprintf("--attach=%d", pid)}
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
return c.initialize()
}
// DetachProcess detaches from the process.
func (c *Client) DetachProcess() error {
defer c.close()
if c.killOnDetach {
return c.killProcess()
}
if err := c.send("D"); err != nil {
return err
}
return c.receiveAndCheck()
}
func (c *Client) close() error {
return c.conn.Close()
}
func (c *Client) killProcess() error {
if err := c.send("k"); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if !strings.HasPrefix(data, "X09") {
return fmt.Errorf("unexpected reply: %s", data)
}
// debugserver automatically exits. So don't explicitly detach here.
return nil
}
// ReadRegisters reads the target threadID's registers.
func (c *Client) ReadRegisters(threadID int) (Registers, error) {
data, err := c.readRegisters(threadID)
if err != nil {
return Registers{}, err
}
return c.parseRegisterData(data)
}
func (c *Client) readRegisters(threadID int) (string, error) {
command := fmt.Sprintf("g;thread:%x;", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) parseRegisterData(data string) (Registers, error) {
var regs Registers
for _, metadata := range c.registerMetadataList {
rawValue := data[metadata.offset*2 : (metadata.offset+metadata.size)*2]
var err error
switch metadata.name {
case "rip":
regs.Rip, err = hexToUint64(rawValue, true)
case "rsp":
regs.Rsp, err = hexToUint64(rawValue, true)
case "rcx":
regs.Rcx, err = hexToUint64(rawValue, true)
}
if err != nil {
return Registers{}, err
}
}
return regs, nil
}
// WriteRegisters updates the registers' value.
func (c *Client) WriteRegisters(threadID int, regs Registers) error {
data, err := c.readRegisters(threadID)
if err != nil {
return err
}
// The 'P' command is not used due to the bug explained here: https://github.com/llvm-mirror/lldb/commit/d8d7a40ca5377aa777e3840f3e9b6a63c6b09445
for _, metadata := range c.registerMetadataList {
prefix := data[0 : metadata.offset*2]
suffix := data[(metadata.offset+metadata.size)*2:]
var err error
switch metadata.name {
case "rip":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rip, true), suffix)
case "rsp":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rsp, true), suffix)
case "rcx":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rcx, true), suffix)
}
if err != nil {
return err
}
}
command := fmt.Sprintf("G%s;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory differs from the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory writes the data to the specified memory region.
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the value stored at the given offset from the beginning of the TLS block of the specified thread.
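// It works by pointing RIP at the small `mov rcx, gs:[offset]` stub written into the scratch
// memory allocated in initialize(), single-stepping the thread once, and reading the result
// from RCX; the original register values are restored afterwards.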
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mov rcx, gs:[offset]; macOS keeps the TLS base in gs_base
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Even when the signal number is specified, the debugserver does not seem to pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// Process O (output) packets beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once; considering only the first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
}
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) handleWPacket(packet string) (Event, error) {
exitStatus, err := hexToUint64(packet[1:3], false)
return Event{Type: EventTypeExited, Data: int(exitStatus)}, err
}
func (c *Client) handleXPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
// TODO: signalNumber here always looks like 0. The number in the description looks correct, so it may be better to use that instead.
return Event{Type: EventTypeTerminated, Data: int(signalNumber)}, err
}
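// send wraps the command in the remote protocol framing "$<payload>#<checksum>", where the
// checksum is the modulo-256 sum of the payload bytes rendered as two hex digits. Once
// no-ack mode is enabled the checksum is not verified, so a fixed "00" placeholder is sent.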
func (c *Client) send(command string) error {
packet := fmt.Sprintf("$%s#00", command)
if !c.noAckMode {
packet = fmt.Sprintf("$%s#%02x", command, calcChecksum([]byte(command)))
}
if n, err := c.conn.Write([]byte(packet)); err != nil {
return err
} else if n != len(packet) {
return fmt.Errorf("only part of the buffer is sent: %d / %d", n, len(packet))
}
if !c.noAckMode {
return c.receiveAck()
}
return nil
}
func (c *Client) receiveAndCheck() error {
if data, err := c.receive(); err != nil {
return err
} else if data != "OK" {
return fmt.Errorf("unexpected response: %s", data)
}
return nil
}
func (c *Client) receive() (string, error) {
var rawPacket []byte
for {
n, err := c.conn.Read(c.buffer)
if err != nil {
return "", err
}
rawPacket = append(rawPacket, c.buffer[0:n]...)
if len(rawPacket) < 4 {
// there should be at least 4 bytes
continue
} else if rawPacket[len(rawPacket)-3] == '#' {
// received at least 1 packet.
// TODO: handle multiple packets case
break
}
}
packet := string(rawPacket)
data := string(rawPacket[1 : len(rawPacket)-3])
if !c.noAckMode {
if err := verifyPacket(packet); err != nil {
return "", err
}
return data, c.sendAck()
}
return data, nil
}
func (c *Client) receiveWithTimeout(timeout time.Duration) (string, error) {
c.conn.SetReadDeadline(time.Now().Add(timeout))
defer c.conn.SetReadDeadline(time.Time{})
return c.receive()
}
func (c *Client) sendAck() error {
_, err := c.conn.Write([]byte("+"))
return err
}
func (c *Client) receiveAck() error {
if _, err := c.conn.Read(c.buffer[0:1]); err != nil {
return err
} else if c.buffer[0] != '+' {
return errors.New("failed to receive ack")
}
return nil
}
func verifyPacket(packet string) error {
if packet[0:1] != "$" {
return fmt.Errorf("invalid head data: %v", packet[0])
}
if packet[len(packet)-3:len(packet)-2] != "#" {
return fmt.Errorf("invalid tail data: %v", packet[len(packet)-3])
}
body := packet[1 : len(packet)-3]
bodyChecksum := fmt.Sprintf("%02x", calcChecksum([]byte(body))) // zero-pad so that single-digit checksums match the 2-character tail
tailChecksum := packet[len(packet)-2:]
if tailChecksum != bodyChecksum {
return fmt.Errorf("invalid checksum: %s", tailChecksum)
}
return nil
}
func hexToUint64(hex string, littleEndian bool) (uint64, error) {
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return strconv.ParseUint(hex, 16, 64)
}
func hexToByteArray(hex string) ([]byte, error) {
out := make([]byte, len(hex)/2)
for i := 0; i < len(hex); i += 2 {
value, err := strconv.ParseUint(hex[i:i+2], 16, 8)
if err != nil {
return nil, err
}
out[i/2] = uint8(value)
}
return out, nil
}
func uint64ToHex(input uint64, littleEndian bool) string {
hex := fmt.Sprintf("%016x", input)
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return hex
}
func calcChecksum(buff []byte) uint8 {
var sum uint8
for _, b := range buff {
sum += b
}
return sum
}
var debugServerPathList = []string{
"/Library/Developer/CommandLineTools/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/debugserver",
"/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/debugserver",
}
func debugServerPath() (string, error) {
for _, path := range debugServerPathList {
if _, err := os.Stat(path); !os.IsNotExist(err) {
return path, nil
}
}
return "", fmt.Errorf("debugserver is not found in these paths: %v", debugServerPathList)
}
// client_darwin.go
package debugapi
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"github.com/ks888/tgo/log"
"golang.org/x/sys/unix"
)
// Assumes the packet size is not larger than this.
const (
maxPacketSize = 4096
excBadAccess = syscall.Signal(0x91) // EXC_BAD_ACCESS
)
// Client is the debug api client which depends on lldb's debugserver.
// See the gdb's doc for the reference: https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
// Some commands use the lldb extension: https://github.com/llvm-mirror/lldb/blob/master/docs/lldb-gdb-remote.txt
type Client struct {
conn net.Conn
pid int
killOnDetach bool
noAckMode bool
registerMetadataList []registerMetadata
buffer []byte
// outputWriter is the writer to which the output of the debugee process will be written.
outputWriter io.Writer
readTLSFuncAddr uint64
currentTLSOffset uint32
pendingSignal int
}
// NewClient returns the new debug api client which depends on OS API.
func NewClient() *Client {
return &Client{buffer: make([]byte, maxPacketSize), outputWriter: os.Stdout}
}
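// A typical session is sketched below (illustrative only; error handling is omitted and
// "./a.out" is a placeholder binary):
//
//	c := NewClient()
//	if err := c.LaunchProcess("./a.out"); err != nil {
//		// handle the launch error
//	}
//	event, err := c.ContinueAndWait() // e.g. EventTypeTrapped with the stopped thread IDs
//	_, _ = event, err
//	_ = c.DetachProcess()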
// LaunchProcess lets the debugserver launch the new process.
func (c *Client) LaunchProcess(name string, arg ...string) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), "--", name}
debugServerArgs = append(debugServerArgs, arg...)
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
c.killOnDetach = true
return c.initialize()
}
func (c *Client) waitConnectOrExit(listener net.Listener, cmd *exec.Cmd) (net.Conn, error) {
waitCh := make(chan error)
go func(ch chan error) {
ch <- cmd.Wait()
}(waitCh)
connCh := make(chan net.Conn)
go func(ch chan net.Conn) {
conn, err := listener.Accept()
if err != nil {
connCh <- nil
return
}
connCh <- conn
}(connCh)
select {
case <-waitCh:
return nil, errors.New("the command exits immediately")
case conn := <-connCh:
if conn == nil {
return nil, errors.New("failed to accept the connection")
}
return conn, nil
}
}
func (c *Client) initialize() error {
if err := c.setNoAckMode(); err != nil {
return err
}
if err := c.qSupported(); err != nil {
return err
}
if err := c.qThreadSuffixSupported(); err != nil {
return err
}
var err error
c.registerMetadataList, err = c.collectRegisterMetadata()
if err != nil {
return err
}
if err := c.qListThreadsInStopReply(); err != nil {
return err
}
readTLSFunction := c.buildReadTLSFunction(0) // only the function length is needed here, so the offset doesn't matter.
c.readTLSFuncAddr, err = c.allocateMemory(len(readTLSFunction))
return err
}
func (c *Client) setNoAckMode() error {
const command = "QStartNoAckMode"
if err := c.send(command); err != nil {
return err
}
if err := c.receiveAndCheck(); err != nil {
return err
}
c.noAckMode = true
return nil
}
func (c *Client) qSupported() error {
var supportedFeatures = []string{"swbreak+", "hwbreak+", "no-resumed+"}
command := fmt.Sprintf("qSupported:%s", strings.Join(supportedFeatures, ";"))
if err := c.send(command); err != nil {
return err
}
// TODO: adjust the buffer size so that it doesn't exceed the PacketSize in the response.
_, err := c.receive()
return err
}
func (c *Client) qThreadSuffixSupported() error {
const command = "QThreadSuffixSupported"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
var errEndOfList = errors.New("the end of list")
type registerMetadata struct {
name string
id, offset, size int
}
func (c *Client) collectRegisterMetadata() ([]registerMetadata, error) {
var regs []registerMetadata
for i := 0; ; i++ {
reg, err := c.qRegisterInfo(i)
if err != nil {
if err == errEndOfList {
break
}
return nil, err
}
regs = append(regs, reg)
}
return regs, nil
}
func (c *Client) qRegisterInfo(registerID int) (registerMetadata, error) {
command := fmt.Sprintf("qRegisterInfo%x", registerID)
if err := c.send(command); err != nil {
return registerMetadata{}, err
}
data, err := c.receive()
if err != nil {
return registerMetadata{}, err
}
if strings.HasPrefix(data, "E") {
if data == "E45" {
return registerMetadata{}, errEndOfList
}
return registerMetadata{}, fmt.Errorf("error response: %s", data)
}
return c.parseRegisterMetaData(registerID, data)
}
func (c *Client) parseRegisterMetaData(registerID int, data string) (registerMetadata, error) {
reg := registerMetadata{id: registerID}
for _, chunk := range strings.Split(data, ";") {
keyValue := strings.SplitN(chunk, ":", 2)
if len(keyValue) < 2 {
continue
}
key, value := keyValue[0], keyValue[1]
if key == "name" {
reg.name = value
} else if key == "bitsize" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.size = num / 8
} else if key == "offset" {
num, err := strconv.Atoi(value)
if err != nil {
return registerMetadata{}, err
}
reg.offset = num
}
}
return reg, nil
}
func (c *Client) qListThreadsInStopReply() error {
const command = "QListThreadsInStopReply"
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
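// allocateMemory and deallocateMemory use the lldb extensions _M<size>,<permissions> and
// _m<address> (see lldb-gdb-remote.txt), which ask debugserver to allocate and free memory
// inside the debuggee; the reply to _M is the allocated address in hex.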
func (c *Client) allocateMemory(size int) (uint64, error) {
command := fmt.Sprintf("_M%x,rwx", size)
if err := c.send(command); err != nil {
return 0, err
}
data, err := c.receive()
if err != nil {
return 0, err
} else if data == "" || strings.HasPrefix(data, "E") {
return 0, fmt.Errorf("error response: %s", data)
}
return hexToUint64(data, false)
}
func (c *Client) deallocateMemory(addr uint64) error {
command := fmt.Sprintf("_m%x", addr)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ThreadIDs returns all the thread ids.
func (c *Client) ThreadIDs() ([]int, error) {
rawThreadIDs, err := c.qfThreadInfo()
if err != nil {
return nil, err
}
// TODO: call qsThreadInfo
var threadIDs []int
for _, rawThreadID := range strings.Split(rawThreadIDs, ",") {
threadID, err := hexToUint64(rawThreadID, false)
if err != nil {
return nil, err
}
threadIDs = append(threadIDs, int(threadID))
}
return threadIDs, nil
}
func (c *Client) qfThreadInfo() (string, error) {
const command = "qfThreadInfo"
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if !strings.HasPrefix(data, "m") {
return "", fmt.Errorf("unexpected response: %s", data)
}
return data[1:], nil
}
// AttachProcess lets the debugserver attach to the existing process.
func (c *Client) AttachProcess(pid int) error {
listener, err := net.Listen("tcp", "localhost:")
if err != nil {
return err
}
path, err := debugServerPath()
if err != nil {
return err
}
debugServerArgs := []string{"-F", "-R", listener.Addr().String(), fmt.Sprintf("--attach=%d", pid)}
cmd := exec.Command(path, debugServerArgs...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // Otherwise, the signal is sent to all the group members.
if err := cmd.Start(); err != nil {
return err
}
c.conn, err = c.waitConnectOrExit(listener, cmd)
if err != nil {
return err
}
c.pid = cmd.Process.Pid
return c.initialize()
}
// DetachProcess detaches from the process.
func (c *Client) DetachProcess() error {
defer c.close()
if c.killOnDetach {
return c.killProcess()
}
if err := c.send("D"); err != nil {
return err
}
return c.receiveAndCheck()
}
func (c *Client) close() error {
return c.conn.Close()
}
func (c *Client) killProcess() error {
if err := c.send("k"); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if !strings.HasPrefix(data, "X09") {
return fmt.Errorf("unexpected reply: %s", data)
}
// debugserver automatically exits. So don't explicitly detach here.
return nil
}
// ReadRegisters reads the target threadID's registers.
func (c *Client) ReadRegisters(threadID int) (Registers, error) {
data, err := c.readRegisters(threadID)
if err != nil {
return Registers{}, err
}
return c.parseRegisterData(data)
}
func (c *Client) readRegisters(threadID int) (string, error) {
command := fmt.Sprintf("g;thread:%x;", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) parseRegisterData(data string) (Registers, error) {
var regs Registers
for _, metadata := range c.registerMetadataList {
rawValue := data[metadata.offset*2 : (metadata.offset+metadata.size)*2]
var err error
switch metadata.name {
case "rip":
regs.Rip, err = hexToUint64(rawValue, true)
case "rsp":
regs.Rsp, err = hexToUint64(rawValue, true)
case "rcx":
regs.Rcx, err = hexToUint64(rawValue, true)
}
if err != nil {
return Registers{}, err
}
}
return regs, nil
}
// WriteRegisters updates the registers' value.
func (c *Client) WriteRegisters(threadID int, regs Registers) error {
data, err := c.readRegisters(threadID)
if err != nil {
return err
}
// The 'P' command is not used due to the bug explained here: https://github.com/llvm-mirror/lldb/commit/d8d7a40ca5377aa777e3840f3e9b6a63c6b09445
for _, metadata := range c.registerMetadataList {
prefix := data[0 : metadata.offset*2]
suffix := data[(metadata.offset+metadata.size)*2:]
var err error
switch metadata.name {
case "rip":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rip, true), suffix)
case "rsp":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rsp, true), suffix)
case "rcx":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rcx, true), suffix)
}
if err != nil {
return err
}
}
command := fmt.Sprintf("G%s;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory differs from the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory writes the data to the specified memory region.
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the value stored at the given offset from the beginning of the TLS block of the specified thread.
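// It works by pointing RIP at the small `mov rcx, gs:[offset]` stub written into the scratch
// memory allocated in initialize(), single-stepping the thread once, and reading the result
// from RCX; the original register values are restored afterwards.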
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mov rcx, gs:[offset]; macOS keeps the TLS base in gs_base
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Even when the signal number is specified, the debugserver does not seem to pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// Process O (output) packets beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once; considering only the first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
}
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) |
func (c *Client) handleWPacket(packet string) (Event, error) {
exitStatus, err := hexToUint64(packet[1:3], false)
return Event{Type: EventTypeExited, Data: int(exitStatus)}, err
}
func (c *Client) handleXPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
// TODO: signalNumber here always looks like 0. The number in the description looks correct, so it may be better to use that instead.
return Event{Type: EventTypeTerminated, Data: int(signalNumber)}, err
}
func (c *Client) send(command string) error {
packet := fmt.Sprintf("$%s#00", command)
if !c.noAckMode {
packet = fmt.Sprintf("$%s#%02x", command, calcChecksum([]byte(command)))
}
if n, err := c.conn.Write([]byte(packet)); err != nil {
return err
} else if n != len(packet) {
return fmt.Errorf("only part of the buffer is sent: %d / %d", n, len(packet))
}
if !c.noAckMode {
return c.receiveAck()
}
return nil
}
func (c *Client) receiveAndCheck() error {
if data, err := c.receive(); err != nil {
return err
} else if data != "OK" {
return fmt.Errorf("the error response is returned: %s", data)
}
return nil
}
func (c *Client) receive() (string, error) {
var rawPacket []byte
for {
n, err := c.conn.Read(c.buffer)
if err != nil {
return "", err
}
rawPacket = append(rawPacket, c.buffer[0:n]...)
if len(rawPacket) < 4 {
// there should be at least 4 bytes
continue
} else if rawPacket[len(rawPacket)-3] == '#' {
// received at least 1 packet.
// TODO: handle multiple packets case
break
}
}
packet := string(rawPacket)
data := string(rawPacket[1 : len(rawPacket)-3])
if !c.noAckMode {
if err := verifyPacket(packet); err != nil {
return "", err
}
return data, c.sendAck()
}
return data, nil
}
func (c *Client) receiveWithTimeout(timeout time.Duration) (string, error) {
c.conn.SetReadDeadline(time.Now().Add(timeout))
defer c.conn.SetReadDeadline(time.Time{})
return c.receive()
}
func (c *Client) sendAck() error {
_, err := c.conn.Write([]byte("+"))
return err
}
func (c *Client) receiveAck() error {
if _, err := c.conn.Read(c.buffer[0:1]); err != nil {
return err
} else if c.buffer[0] != '+' {
return errors.New("failed to receive ack")
}
return nil
}
func verifyPacket(packet string) error {
if packet[0:1] != "$" {
return fmt.Errorf("invalid head data: %v", packet[0])
}
if packet[len(packet)-3:len(packet)-2] != "#" {
return fmt.Errorf("invalid tail data: %v", packet[len(packet)-3])
}
body := packet[1 : len(packet)-3]
bodyChecksum := fmt.Sprintf("%02x", calcChecksum([]byte(body))) // zero-padded to match the two-digit checksum sent on the wire
tailChecksum := packet[len(packet)-2:]
if tailChecksum != bodyChecksum {
return fmt.Errorf("invalid checksum: %s", tailChecksum)
}
return nil
}
func hexToUint64(hex string, littleEndian bool) (uint64, error) {
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return strconv.ParseUint(hex, 16, 64)
}
func hexToByteArray(hex string) ([]byte, error) {
out := make([]byte, len(hex)/2)
for i := 0; i < len(hex); i += 2 {
value, err := strconv.ParseUint(hex[i:i+2], 16, 8)
if err != nil {
return nil, err
}
out[i/2] = uint8(value)
}
return out, nil
}
func uint64ToHex(input uint64, littleEndian bool) string {
hex := fmt.Sprintf("%016x", input)
if littleEndian {
var reversedHex bytes.Buffer
for i := len(hex) - 2; i >= 0; i -= 2 {
reversedHex.WriteString(hex[i : i+2])
}
hex = reversedHex.String()
}
return hex
}
func calcChecksum(buff []byte) uint8 {
var sum uint8
for _, b := range buff {
sum += b
}
return sum
}
var debugServerPathList = []string{
"/Library/Developer/CommandLineTools/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/debugserver",
"/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/debugserver",
}
func debugServerPath() (string, error) {
for _, path := range debugServerPathList {
if _, err := os.Stat(path); !os.IsNotExist(err) {
return path, nil
}
}
return "", fmt.Errorf("debugserver is not found in these paths: %v", debugServerPathList)
}
| {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
} | identifier_body |
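The client above frames every request the same way: a payload wrapped in '$'...'#' plus a two-digit hex checksum, which is the low 8 bits of the byte sum of the payload (see send, verifyPacket and calcChecksum). Below is a minimal, self-contained sketch of that framing; the frame and verify helpers are illustrative names, not part of the client.

package main

import (
	"fmt"
	"strconv"
)

// frame wraps a payload in the $...#xx envelope used by send above.
func frame(payload string) string {
	var sum uint8
	for _, b := range []byte(payload) {
		sum += b
	}
	return fmt.Sprintf("$%s#%02x", payload, sum)
}

// verify checks the trailing checksum of a framed packet, as verifyPacket does.
func verify(packet string) error {
	if len(packet) < 4 || packet[0] != '$' || packet[len(packet)-3] != '#' {
		return fmt.Errorf("malformed packet: %q", packet)
	}
	var sum uint8
	for _, b := range []byte(packet[1 : len(packet)-3]) {
		sum += b
	}
	want, err := strconv.ParseUint(packet[len(packet)-2:], 16, 8)
	if err != nil || uint8(want) != sum {
		return fmt.Errorf("checksum mismatch in %q", packet)
	}
	return nil
}

func main() {
	p := frame("vCont;c") // "continue all threads"
	fmt.Println(p)         // $vCont;c#a8
	fmt.Println(verify(p)) // <nil>
}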
router.go | package atreugo
import (
"net/http"
"sort"
"strings"
fastrouter "github.com/fasthttp/router"
gstrings "github.com/savsgio/gotils/strings"
"github.com/valyala/fasthttp"
"github.com/valyala/fasthttp/fasthttpadaptor"
)
func defaultErrorView(ctx *RequestCtx, err error, statusCode int) {
ctx.Error(err.Error(), statusCode)
}
func emptyView(_ *RequestCtx) error {
return nil
}
func buildOptionsView(url string, fn View, paths map[string][]string) View {
allow := make([]string, 0)
for method, urls := range paths {
if method == fasthttp.MethodOptions || !gstrings.Include(urls, url) {
continue
}
allow = append(allow, method)
}
if len(allow) == 0 {
allow = append(allow, fasthttp.MethodOptions)
}
sort.Strings(allow)
allowValue := strings.Join(allow, ", ")
return func(ctx *RequestCtx) error {
ctx.Response.Header.Set(fasthttp.HeaderAllow, allowValue)
return fn(ctx)
}
}
func newRouter(cfg Config) *Router {
router := fastrouter.New()
router.HandleOPTIONS = false
return &Router{
router: router,
errorView: cfg.ErrorView,
handleOPTIONS: true,
}
}
func (r *Router) mutable(v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil |
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after and skip) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written fasthttp/atreugo
// request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &StaticFS{
Root: rootPath,
IndexNames: []string{"index.html"},
GenerateIndexPages: true,
AcceptByteRange: true,
})
}
// StaticCustom serves static files from the given file system settings
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) StaticCustom(url string, fs *StaticFS) *Path {
url = strings.TrimSuffix(url, "/")
ffs := &fasthttp.FS{
Root: fs.Root,
AllowEmptyRoot: fs.AllowEmptyRoot,
IndexNames: fs.IndexNames,
GenerateIndexPages: fs.GenerateIndexPages,
Compress: fs.Compress,
CompressBrotli: fs.CompressBrotli,
CompressRoot: fs.CompressRoot,
AcceptByteRange: fs.AcceptByteRange,
CacheDuration: fs.CacheDuration,
CompressedFileSuffix: fs.CompressedFileSuffix,
CompressedFileSuffixes: fs.CompressedFileSuffixes,
CleanStop: fs.CleanStop,
}
if fs.PathRewrite != nil {
ffs.PathRewrite = func(ctx *fasthttp.RequestCtx) []byte {
actx := AcquireRequestCtx(ctx)
result := fs.PathRewrite(actx)
ReleaseRequestCtx(actx)
return result
}
}
if fs.PathNotFound != nil {
ffs.PathNotFound = viewToHandler(fs.PathNotFound, r.errorView)
}
stripSlashes := strings.Count(r.getGroupFullPath(url), "/")
if ffs.PathRewrite == nil && stripSlashes > 0 {
ffs.PathRewrite = fasthttp.NewPathSlashesStripper(stripSlashes)
}
return r.RequestHandlerPath(fasthttp.MethodGet, url+"/{filepath:*}", ffs.NewRequestHandler())
}
// ServeFile returns an HTTP response containing the compressed file contents
// from the given path.
//
// The HTTP response may contain uncompressed file contents in the following cases:
//
// - Missing 'Accept-Encoding: gzip' request header.
// - No write access to the directory containing the file.
//
// Directory contents are returned if the path points to a directory.
func (r *Router) ServeFile(url, filePath string) *Path {
viewFn := func(ctx *RequestCtx) error {
fasthttp.ServeFile(ctx.RequestCtx, filePath)
return nil
}
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// Path registers a new view with the given path and method
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Path(method, url string, viewFn View) *Path {
if method != strings.ToUpper(method) {
panicf("http method '%s' must be in uppercase", method)
}
p := &Path{
router: r,
method: method,
url: url,
fullURL: r.getGroupFullPath(url),
view: viewFn,
}
r.handlePath(p)
p.registered = true
return p
}
| {
path = r.parent.getGroupFullPath(r.prefix + path)
} | conditional_block |
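A minimal usage sketch of the router API documented above (GET, NewGroupPath, UseBefore, Static). The listen address, paths, handler bodies and the v11 module path are assumptions for illustration, not taken from this file.

package main

import (
	"log"

	"github.com/savsgio/atreugo/v11"
)

func main() {
	srv := atreugo.New(atreugo.Config{Addr: "0.0.0.0:8000"}) // assumed listen address

	// Log every request before its view runs.
	srv.UseBefore(func(ctx *atreugo.RequestCtx) error {
		log.Printf("%s %s", ctx.Method(), ctx.Path())
		return ctx.Next()
	})

	srv.GET("/hello", func(ctx *atreugo.RequestCtx) error {
		return ctx.TextResponse("hello world")
	})

	// Group /api/v1 paths under a common prefix.
	v1 := srv.NewGroupPath("/api/v1")
	v1.GET("/status", func(ctx *atreugo.RequestCtx) error {
		return ctx.JSONResponse(map[string]string{"status": "ok"})
	})

	// Serve ./public under /static (see Static above).
	srv.Static("/static", "./public")

	log.Fatal(srv.ListenAndServe())
}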
router.go | package atreugo
import (
"net/http"
"sort"
"strings"
fastrouter "github.com/fasthttp/router"
gstrings "github.com/savsgio/gotils/strings"
"github.com/valyala/fasthttp"
"github.com/valyala/fasthttp/fasthttpadaptor"
)
func defaultErrorView(ctx *RequestCtx, err error, statusCode int) {
ctx.Error(err.Error(), statusCode)
}
func emptyView(_ *RequestCtx) error {
return nil
}
func buildOptionsView(url string, fn View, paths map[string][]string) View {
allow := make([]string, 0)
for method, urls := range paths {
if method == fasthttp.MethodOptions || !gstrings.Include(urls, url) {
continue
}
allow = append(allow, method)
}
if len(allow) == 0 {
allow = append(allow, fasthttp.MethodOptions)
}
sort.Strings(allow)
allowValue := strings.Join(allow, ", ")
return func(ctx *RequestCtx) error {
ctx.Response.Header.Set(fasthttp.HeaderAllow, allowValue)
return fn(ctx)
}
}
func newRouter(cfg Config) *Router {
router := fastrouter.New()
router.HandleOPTIONS = false
return &Router{
router: router,
errorView: cfg.ErrorView,
handleOPTIONS: true,
}
}
func (r *Router) mutable(v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) | (path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after and skip) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written fasthttp/atreugo
// request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &StaticFS{
Root: rootPath,
IndexNames: []string{"index.html"},
GenerateIndexPages: true,
AcceptByteRange: true,
})
}
// StaticCustom serves static files from the given file system settings
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) StaticCustom(url string, fs *StaticFS) *Path {
url = strings.TrimSuffix(url, "/")
ffs := &fasthttp.FS{
Root: fs.Root,
AllowEmptyRoot: fs.AllowEmptyRoot,
IndexNames: fs.IndexNames,
GenerateIndexPages: fs.GenerateIndexPages,
Compress: fs.Compress,
CompressBrotli: fs.CompressBrotli,
CompressRoot: fs.CompressRoot,
AcceptByteRange: fs.AcceptByteRange,
CacheDuration: fs.CacheDuration,
CompressedFileSuffix: fs.CompressedFileSuffix,
CompressedFileSuffixes: fs.CompressedFileSuffixes,
CleanStop: fs.CleanStop,
}
if fs.PathRewrite != nil {
ffs.PathRewrite = func(ctx *fasthttp.RequestCtx) []byte {
actx := AcquireRequestCtx(ctx)
result := fs.PathRewrite(actx)
ReleaseRequestCtx(actx)
return result
}
}
if fs.PathNotFound != nil {
ffs.PathNotFound = viewToHandler(fs.PathNotFound, r.errorView)
}
stripSlashes := strings.Count(r.getGroupFullPath(url), "/")
if ffs.PathRewrite == nil && stripSlashes > 0 {
ffs.PathRewrite = fasthttp.NewPathSlashesStripper(stripSlashes)
}
return r.RequestHandlerPath(fasthttp.MethodGet, url+"/{filepath:*}", ffs.NewRequestHandler())
}
// ServeFile returns an HTTP response containing the compressed file contents
// from the given path.
//
// The HTTP response may contain uncompressed file contents in the following cases:
//
// - Missing 'Accept-Encoding: gzip' request header.
// - No write access to the directory containing the file.
//
// Directory contents are returned if the path points to a directory.
func (r *Router) ServeFile(url, filePath string) *Path {
viewFn := func(ctx *RequestCtx) error {
fasthttp.ServeFile(ctx.RequestCtx, filePath)
return nil
}
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// Path registers a new view with the given path and method
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Path(method, url string, viewFn View) *Path {
if method != strings.ToUpper(method) {
panicf("http method '%s' must be in uppercase", method)
}
p := &Path{
router: r,
method: method,
url: url,
fullURL: r.getGroupFullPath(url),
view: viewFn,
}
r.handlePath(p)
p.registered = true
return p
}
| NewGroupPath | identifier_name |
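The UseBefore/UseAfter/UseFinal/SkipMiddlewares methods above build per-router middleware chains: Before runs ahead of the view, After runs behind it, Final always runs, and Skip removes a middleware for a given group. A sketch of that ordering, with illustrative names (authorize, audit, metrics) and an assumed v11 module path:

package main

import (
	"errors"
	"log"

	"github.com/savsgio/atreugo/v11"
)

func authorize(ctx *atreugo.RequestCtx) error {
	// Before middleware: returning an error stops the chain and invokes the error view.
	if len(ctx.Request.Header.Peek("Authorization")) == 0 {
		ctx.SetStatusCode(401)
		return errors.New("missing Authorization header")
	}
	return ctx.Next()
}

func audit(ctx *atreugo.RequestCtx) error {
	// After middleware: runs once the view has finished.
	log.Printf("served %s", ctx.Path())
	return ctx.Next()
}

func metrics(ctx *atreugo.RequestCtx) {
	// Final middleware: always runs, even if something earlier returned an error.
	log.Printf("status=%d", ctx.Response.StatusCode())
}

func main() {
	srv := atreugo.New(atreugo.Config{Addr: ":8000"})
	srv.UseBefore(authorize)
	srv.UseAfter(audit)
	srv.UseFinal(metrics)

	// Everything under /public skips the auth check.
	public := srv.NewGroupPath("/public")
	public.SkipMiddlewares(authorize)
	public.GET("/ping", func(ctx *atreugo.RequestCtx) error {
		return ctx.TextResponse("pong")
	})

	log.Fatal(srv.ListenAndServe())
}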
router.go | package atreugo
import (
"net/http"
"sort"
"strings"
fastrouter "github.com/fasthttp/router"
gstrings "github.com/savsgio/gotils/strings"
"github.com/valyala/fasthttp"
"github.com/valyala/fasthttp/fasthttpadaptor"
)
func defaultErrorView(ctx *RequestCtx, err error, statusCode int) {
ctx.Error(err.Error(), statusCode)
}
func emptyView(_ *RequestCtx) error {
return nil
}
func buildOptionsView(url string, fn View, paths map[string][]string) View {
allow := make([]string, 0)
for method, urls := range paths {
if method == fasthttp.MethodOptions || !gstrings.Include(urls, url) {
continue
}
allow = append(allow, method)
}
if len(allow) == 0 {
allow = append(allow, fasthttp.MethodOptions)
}
sort.Strings(allow)
allowValue := strings.Join(allow, ", ")
return func(ctx *RequestCtx) error {
ctx.Response.Header.Set(fasthttp.HeaderAllow, allowValue)
return fn(ctx)
}
}
func newRouter(cfg Config) *Router {
router := fastrouter.New()
router.HandleOPTIONS = false
return &Router{
router: router,
errorView: cfg.ErrorView,
handleOPTIONS: true,
}
}
func (r *Router) mutable(v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List() | // Middlewares defines the middlewares (before, after and skip) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written fasthttp/atreugo
// request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &StaticFS{
Root: rootPath,
IndexNames: []string{"index.html"},
GenerateIndexPages: true,
AcceptByteRange: true,
})
}
// StaticCustom serves static files from the given file system settings
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) StaticCustom(url string, fs *StaticFS) *Path {
url = strings.TrimSuffix(url, "/")
ffs := &fasthttp.FS{
Root: fs.Root,
AllowEmptyRoot: fs.AllowEmptyRoot,
IndexNames: fs.IndexNames,
GenerateIndexPages: fs.GenerateIndexPages,
Compress: fs.Compress,
CompressBrotli: fs.CompressBrotli,
CompressRoot: fs.CompressRoot,
AcceptByteRange: fs.AcceptByteRange,
CacheDuration: fs.CacheDuration,
CompressedFileSuffix: fs.CompressedFileSuffix,
CompressedFileSuffixes: fs.CompressedFileSuffixes,
CleanStop: fs.CleanStop,
}
if fs.PathRewrite != nil {
ffs.PathRewrite = func(ctx *fasthttp.RequestCtx) []byte {
actx := AcquireRequestCtx(ctx)
result := fs.PathRewrite(actx)
ReleaseRequestCtx(actx)
return result
}
}
if fs.PathNotFound != nil {
ffs.PathNotFound = viewToHandler(fs.PathNotFound, r.errorView)
}
stripSlashes := strings.Count(r.getGroupFullPath(url), "/")
if ffs.PathRewrite == nil && stripSlashes > 0 {
ffs.PathRewrite = fasthttp.NewPathSlashesStripper(stripSlashes)
}
return r.RequestHandlerPath(fasthttp.MethodGet, url+"/{filepath:*}", ffs.NewRequestHandler())
}
// ServeFile returns an HTTP response containing the compressed file contents
// from the given path.
//
// The HTTP response may contain uncompressed file contents in the following cases:
//
// - Missing 'Accept-Encoding: gzip' request header.
// - No write access to the directory containing the file.
//
// Directory contents are returned if the path points to a directory.
func (r *Router) ServeFile(url, filePath string) *Path {
viewFn := func(ctx *RequestCtx) error {
fasthttp.ServeFile(ctx.RequestCtx, filePath)
return nil
}
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// Path registers a new view with the given path and method
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Path(method, url string, viewFn View) *Path {
if method != strings.ToUpper(method) {
panicf("http method '%s' must be in uppercase", method)
}
p := &Path{
router: r,
method: method,
url: url,
fullURL: r.getGroupFullPath(url),
view: viewFn,
}
r.handlePath(p)
p.registered = true
return p
} | }
| random_line_split |
router.go | package atreugo
import (
"net/http"
"sort"
"strings"
fastrouter "github.com/fasthttp/router"
gstrings "github.com/savsgio/gotils/strings"
"github.com/valyala/fasthttp"
"github.com/valyala/fasthttp/fasthttpadaptor"
)
func defaultErrorView(ctx *RequestCtx, err error, statusCode int) {
ctx.Error(err.Error(), statusCode)
}
func emptyView(_ *RequestCtx) error {
return nil
}
func buildOptionsView(url string, fn View, paths map[string][]string) View {
allow := make([]string, 0)
for method, urls := range paths {
if method == fasthttp.MethodOptions || !gstrings.Include(urls, url) {
continue
}
allow = append(allow, method)
}
if len(allow) == 0 {
allow = append(allow, fasthttp.MethodOptions)
}
sort.Strings(allow)
allowValue := strings.Join(allow, ", ")
return func(ctx *RequestCtx) error {
ctx.Response.Header.Set(fasthttp.HeaderAllow, allowValue)
return fn(ctx)
}
}
func newRouter(cfg Config) *Router {
router := fastrouter.New()
router.HandleOPTIONS = false
return &Router{
router: router,
errorView: cfg.ErrorView,
handleOPTIONS: true,
}
}
func (r *Router) mutable(v bool) |
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after and skip) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written fasthttp/atreugo
// request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &StaticFS{
Root: rootPath,
IndexNames: []string{"index.html"},
GenerateIndexPages: true,
AcceptByteRange: true,
})
}
// StaticCustom serves static files from the given file system settings
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) StaticCustom(url string, fs *StaticFS) *Path {
url = strings.TrimSuffix(url, "/")
ffs := &fasthttp.FS{
Root: fs.Root,
AllowEmptyRoot: fs.AllowEmptyRoot,
IndexNames: fs.IndexNames,
GenerateIndexPages: fs.GenerateIndexPages,
Compress: fs.Compress,
CompressBrotli: fs.CompressBrotli,
CompressRoot: fs.CompressRoot,
AcceptByteRange: fs.AcceptByteRange,
CacheDuration: fs.CacheDuration,
CompressedFileSuffix: fs.CompressedFileSuffix,
CompressedFileSuffixes: fs.CompressedFileSuffixes,
CleanStop: fs.CleanStop,
}
if fs.PathRewrite != nil {
ffs.PathRewrite = func(ctx *fasthttp.RequestCtx) []byte {
actx := AcquireRequestCtx(ctx)
result := fs.PathRewrite(actx)
ReleaseRequestCtx(actx)
return result
}
}
if fs.PathNotFound != nil {
ffs.PathNotFound = viewToHandler(fs.PathNotFound, r.errorView)
}
stripSlashes := strings.Count(r.getGroupFullPath(url), "/")
if ffs.PathRewrite == nil && stripSlashes > 0 {
ffs.PathRewrite = fasthttp.NewPathSlashesStripper(stripSlashes)
}
return r.RequestHandlerPath(fasthttp.MethodGet, url+"/{filepath:*}", ffs.NewRequestHandler())
}
// ServeFile returns an HTTP response containing the compressed file contents
// from the given path.
//
// The HTTP response may contain uncompressed file contents in the following cases:
//
// - Missing 'Accept-Encoding: gzip' request header.
// - No write access to the directory containing the file.
//
// Directory contents are returned if the path points to a directory.
func (r *Router) ServeFile(url, filePath string) *Path {
viewFn := func(ctx *RequestCtx) error {
fasthttp.ServeFile(ctx.RequestCtx, filePath)
return nil
}
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// Path registers a new view with the given path and method
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Path(method, url string, viewFn View) *Path {
if method != strings.ToUpper(method) {
panicf("http method '%s' must be in uppercase", method)
}
p := &Path{
router: r,
method: method,
url: url,
fullURL: r.getGroupFullPath(url),
view: viewFn,
}
r.handlePath(p)
p.registered = true
return p
}
| {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
} | identifier_body |
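As a companion to the NetHTTPPath notes above, a sketch of bridging an existing net/http handler while new endpoints use native atreugo views. The mux contents, addresses and the v11 module path are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/savsgio/atreugo/v11"
	"github.com/valyala/fasthttp"
)

func main() {
	// An existing net/http handler that is not worth rewriting yet.
	mux := http.NewServeMux()
	mux.HandleFunc("/legacy", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from net/http")
	})

	srv := atreugo.New(atreugo.Config{Addr: ":8000"})

	// New endpoints use native atreugo views...
	srv.GET("/fast", func(ctx *atreugo.RequestCtx) error {
		return ctx.TextResponse("hello from atreugo")
	})

	// ...while the legacy handler is bridged through fasthttpadaptor
	// (slower than a native view, as noted in the NetHTTPPath comment).
	srv.NetHTTPPath(fasthttp.MethodGet, "/legacy", mux)

	log.Fatal(srv.ListenAndServe())
}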
ciao-vendor.go | //
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"text/tabwriter"
)
type repoInfo struct {
URL string `json:"url"`
Version string `json:"version"`
License string `json:"license"`
}
type packageDeps struct {
p string
deps []string
}
type packageInfo struct {
name string
vendored bool
installed bool
CGO bool `json:"cgo"`
Standard bool `json:"standard"`
}
type subPackage struct {
name string
wildcard string
docs []string
cgo bool
}
type clientInfo struct {
name string
err error
}
type piList []*packageInfo
func (p piList) Len() int {
return len(p)
}
func (p piList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func (p piList) Less(i, j int) bool {
return p[i].name < p[j].name
}
var repos = map[string]repoInfo{}
var listTemplate = `
{{- range .Deps -}}
{{.}}
{{end -}}
`
var directTemplate = `
{{- range .Imports -}}
{{.}}
{{end -}}
`
func getPackageDetails(name string) *packageInfo {
packageTemplate := `{
"standard" : {{.Standard}},
"cgo" : {{if .CFiles}}true{{else}}false{{end}}
}`
pi := &packageInfo{name: name}
cmd := exec.Command("go", "list", "-f", packageTemplate, name)
output, err := cmd.Output()
if err != nil {
return pi
}
pi.installed = true
_ = json.Unmarshal(output, pi)
return pi
}
func getPackageDependencies(packages []string, template string) (map[string]struct{}, error) {
deps := make(map[string]struct{})
args := []string{"list", "-f", template}
args = append(args, packages...)
var output bytes.Buffer
cmd := exec.Command("go", args...)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
for scanner.Scan() {
deps[scanner.Text()] = struct{}{}
}
return deps, nil
}
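// For reference, the command assembled above is equivalent to running
// `go list -f '<listTemplate>' <packages...>` on the command line: the template
// ranges over .Deps (or .Imports for directTemplate) and prints one package
// path per line.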
func calcDeps(projectRoot string, packages []string) (piList, error) {
deps, err := getPackageDependencies(packages, listTemplate)
if err != nil {
return nil, err
}
ch := make(chan *packageInfo)
for pkg := range deps {
go func(pkg string) {
localDep := strings.HasPrefix(pkg, projectRoot)
vendoredDep := strings.HasPrefix(pkg, path.Join(projectRoot, "vendor"))
if localDep && !vendoredDep {
ch <- nil
} else {
pd := getPackageDetails(pkg)
if pd.Standard {
ch <- nil
} else {
pd.vendored = vendoredDep
ch <- pd
}
}
}(pkg)
}
depsAr := make(piList, 0, len(deps))
for i := 0; i < cap(depsAr); i++ {
pd := <-ch
if pd != nil {
depsAr = append(depsAr, pd)
}
}
sort.Sort(depsAr)
return depsAr, nil
}
func checkWD() (string, string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", "", fmt.Errorf("Unable to determine cwd: %v", err)
}
gopath, _ := os.LookupEnv("GOPATH")
if gopath == "" {
return "", "", fmt.Errorf("GOPATH is not set")
}
pths := strings.Split(gopath, ":")
for _, p := range pths {
if strings.HasPrefix(cwd, path.Join(p, "src")) {
return cwd, p, nil
}
}
return "", "", fmt.Errorf("ciao-vendor must be run from $GOPATH/src/path/to/project")
}
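// copyRepos exports the pinned version of each registered repository into the
// vendor/ directory by piping `git archive <version>` into `tar -x`, extracting
// only the wildcard patterns and doc files recorded for the sub-packages that
// are actually used, and excluding *_test.go files.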
func copyRepos(cwd, sourceRoot string, subPackages map[string][]*subPackage) error {
errCh := make(chan error)
for k, r := range repos {
go func(k string, URL string) {
packages, ok := subPackages[k]
if !ok {
errCh <- nil
return
}
cmd1 := exec.Command("git", "archive", repos[k].Version)
cmd1.Dir = path.Join(sourceRoot, k)
os.MkdirAll(path.Join(cwd, "vendor", k), 0755)
args := []string{"-xC", path.Join(cwd, "vendor", k), "--wildcards",
"--no-wildcards-match-slash"}
for _, a := range packages {
if a.wildcard != "" {
args = append(args, a.wildcard+".go")
}
if a.cgo {
args = append(args, a.wildcard+".[ch]")
}
args = append(args, a.docs...)
}
args = append(args, "--exclude", "*_test.go")
cmd2 := exec.Command("tar", args...)
pipe, err := cmd1.StdoutPipe()
if err != nil {
errCh <- fmt.Errorf("Unable to retrieve pipe for git command %v: %v", args, err)
return
}
defer func() {
_ = pipe.Close()
}()
cmd2.Stdin = pipe
err = cmd1.Start()
if err != nil {
errCh <- fmt.Errorf("Unable to start git command %v: %v", args, err)
return
}
err = cmd2.Run()
if err != nil {
errCh <- fmt.Errorf("Unable to run tar command %v", err)
return
}
errCh <- nil
}(k, r.URL)
}
var err error
for range repos {
rcvErr := <-errCh
if err == nil && rcvErr != nil {
err = rcvErr
}
}
return err
}
func updateNonVendoredDeps(deps piList, projectRoot string) error {
fmt.Println("Updating non-vendored dependencies")
goGot := make(map[string]struct{})
for _, d := range deps {
args := []string{"get", "-v"}
var repoFound string
for k := range repos {
if strings.HasPrefix(d.name, k) {
repoFound = k
break
}
}
if _, ok := goGot[repoFound]; !ok {
args = append(args, "-u")
}
args = append(args, d.name)
cmd := exec.Command("go", args...)
stdout, err := cmd.StderrPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
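// computeSubPackages groups the dependencies by the known repo they belong
// to, recording for each one the wildcard pattern that copyRepos later uses
// to extract its files.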
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao, as this would pull down
// the dependencies of the master version of ciao's dependencies,
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
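// usedBy returns a space separated list of the packages that import name, or
// "project" when no other dependency imports it.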
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
func depsByPackage(packages piList) map[string][]string {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call go list on %s : %v\n", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps}
}(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
}
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string)
for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap)
}
return clientMap
}
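// verify splits the dependency list into packages whose repo is unknown
// (missing), packages not installed in the GOPATH (uninstalled), packages not
// present under the vendor directory (notVendored), and known repos that no
// dependency uses any more (notUsed).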
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
if !d.vendored {
cmd := exec.Command("go", "list", path.Join(vendorRoot, d.name))
if cmd.Run() != nil {
notVendored = append(notVendored, d.name)
}
}
reposUsed[k] = struct{}{}
continue depLoop
}
}
missing = append(missing, d.name)
}
for k := range repos {
if _, ok := reposUsed[k]; !ok {
notUsed = append(notUsed, k)
}
}
return missing, uninstalled, notVendored, notUsed
}
func checkKnown(missing []string, deps piList) bool {
if len(missing) == 0 {
fmt.Println("All Dependencies Known: [OK]")
return true
}
clientMap := computeClients(deps)
fmt.Println("All Dependencies Known: [FAIL]")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Missing Package\tUsed By")
for _, d := range missing {
fmt.Fprintf(w, "%s\t%s\n", d, clientMap[d])
}
w.Flush()
fmt.Println("")
return false
}
func checkUninstalled(uninstalled []string) bool {
if len(uninstalled) == 0 {
fmt.Println("All Dependencies Installed: [OK]")
return true
}
fmt.Println("All Dependencies Installed: [FAIL]")
for _, d := range uninstalled {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkVendored(notVendored []string) bool {
if len(notVendored) == 0 {
fmt.Println("All Dependencies Vendored: [OK]")
return true
}
fmt.Println("All Dependencies Vendored: [FAIL]")
for _, d := range notVendored {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkNotUsed(notUsed []string) bool {
if len(notUsed) == 0 {
fmt.Println("All Dependencies Used: [OK]")
return true
}
fmt.Println("All Dependencies Used: [FAIL]")
for _, k := range notUsed {
fmt.Println(k)
}
return false
}
func check(cwd, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
ok := checkKnown(missing, deps)
ok = checkUninstalled(uninstalled) && ok
ok = checkVendored(notVendored) && ok
ok = checkNotUsed(notUsed) && ok
if !ok {
return fmt.Errorf("Dependency checks failed")
}
return nil
}
func packages(cwd, projectRoot string) error {
uninstalledDeps := false
plist, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\tRepo\tVersion\tLicense")
for _, d := range plist {
fmt.Fprintf(w, "%s\t", d.name)
r := ""
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
r = k
break
}
}
if d.vendored {
fmt.Fprintf(w, "Vendored\t")
} else if d.installed {
fmt.Fprintf(w, "GOPATH\t")
} else {
fmt.Fprintf(w, "Missing\t")
uninstalledDeps = true
}
if repos[r].URL != "" {
fmt.Fprintf(w, "%s\t", r)
if d.vendored {
fmt.Fprintf(w, "%s\t", repos[r].Version)
} else {
fmt.Fprintf(w, "master\t")
}
fmt.Fprintf(w, "%s", repos[r].License)
} else {
fmt.Fprintf(w, "Unknown\tUnknown\tUnknown")
}
fmt.Fprintln(w)
}
w.Flush()
if uninstalledDeps {
fmt.Println("")
return fmt.Errorf("Some dependencies are not installed. Unable to provide complete dependency list")
}
return nil
}
func deps(projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
if len(missing) != 0 || len(uninstalled) != 0 || len(notVendored) != 0 || len(notUsed) != 0 {
return fmt.Errorf("Dependencies out of sync. Please run go run ciao-vendor/ciao-vendor.go check")
}
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package Root\tRepo\tVersion\tLicense")
for _, k := range keys {
r := repos[k]
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", k, r.URL, r.Version, r.License)
}
w.Flush()
return nil
}
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
return clientInfo.err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func vendorNew(cwd, sourceRoot, projectRoot, repo string, ri repoInfo) error {
_, ok := repos[repo]
if ok {
return fmt.Errorf("%s is already vendored", repo)
}
repos[repo] = ri
if err := writeRepos(cwd); err != nil {
return err
}
return vendor(cwd, projectRoot, sourceRoot)
}
func unvendor(cwd, sourceRoot, projectRoot, repo string) error {
_, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not vendored", repo)
}
delete(repos, repo)
if err := writeRepos(cwd); err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err := os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
fmt.Printf("%s unvendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well\n", repo)
return nil
}
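// runCommand dispatches the sub-command named in args[1] to its handler,
// parsing any sub-command specific flags first.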
func runCommand(cwd, sourceRoot string, args []string) error {
var err error
projectRoot := cwd[len(sourceRoot)+1:]
switch args[1] {
case "check":
err = check(cwd, projectRoot)
case "vendor":
err = vendor(cwd, projectRoot, sourceRoot)
case "deps":
err = deps(projectRoot)
case "packages":
err = packages(cwd, projectRoot)
case "uses":
fs := flag.NewFlagSet("uses", flag.ExitOnError)
direct := false
fs.BoolVar(&direct, "d", false, "output direct dependencies only")
if err := fs.Parse(args[2:]); err != nil {
return err
}
if len(fs.Args()) == 0 {
return fmt.Errorf("Missing package for uses command")
}
err = uses(fs.Args()[0], projectRoot, direct)
case "updates":
err = updates(sourceRoot, projectRoot)
case "test":
fs := flag.NewFlagSet("test", flag.ExitOnError)
sudo := false
fs.BoolVar(&sudo, "s", false, "run tests with sudo")
if err := fs.Parse(args[2:]); err != nil {
return err
}
args = fs.Args()
err = test(sudo, sourceRoot, projectRoot, args[0], args[1], args[2:])
case "revendor":
err = revendor(cwd, sourceRoot, projectRoot, args[2], args[3])
case "vendornew":
ri := repoInfo{URL: args[5], Version: args[3], License: args[4]}
err = vendorNew(cwd, sourceRoot, projectRoot, args[2], ri)
case "unvendor":
err = unvendor(cwd, sourceRoot, projectRoot, args[2])
}
return err
}
func readRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := ioutil.ReadFile(packageFile)
if err != nil |
err = json.Unmarshal(d, &repos)
if err != nil {
return fmt.Errorf("Unable to unmarshal %s : %v", packageFile, err)
}
return nil
}
func writeRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := json.MarshalIndent(&repos, "", "\t")
if err != nil {
return fmt.Errorf("Unable to marshal %s : %v", packageFile, err)
}
err = ioutil.WriteFile(packageFile, d, 0755)
if err != nil {
return fmt.Errorf("Unable to write %s : %v", packageFile, err)
}
return nil
}
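// For reference, packages.json maps each repo root to its pinned version,
// repository URL and license. A minimal, purely illustrative entry (the
// package path, URL and version below are made up) would look like:
//
//  {
//      "github.com/example/dep": {
//          "url": "https://github.com/example/dep",
//          "version": "v1.0.0",
//          "license": "Apache-2.0 License"
//      }
//  }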
func checkTwoArgs(args []string) bool {
return (len(args) == 2 &&
(args[1] == "vendor" || args[1] == "check" || args[1] == "deps" ||
args[1] == "packages" || args[1] == "updates"))
}
func checkArgs(args []string) bool {
return checkTwoArgs(args) ||
(len(args) == 3 && (args[1] == "unvendor")) ||
(len(args) >= 3 && (args[1] == "uses")) ||
(len(args) == 4 && (args[1] == "revendor")) ||
(len(args) == 6 && (args[1] == "vendornew")) ||
(len(args) >= 4 && (args[1] == "test"))
}
func main() {
if !checkArgs(os.Args) {
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendor|check|deps|packages|updates")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor uses [-d] package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor test [-s] package version [go-test flags]")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor unvendor package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor revendor package version")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendornew package version license URL")
os.Exit(1)
}
cwd, goPath, err := checkWD()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
err = readRepos(cwd)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
sourceRoot := path.Join(goPath, "src")
if len(cwd) < len(sourceRoot)+1 {
fmt.Fprintln(os.Stderr, "Could not determine project root")
os.Exit(1)
}
err = runCommand(cwd, sourceRoot, os.Args)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| {
if !os.IsNotExist(err) {
return fmt.Errorf("Unable to read %s : %v", packageFile, err)
}
return nil
} | conditional_block |
ciao-vendor.go | //
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"text/tabwriter"
)
type repoInfo struct {
URL string `json:"url"`
Version string `json:"version"`
License string `json:"license"`
}
type packageDeps struct {
p string
deps []string
}
type packageInfo struct {
name string
vendored bool
installed bool
CGO bool `json:"cgo"`
Standard bool `json:"standard"`
}
type subPackage struct {
name string
wildcard string
docs []string
cgo bool
}
type clientInfo struct {
name string
err error
}
type piList []*packageInfo
func (p piList) Len() int {
return len(p)
}
func (p piList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func (p piList) Less(i, j int) bool {
return p[i].name < p[j].name
}
var repos = map[string]repoInfo{}
var listTemplate = `
{{- range .Deps -}}
{{.}}
{{end -}}
`
var directTemplate = `
{{- range .Imports -}}
{{.}}
{{end -}}
`
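// getPackageDetails uses "go list" to determine whether the named package is
// installed, whether it belongs to the standard library, and whether it uses
// cgo.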
func getPackageDetails(name string) *packageInfo {
packageTemplate := `{
"standard" : {{.Standard}},
"cgo" : {{if .CFiles}}true{{else}}false{{end}}
}`
pi := &packageInfo{name: name}
cmd := exec.Command("go", "list", "-f", packageTemplate, name)
output, err := cmd.Output()
if err != nil {
return pi
}
pi.installed = true
_ = json.Unmarshal(output, pi)
return pi
}
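// getPackageDependencies runs "go list -f template" over the given packages
// and returns the set of package paths the template prints.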
func getPackageDependencies(packages []string, template string) (map[string]struct{}, error) {
deps := make(map[string]struct{})
args := []string{"list", "-f", template}
args = append(args, packages...)
var output bytes.Buffer
cmd := exec.Command("go", args...)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
for scanner.Scan() {
deps[scanner.Text()] = struct{}{}
}
return deps, nil
}
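// calcDeps computes the transitive dependencies of the given packages,
// discarding the project's own non-vendored packages and the standard
// library, and returns the remainder sorted by name.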
func calcDeps(projectRoot string, packages []string) (piList, error) {
deps, err := getPackageDependencies(packages, listTemplate)
if err != nil {
return nil, err
}
ch := make(chan *packageInfo)
for pkg := range deps {
go func(pkg string) {
localDep := strings.HasPrefix(pkg, projectRoot)
vendoredDep := strings.HasPrefix(pkg, path.Join(projectRoot, "vendor"))
if localDep && !vendoredDep {
ch <- nil
} else {
pd := getPackageDetails(pkg)
if pd.Standard {
ch <- nil
} else {
pd.vendored = vendoredDep
ch <- pd
}
}
}(pkg)
}
depsAr := make(piList, 0, len(deps))
for i := 0; i < cap(depsAr); i++ {
pd := <-ch
if pd != nil {
depsAr = append(depsAr, pd)
}
}
sort.Sort(depsAr)
return depsAr, nil
}
func checkWD() (string, string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", "", fmt.Errorf("Unable to determine cwd: %v", err)
}
gopath, _ := os.LookupEnv("GOPATH")
if gopath == "" {
return "", "", fmt.Errorf("GOPATH is not set")
}
pths := strings.Split(gopath, ":")
for _, p := range pths {
if strings.HasPrefix(cwd, path.Join(p, "src")) {
return cwd, p, nil
}
}
return "", "", fmt.Errorf("ciao-vendor must be run from $GOPATH/src/path/to/project")
}
func copyRepos(cwd, sourceRoot string, subPackages map[string][]*subPackage) error {
errCh := make(chan error)
for k, r := range repos {
go func(k string, URL string) {
packages, ok := subPackages[k]
if !ok {
errCh <- nil
return
}
cmd1 := exec.Command("git", "archive", repos[k].Version)
cmd1.Dir = path.Join(sourceRoot, k)
os.MkdirAll(path.Join(cwd, "vendor", k), 0755)
args := []string{"-xC", path.Join(cwd, "vendor", k), "--wildcards",
"--no-wildcards-match-slash"}
for _, a := range packages {
if a.wildcard != "" {
args = append(args, a.wildcard+".go")
}
if a.cgo {
args = append(args, a.wildcard+".[ch]")
}
args = append(args, a.docs...)
}
args = append(args, "--exclude", "*_test.go")
cmd2 := exec.Command("tar", args...)
pipe, err := cmd1.StdoutPipe()
if err != nil {
errCh <- fmt.Errorf("Unable to retrieve pipe for git command %v: %v", args, err)
return
}
defer func() {
_ = pipe.Close()
}()
cmd2.Stdin = pipe
err = cmd1.Start()
if err != nil {
errCh <- fmt.Errorf("Unable to start git command %v: %v", args, err)
return
}
err = cmd2.Run()
if err != nil {
errCh <- fmt.Errorf("Unable to run tar command: %v", err)
return
}
errCh <- nil
}(k, r.URL)
}
var err error
for range repos {
rcvErr := <-errCh
if err == nil && rcvErr != nil {
err = rcvErr
}
}
return err
}
func updateNonVendoredDeps(deps piList, projectRoot string) error {
fmt.Println("Updating non-vendored dependencies")
goGot := make(map[string]struct{})
for _, d := range deps {
args := []string{"get", "-v"}
var repoFound string
for k := range repos {
if strings.HasPrefix(d.name, k) {
repoFound = k
break
}
}
if _, ok := goGot[repoFound]; !ok {
args = append(args, "-u")
}
args = append(args, d.name)
cmd := exec.Command("go", args...)
stdout, err := cmd.StderrPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao, as this would pull down
// the dependencies of the master version of ciao's dependencies,
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
func depsByPackage(packages piList) map[string][]string {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call go list on %s : %v\n", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps}
}(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
}
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string)
for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap)
}
return clientMap
}
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
if !d.vendored {
cmd := exec.Command("go", "list", path.Join(vendorRoot, d.name))
if cmd.Run() != nil {
notVendored = append(notVendored, d.name)
}
}
reposUsed[k] = struct{}{}
continue depLoop
}
}
missing = append(missing, d.name)
}
for k := range repos {
if _, ok := reposUsed[k]; !ok {
notUsed = append(notUsed, k)
}
}
return missing, uninstalled, notVendored, notUsed
}
func checkKnown(missing []string, deps piList) bool {
if len(missing) == 0 {
fmt.Println("All Dependencies Known: [OK]")
return true
}
clientMap := computeClients(deps)
fmt.Println("All Dependencies Known: [FAIL]")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Missing Package\tUsed By")
for _, d := range missing {
fmt.Fprintf(w, "%s\t%s\n", d, clientMap[d])
}
w.Flush()
fmt.Println("")
return false
}
func checkUninstalled(uninstalled []string) bool {
if len(uninstalled) == 0 {
fmt.Println("All Dependencies Installed: [OK]")
return true
}
fmt.Println("All Dependencies Installed: [FAIL]")
for _, d := range uninstalled {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkVendored(notVendored []string) bool {
if len(notVendored) == 0 {
fmt.Println("All Dependencies Vendored: [OK]")
return true
}
fmt.Println("All Dependencies Vendored: [FAIL]")
for _, d := range notVendored {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkNotUsed(notUsed []string) bool {
if len(notUsed) == 0 {
fmt.Println("All Dependencies Used: [OK]")
return true
}
fmt.Println("All Dependencies Used: [FAIL]")
for _, k := range notUsed {
fmt.Println(k)
}
return false
}
func check(cwd, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
ok := checkKnown(missing, deps)
ok = checkUninstalled(uninstalled) && ok
ok = checkVendored(notVendored) && ok
ok = checkNotUsed(notUsed) && ok
if !ok {
return fmt.Errorf("Dependency checks failed")
}
return nil
}
func packages(cwd, projectRoot string) error {
uninstalledDeps := false
plist, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\tRepo\tVersion\tLicense")
for _, d := range plist {
fmt.Fprintf(w, "%s\t", d.name)
r := ""
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
r = k
break
}
}
if d.vendored {
fmt.Fprintf(w, "Vendored\t")
} else if d.installed {
fmt.Fprintf(w, "GOPATH\t")
} else {
fmt.Fprintf(w, "Missing\t")
uninstalledDeps = true
}
if repos[r].URL != "" {
fmt.Fprintf(w, "%s\t", r)
if d.vendored {
fmt.Fprintf(w, "%s\t", repos[r].Version)
} else {
fmt.Fprintf(w, "master\t")
}
fmt.Fprintf(w, "%s", repos[r].License)
} else {
fmt.Fprintf(w, "Unknown\tUnknown\tUnknown")
}
fmt.Fprintln(w)
}
w.Flush()
if uninstalledDeps {
fmt.Println("")
return fmt.Errorf("Some dependencies are not installed. Unable to provide complete dependency list")
}
return nil
}
func deps(projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
if len(missing) != 0 || len(uninstalled) != 0 || len(notVendored) != 0 || len(notUsed) != 0 {
return fmt.Errorf("Dependencies out of sync. Please run go run ciao-vendor/ciao-vendor.go check")
}
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package Root\tRepo\tVersion\tLicense")
for _, k := range keys {
r := repos[k]
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", k, r.URL, r.Version, r.License)
}
w.Flush()
return nil
}
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
return clientInfo.err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func | (cwd, sourceRoot, projectRoot, repo string, ri repoInfo) error {
_, ok := repos[repo]
if ok {
return fmt.Errorf("%s is already vendored", repo)
}
repos[repo] = ri
if err := writeRepos(cwd); err != nil {
return err
}
return vendor(cwd, projectRoot, sourceRoot)
}
func unvendor(cwd, sourceRoot, projectRoot, repo string) error {
_, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not vendored", repo)
}
delete(repos, repo)
if err := writeRepos(cwd); err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err := os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
fmt.Printf("%s unvendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well\n", repo)
return nil
}
func runCommand(cwd, sourceRoot string, args []string) error {
var err error
projectRoot := cwd[len(sourceRoot)+1:]
switch args[1] {
case "check":
err = check(cwd, projectRoot)
case "vendor":
err = vendor(cwd, projectRoot, sourceRoot)
case "deps":
err = deps(projectRoot)
case "packages":
err = packages(cwd, projectRoot)
case "uses":
fs := flag.NewFlagSet("uses", flag.ExitOnError)
direct := false
fs.BoolVar(&direct, "d", false, "output direct dependencies only")
if err := fs.Parse(args[2:]); err != nil {
return err
}
if len(fs.Args()) == 0 {
return fmt.Errorf("Missing package for uses command")
}
err = uses(fs.Args()[0], projectRoot, direct)
case "updates":
err = updates(sourceRoot, projectRoot)
case "test":
fs := flag.NewFlagSet("test", flag.ExitOnError)
sudo := false
fs.BoolVar(&sudo, "s", false, "run tests with sudo")
if err := fs.Parse(args[2:]); err != nil {
return err
}
args = fs.Args()
err = test(sudo, sourceRoot, projectRoot, args[0], args[1], args[2:])
case "revendor":
err = revendor(cwd, sourceRoot, projectRoot, args[2], args[3])
case "vendornew":
ri := repoInfo{URL: args[5], Version: args[3], License: args[4]}
err = vendorNew(cwd, sourceRoot, projectRoot, args[2], ri)
case "unvendor":
err = unvendor(cwd, sourceRoot, projectRoot, args[2])
}
return err
}
func readRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := ioutil.ReadFile(packageFile)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("Unable to read %s : %v", packageFile, err)
}
return nil
}
err = json.Unmarshal(d, &repos)
if err != nil {
return fmt.Errorf("Unable to unmarshal %s : %v", packageFile, err)
}
return nil
}
func writeRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := json.MarshalIndent(&repos, "", "\t")
if err != nil {
return fmt.Errorf("Unable to marshal %s : %v", packageFile, err)
}
err = ioutil.WriteFile(packageFile, d, 0755)
if err != nil {
return fmt.Errorf("Unable to write %s : %v", packageFile, err)
}
return nil
}
func checkTwoArgs(args []string) bool {
return (len(args) == 2 &&
(args[1] == "vendor" || args[1] == "check" || args[1] == "deps" ||
args[1] == "packages" || args[1] == "updates"))
}
func checkArgs(args []string) bool {
return checkTwoArgs(args) ||
(len(args) == 3 && (args[1] == "unvendor")) ||
(len(args) >= 3 && (args[1] == "uses")) ||
(len(args) == 4 && (args[1] == "revendor")) ||
(len(args) == 6 && (args[1] == "vendornew")) ||
(len(args) >= 4 && (args[1] == "test"))
}
func main() {
if !checkArgs(os.Args) {
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendor|check|deps|packages|updates")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor uses [-d] package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor test [-s] package version [go-test flags]")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor unvendor package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor revendor package version")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendornew package version license URL")
os.Exit(1)
}
cwd, goPath, err := checkWD()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
err = readRepos(cwd)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
sourceRoot := path.Join(goPath, "src")
if len(cwd) < len(sourceRoot)+1 {
fmt.Fprintln(os.Stderr, "Could not determine project root")
os.Exit(1)
}
err = runCommand(cwd, sourceRoot, os.Args)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| vendorNew | identifier_name |
ciao-vendor.go | //
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"text/tabwriter"
)
type repoInfo struct {
URL string `json:"url"`
Version string `json:"version"`
License string `json:"license"`
}
type packageDeps struct {
p string
deps []string
}
type packageInfo struct {
name string
vendored bool
installed bool
CGO bool `json:"cgo"`
Standard bool `json:"standard"`
}
type subPackage struct {
name string
wildcard string
docs []string
cgo bool
}
type clientInfo struct {
name string
err error
}
type piList []*packageInfo
func (p piList) Len() int {
return len(p)
}
func (p piList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func (p piList) Less(i, j int) bool {
return p[i].name < p[j].name
}
var repos = map[string]repoInfo{}
var listTemplate = `
{{- range .Deps -}}
{{.}}
{{end -}}
`
var directTemplate = `
{{- range .Imports -}}
{{.}}
{{end -}}
`
func getPackageDetails(name string) *packageInfo {
packageTemplate := `{
"standard" : {{.Standard}},
"cgo" : {{if .CFiles}}true{{else}}false{{end}}
}`
pi := &packageInfo{name: name}
cmd := exec.Command("go", "list", "-f", packageTemplate, name)
output, err := cmd.Output()
if err != nil {
return pi
}
pi.installed = true
_ = json.Unmarshal(output, pi)
return pi
}
func getPackageDependencies(packages []string, template string) (map[string]struct{}, error) {
deps := make(map[string]struct{})
args := []string{"list", "-f", template}
args = append(args, packages...)
var output bytes.Buffer
cmd := exec.Command("go", args...)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
for scanner.Scan() {
deps[scanner.Text()] = struct{}{}
}
return deps, nil
}
func calcDeps(projectRoot string, packages []string) (piList, error) {
deps, err := getPackageDependencies(packages, listTemplate)
if err != nil {
return nil, err
}
ch := make(chan *packageInfo)
for pkg := range deps {
go func(pkg string) {
localDep := strings.HasPrefix(pkg, projectRoot)
vendoredDep := strings.HasPrefix(pkg, path.Join(projectRoot, "vendor"))
if localDep && !vendoredDep {
ch <- nil
} else {
pd := getPackageDetails(pkg)
if pd.Standard {
ch <- nil
} else {
pd.vendored = vendoredDep
ch <- pd
}
}
}(pkg)
}
depsAr := make(piList, 0, len(deps))
for i := 0; i < cap(depsAr); i++ {
pd := <-ch
if pd != nil {
depsAr = append(depsAr, pd)
}
}
sort.Sort(depsAr)
return depsAr, nil
}
func checkWD() (string, string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", "", fmt.Errorf("Unable to determine cwd: %v", err)
}
gopath, _ := os.LookupEnv("GOPATH")
if gopath == "" {
return "", "", fmt.Errorf("GOPATH is not set")
}
pths := strings.Split(gopath, ":")
for _, p := range pths {
if strings.HasPrefix(cwd, path.Join(p, "src")) {
return cwd, p, nil
}
}
return "", "", fmt.Errorf("ciao-vendor must be run from $GOPATH/src/path/to/project")
}
func copyRepos(cwd, sourceRoot string, subPackages map[string][]*subPackage) error {
errCh := make(chan error)
for k, r := range repos {
go func(k string, URL string) {
packages, ok := subPackages[k]
if !ok {
errCh <- nil
return
}
cmd1 := exec.Command("git", "archive", repos[k].Version)
cmd1.Dir = path.Join(sourceRoot, k)
os.MkdirAll(path.Join(cwd, "vendor", k), 0755)
args := []string{"-xC", path.Join(cwd, "vendor", k), "--wildcards",
"--no-wildcards-match-slash"}
for _, a := range packages {
if a.wildcard != "" {
args = append(args, a.wildcard+".go")
}
if a.cgo {
args = append(args, a.wildcard+".[ch]")
}
args = append(args, a.docs...)
}
args = append(args, "--exclude", "*_test.go")
cmd2 := exec.Command("tar", args...)
pipe, err := cmd1.StdoutPipe()
if err != nil {
errCh <- fmt.Errorf("Unable to retrieve pipe for git command %v: %v", args, err)
return
}
defer func() {
_ = pipe.Close()
}()
cmd2.Stdin = pipe
err = cmd1.Start()
if err != nil {
errCh <- fmt.Errorf("Unable to start git command %v: %v", args, err)
return
}
err = cmd2.Run()
if err != nil {
errCh <- fmt.Errorf("Unable to run tar command: %v", err)
return
}
errCh <- nil
}(k, r.URL)
}
var err error
for range repos {
rcvErr := <-errCh
if err == nil && rcvErr != nil {
err = rcvErr
}
}
return err
}
func updateNonVendoredDeps(deps piList, projectRoot string) error {
fmt.Println("Updating non-vendored dependencies")
goGot := make(map[string]struct{})
for _, d := range deps {
args := []string{"get", "-v"}
var repoFound string
for k := range repos {
if strings.HasPrefix(d.name, k) {
repoFound = k
break
}
}
if _, ok := goGot[repoFound]; !ok {
args = append(args, "-u")
}
args = append(args, d.name)
cmd := exec.Command("go", args...)
stdout, err := cmd.StderrPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao, as this would pull down
// the dependencies of the master version of ciao's dependencies,
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
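// depsByPackage returns, for every package in packages, the list of packages
// it depends on, running one "go list" invocation per package concurrently.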
func depsByPackage(packages piList) map[string][]string {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call go list on %s : %v\n", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps}
}(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
}
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string) | }
return clientMap
}
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
if !d.vendored {
cmd := exec.Command("go", "list", path.Join(vendorRoot, d.name))
if cmd.Run() != nil {
notVendored = append(notVendored, d.name)
}
}
reposUsed[k] = struct{}{}
continue depLoop
}
}
missing = append(missing, d.name)
}
for k := range repos {
if _, ok := reposUsed[k]; !ok {
notUsed = append(notUsed, k)
}
}
return missing, uninstalled, notVendored, notUsed
}
func checkKnown(missing []string, deps piList) bool {
if len(missing) == 0 {
fmt.Println("All Dependencies Known: [OK]")
return true
}
clientMap := computeClients(deps)
fmt.Println("All Dependencies Known: [FAIL]")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Missing Package\tUsed By")
for _, d := range missing {
fmt.Fprintf(w, "%s\t%s\n", d, clientMap[d])
}
w.Flush()
fmt.Println("")
return false
}
func checkUninstalled(uninstalled []string) bool {
if len(uninstalled) == 0 {
fmt.Println("All Dependencies Installed: [OK]")
return true
}
fmt.Println("All Dependencies Installed: [FAIL]")
for _, d := range uninstalled {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkVendored(notVendored []string) bool {
if len(notVendored) == 0 {
fmt.Println("All Dependencies Vendored: [OK]")
return true
}
fmt.Println("All Dependencies Vendored: [FAIL]")
for _, d := range notVendored {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkNotUsed(notUsed []string) bool {
if len(notUsed) == 0 {
fmt.Println("All Dependencies Used: [OK]")
return true
}
fmt.Println("All Dependencies Used: [FAIL]")
for _, k := range notUsed {
fmt.Println(k)
}
return false
}
func check(cwd, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
ok := checkKnown(missing, deps)
ok = checkUninstalled(uninstalled) && ok
ok = checkVendored(notVendored) && ok
ok = checkNotUsed(notUsed) && ok
if !ok {
return fmt.Errorf("Dependency checks failed")
}
return nil
}
func packages(cwd, projectRoot string) error {
uninstalledDeps := false
plist, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\tRepo\tVersion\tLicense")
for _, d := range plist {
fmt.Fprintf(w, "%s\t", d.name)
r := ""
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
r = k
break
}
}
if d.vendored {
fmt.Fprintf(w, "Vendored\t")
} else if d.installed {
fmt.Fprintf(w, "GOPATH\t")
} else {
fmt.Fprintf(w, "Missing\t")
uninstalledDeps = true
}
if repos[r].URL != "" {
fmt.Fprintf(w, "%s\t", r)
if d.vendored {
fmt.Fprintf(w, "%s\t", repos[r].Version)
} else {
fmt.Fprintf(w, "master\t")
}
fmt.Fprintf(w, "%s", repos[r].License)
} else {
fmt.Fprintf(w, "Unknown\tUnknown\tUnknown")
}
fmt.Fprintln(w)
}
w.Flush()
if uninstalledDeps {
fmt.Println("")
return fmt.Errorf("Some dependencies are not installed. Unable to provide complete dependency list")
}
return nil
}
func deps(projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
if len(missing) != 0 || len(uninstalled) != 0 || len(notVendored) != 0 || len(notUsed) != 0 {
return fmt.Errorf("Dependencies out of sync. Please run go ciao-vendor/ciao-vendor.go check")
}
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package Root\tRepo\tVersion\tLicense")
for _, k := range keys {
r := repos[k]
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", k, r.URL, r.Version, r.License)
}
w.Flush()
return nil
}
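// uses lists every package in the project and its dependency graph that
// imports pkg, either directly (-d) or transitively.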
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
			return clientInfo.err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
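// test go-gets the package, checks out the requested version, runs "go test"
// (optionally under sudo -E), and restores the original branch afterwards.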
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func vendorNew(cwd, sourceRoot, projectRoot, repo string, ri repoInfo) error {
_, ok := repos[repo]
if ok {
return fmt.Errorf("%s is already vendored", repo)
}
repos[repo] = ri
if err := writeRepos(cwd); err != nil {
return err
}
return vendor(cwd, projectRoot, sourceRoot)
}
func unvendor(cwd, sourceRoot, projectRoot, repo string) error {
_, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not vendored", repo)
}
delete(repos, repo)
if err := writeRepos(cwd); err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err := os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
fmt.Printf("%s unvendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well\n", repo)
return nil
}
func runCommand(cwd, sourceRoot string, args []string) error {
var err error
projectRoot := cwd[len(sourceRoot)+1:]
switch args[1] {
case "check":
err = check(cwd, projectRoot)
case "vendor":
err = vendor(cwd, projectRoot, sourceRoot)
case "deps":
err = deps(projectRoot)
case "packages":
err = packages(cwd, projectRoot)
case "uses":
fs := flag.NewFlagSet("uses", flag.ExitOnError)
direct := false
fs.BoolVar(&direct, "d", false, "output direct dependencies only")
if err := fs.Parse(args[2:]); err != nil {
return err
}
if len(fs.Args()) == 0 {
return fmt.Errorf("Missing package for uses command")
}
err = uses(fs.Args()[0], projectRoot, direct)
case "updates":
err = updates(sourceRoot, projectRoot)
case "test":
fs := flag.NewFlagSet("test", flag.ExitOnError)
sudo := false
fs.BoolVar(&sudo, "s", false, "run tests with sudo")
if err := fs.Parse(args[2:]); err != nil {
return err
}
args = fs.Args()
err = test(sudo, sourceRoot, projectRoot, args[0], args[1], args[2:])
case "revendor":
err = revendor(cwd, sourceRoot, projectRoot, args[2], args[3])
case "vendornew":
ri := repoInfo{URL: args[5], Version: args[3], License: args[4]}
err = vendorNew(cwd, sourceRoot, projectRoot, args[2], ri)
case "unvendor":
err = unvendor(cwd, sourceRoot, projectRoot, args[2])
}
return err
}
func readRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := ioutil.ReadFile(packageFile)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("Unable to read %s : %v", packageFile, err)
}
return nil
}
err = json.Unmarshal(d, &repos)
if err != nil {
return fmt.Errorf("Unable to unmarshall %s : %v", packageFile, err)
}
return nil
}
func writeRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := json.MarshalIndent(&repos, "", "\t")
if err != nil {
return fmt.Errorf("Unable to marhsall %s : %v", packageFile, err)
}
err = ioutil.WriteFile(packageFile, d, 0755)
if err != nil {
return fmt.Errorf("Unable to write %s : %v", packageFile, err)
}
return nil
}
func checkTwoArgs(args []string) bool {
return (len(args) == 2 &&
(args[1] == "vendor" || args[1] == "check" || args[1] == "deps" ||
args[1] == "packages" || args[1] == "updates"))
}
func checkArgs(args []string) bool {
return checkTwoArgs(args) ||
(len(args) == 3 && (args[1] == "unvendor")) ||
(len(args) >= 3 && (args[1] == "uses")) ||
(len(args) == 4 && (args[1] == "revendor")) ||
(len(args) == 6 && (args[1] == "vendornew")) ||
(len(args) >= 4 && (args[1] == "test"))
}
func main() {
if !checkArgs(os.Args) {
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendor|check|deps|packages|updates")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor uses [-d] package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor test package version [go-test flags]")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor unvendor package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor revendor package version")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendornew package version license URL")
os.Exit(1)
}
cwd, goPath, err := checkWD()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
err = readRepos(cwd)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
sourceRoot := path.Join(goPath, "src")
if len(cwd) < len(sourceRoot)+1 {
fmt.Fprintln(os.Stderr, "Could not determine project root")
os.Exit(1)
}
err = runCommand(cwd, sourceRoot, os.Args)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
} | for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap) | random_line_split |
ciao-vendor.go | //
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"text/tabwriter"
)
type repoInfo struct {
URL string `json:"url"`
Version string `json:"version"`
License string `json:"license"`
}
type packageDeps struct {
p string
deps []string
}
type packageInfo struct {
name string
vendored bool
installed bool
CGO bool `json:"cgo"`
Standard bool `json:"standard"`
}
type subPackage struct {
name string
wildcard string
docs []string
cgo bool
}
type clientInfo struct {
name string
err error
}
type piList []*packageInfo
func (p piList) Len() int {
return len(p)
}
func (p piList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func (p piList) Less(i, j int) bool {
return p[i].name < p[j].name
}
var repos = map[string]repoInfo{}
var listTemplate = `
{{- range .Deps -}}
{{.}}
{{end -}}
`
var directTemplate = `
{{- range .Imports -}}
{{.}}
{{end -}}
`
func getPackageDetails(name string) *packageInfo {
packageTemplate := `{
"standard" : {{.Standard}},
"cgo" : {{if .CFiles}}true{{else}}false{{end}}
}`
pi := &packageInfo{name: name}
cmd := exec.Command("go", "list", "-f", packageTemplate, name)
output, err := cmd.Output()
if err != nil {
return pi
}
pi.installed = true
_ = json.Unmarshal(output, pi)
return pi
}
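// getPackageDependencies runs "go list -f <template>" over the given packages
// and returns the set of import paths printed by the template, one per line.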
func getPackageDependencies(packages []string, template string) (map[string]struct{}, error) {
deps := make(map[string]struct{})
args := []string{"list", "-f", template}
args = append(args, packages...)
var output bytes.Buffer
cmd := exec.Command("go", args...)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
for scanner.Scan() {
deps[scanner.Text()] = struct{}{}
}
return deps, nil
}
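// calcDeps computes the project's external dependencies in parallel: one
// goroutine per package queries "go list"; standard-library packages and
// non-vendored local packages are dropped, and the result is sorted by name.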
func calcDeps(projectRoot string, packages []string) (piList, error) {
deps, err := getPackageDependencies(packages, listTemplate)
if err != nil {
return nil, err
}
ch := make(chan *packageInfo)
for pkg := range deps {
go func(pkg string) {
localDep := strings.HasPrefix(pkg, projectRoot)
vendoredDep := strings.HasPrefix(pkg, path.Join(projectRoot, "vendor"))
if localDep && !vendoredDep {
ch <- nil
} else {
pd := getPackageDetails(pkg)
if pd.Standard {
ch <- nil
} else {
pd.vendored = vendoredDep
ch <- pd
}
}
}(pkg)
}
depsAr := make(piList, 0, len(deps))
for i := 0; i < cap(depsAr); i++ {
pd := <-ch
if pd != nil {
depsAr = append(depsAr, pd)
}
}
sort.Sort(depsAr)
return depsAr, nil
}
func checkWD() (string, string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", "", fmt.Errorf("Unable to determine cwd: %v", err)
}
gopath, _ := os.LookupEnv("GOPATH")
if gopath == "" {
return "", "", fmt.Errorf("GOPATH is not set")
}
pths := strings.Split(gopath, ":")
for _, p := range pths {
if strings.HasPrefix(cwd, path.Join(p, "src")) {
return cwd, p, nil
}
}
return "", "", fmt.Errorf("ciao-vendor must be run from $GOPATH/src/path/to/project")
}
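// copyRepos exports each pinned repository with "git archive <version>" and
// pipes the tarball into tar, extracting only the Go sources, C sources (for
// CGO packages) and doc files needed by the computed sub-packages into vendor/.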
func copyRepos(cwd, sourceRoot string, subPackages map[string][]*subPackage) error {
errCh := make(chan error)
for k, r := range repos {
go func(k string, URL string) {
packages, ok := subPackages[k]
if !ok {
errCh <- nil
return
}
cmd1 := exec.Command("git", "archive", repos[k].Version)
cmd1.Dir = path.Join(sourceRoot, k)
os.MkdirAll(path.Join(cwd, "vendor", k), 0755)
args := []string{"-xC", path.Join(cwd, "vendor", k), "--wildcards",
"--no-wildcards-match-slash"}
for _, a := range packages {
if a.wildcard != "" {
args = append(args, a.wildcard+".go")
}
if a.cgo {
args = append(args, a.wildcard+".[ch]")
}
args = append(args, a.docs...)
}
args = append(args, "--exclude", "*_test.go")
cmd2 := exec.Command("tar", args...)
pipe, err := cmd1.StdoutPipe()
if err != nil {
errCh <- fmt.Errorf("Unable to retrieve pipe for git command %v: %v", args, err)
return
}
defer func() {
_ = pipe.Close()
}()
cmd2.Stdin = pipe
err = cmd1.Start()
if err != nil {
errCh <- fmt.Errorf("Unable to start git command %v: %v", args, err)
return
}
err = cmd2.Run()
if err != nil {
errCh <- fmt.Errorf("Unable to run tar command %v", err)
return
}
errCh <- nil
}(k, r.URL)
}
var err error
for range repos {
rcvErr := <-errCh
if err == nil && rcvErr != nil {
err = rcvErr
}
}
return err
}
func updateNonVendoredDeps(deps piList, projectRoot string) error {
fmt.Println("Updating non-vendored dependencies")
goGot := make(map[string]struct{})
for _, d := range deps {
args := []string{"get", "-v"}
var repoFound string
for k := range repos {
if strings.HasPrefix(d.name, k) {
repoFound = k
break
}
}
if _, ok := goGot[repoFound]; !ok {
args = append(args, "-u")
}
args = append(args, d.name)
cmd := exec.Command("go", args...)
stdout, err := cmd.StderrPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
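// computeSubPackages groups the dependency list by known repository root and
// records which sub-directories (and whether CGO sources) each repository
// needs to have extracted when vendoring.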
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao; this would pull down
// the dependencies of the master version of ciao's dependencies,
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
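// In short: check out the pinned versions, compute the dependencies, update
// anything not yet vendored, recompute, work out which sub-packages and docs
// to copy, and finally populate vendor/ from the pinned git archives.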
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
func depsByPackage(packages piList) map[string][]string |
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string)
for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap)
}
return clientMap
}
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
if !d.vendored {
cmd := exec.Command("go", "list", path.Join(vendorRoot, d.name))
if cmd.Run() != nil {
notVendored = append(notVendored, d.name)
}
}
reposUsed[k] = struct{}{}
continue depLoop
}
}
missing = append(missing, d.name)
}
for k := range repos {
if _, ok := reposUsed[k]; !ok {
notUsed = append(notUsed, k)
}
}
return missing, uninstalled, notVendored, notUsed
}
func checkKnown(missing []string, deps piList) bool {
if len(missing) == 0 {
fmt.Println("All Dependencies Known: [OK]")
return true
}
clientMap := computeClients(deps)
fmt.Println("All Dependencies Known: [FAIL]")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Missing Package\tUsed By")
for _, d := range missing {
fmt.Fprintf(w, "%s\t%s\n", d, clientMap[d])
}
w.Flush()
fmt.Println("")
return false
}
func checkUninstalled(uninstalled []string) bool {
if len(uninstalled) == 0 {
fmt.Println("All Dependencies Installed: [OK]")
return true
}
fmt.Println("All Dependencies Installed: [FAIL]")
for _, d := range uninstalled {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkVendored(notVendored []string) bool {
if len(notVendored) == 0 {
fmt.Println("All Dependencies Vendored: [OK]")
return true
}
fmt.Println("All Dependencies Vendored: [FAIL]")
for _, d := range notVendored {
fmt.Printf("\t%s\n", d)
}
fmt.Println("")
return false
}
func checkNotUsed(notUsed []string) bool {
if len(notUsed) == 0 {
fmt.Println("All Dependencies Used: [OK]")
return true
}
fmt.Println("All Dependencies Used: [FAIL]")
for _, k := range notUsed {
fmt.Println(k)
}
return false
}
func check(cwd, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
ok := checkKnown(missing, deps)
ok = checkUninstalled(uninstalled) && ok
ok = checkVendored(notVendored) && ok
ok = checkNotUsed(notUsed) && ok
if !ok {
return fmt.Errorf("Dependency checks failed")
}
return nil
}
func packages(cwd, projectRoot string) error {
uninstalledDeps := false
plist, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\tRepo\tVersion\tLicense")
for _, d := range plist {
fmt.Fprintf(w, "%s\t", d.name)
r := ""
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
r = k
break
}
}
if d.vendored {
fmt.Fprintf(w, "Vendored\t")
} else if d.installed {
fmt.Fprintf(w, "GOPATH\t")
} else {
fmt.Fprintf(w, "Missing\t")
uninstalledDeps = true
}
if repos[r].URL != "" {
fmt.Fprintf(w, "%s\t", r)
if d.vendored {
fmt.Fprintf(w, "%s\t", repos[r].Version)
} else {
fmt.Fprintf(w, "master\t")
}
fmt.Fprintf(w, "%s", repos[r].License)
} else {
fmt.Fprintf(w, "Unknown\tUnknown\tUnknown")
}
fmt.Fprintln(w)
}
w.Flush()
if uninstalledDeps {
fmt.Println("")
return fmt.Errorf("Some dependencies are not installed. Unable to provide complete dependency list")
}
return nil
}
func deps(projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
if len(missing) != 0 || len(uninstalled) != 0 || len(notVendored) != 0 || len(notUsed) != 0 {
return fmt.Errorf("Dependencies out of sync. Please run go ciao-vendor/ciao-vendor.go check")
}
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package Root\tRepo\tVersion\tLicense")
for _, k := range keys {
r := repos[k]
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", k, r.URL, r.Version, r.License)
}
w.Flush()
return nil
}
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
			return clientInfo.err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func vendorNew(cwd, sourceRoot, projectRoot, repo string, ri repoInfo) error {
_, ok := repos[repo]
if ok {
return fmt.Errorf("%s is already vendored", repo)
}
repos[repo] = ri
if err := writeRepos(cwd); err != nil {
return err
}
return vendor(cwd, projectRoot, sourceRoot)
}
func unvendor(cwd, sourceRoot, projectRoot, repo string) error {
_, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not vendored", repo)
}
delete(repos, repo)
if err := writeRepos(cwd); err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err := os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
fmt.Printf("%s unvendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well\n", repo)
return nil
}
func runCommand(cwd, sourceRoot string, args []string) error {
var err error
projectRoot := cwd[len(sourceRoot)+1:]
switch args[1] {
case "check":
err = check(cwd, projectRoot)
case "vendor":
err = vendor(cwd, projectRoot, sourceRoot)
case "deps":
err = deps(projectRoot)
case "packages":
err = packages(cwd, projectRoot)
case "uses":
fs := flag.NewFlagSet("uses", flag.ExitOnError)
direct := false
fs.BoolVar(&direct, "d", false, "output direct dependencies only")
if err := fs.Parse(args[2:]); err != nil {
return err
}
if len(fs.Args()) == 0 {
return fmt.Errorf("Missing package for uses command")
}
err = uses(fs.Args()[0], projectRoot, direct)
case "updates":
err = updates(sourceRoot, projectRoot)
case "test":
fs := flag.NewFlagSet("test", flag.ExitOnError)
sudo := false
fs.BoolVar(&sudo, "s", false, "run tests with sudo")
if err := fs.Parse(args[2:]); err != nil {
return err
}
args = fs.Args()
err = test(sudo, sourceRoot, projectRoot, args[0], args[1], args[2:])
case "revendor":
err = revendor(cwd, sourceRoot, projectRoot, args[2], args[3])
case "vendornew":
ri := repoInfo{URL: args[5], Version: args[3], License: args[4]}
err = vendorNew(cwd, sourceRoot, projectRoot, args[2], ri)
case "unvendor":
err = unvendor(cwd, sourceRoot, projectRoot, args[2])
}
return err
}
func readRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := ioutil.ReadFile(packageFile)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("Unable to read %s : %v", packageFile, err)
}
return nil
}
err = json.Unmarshal(d, &repos)
if err != nil {
return fmt.Errorf("Unable to unmarshall %s : %v", packageFile, err)
}
return nil
}
func writeRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := json.MarshalIndent(&repos, "", "\t")
if err != nil {
return fmt.Errorf("Unable to marhsall %s : %v", packageFile, err)
}
err = ioutil.WriteFile(packageFile, d, 0755)
if err != nil {
return fmt.Errorf("Unable to write %s : %v", packageFile, err)
}
return nil
}
func checkTwoArgs(args []string) bool {
return (len(args) == 2 &&
(args[1] == "vendor" || args[1] == "check" || args[1] == "deps" ||
args[1] == "packages" || args[1] == "updates"))
}
func checkArgs(args []string) bool {
return checkTwoArgs(args) ||
(len(args) == 3 && (args[1] == "unvendor")) ||
(len(args) >= 3 && (args[1] == "uses")) ||
(len(args) == 4 && (args[1] == "revendor")) ||
(len(args) == 6 && (args[1] == "vendornew")) ||
(len(args) >= 4 && (args[1] == "test"))
}
func main() {
if !checkArgs(os.Args) {
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendor|check|deps|packages|updates")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor uses [-d] package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor test package version [go-test flags]")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor unvendor package")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor revendor package version")
fmt.Fprintln(os.Stderr, "Usage: ciao-vendor vendornew package version license URL")
os.Exit(1)
}
cwd, goPath, err := checkWD()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
err = readRepos(cwd)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
sourceRoot := path.Join(goPath, "src")
if len(cwd) < len(sourceRoot)+1 {
fmt.Fprintln(os.Stderr, "Could not determine project root")
os.Exit(1)
}
err = runCommand(cwd, sourceRoot, os.Args)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call get list on %s : %v", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps}
}(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
} | identifier_body |
newtoon.py | import os
import re
import sys
from textwrap import wrap
import zipfile
from contextlib import asynccontextmanager
from datetime import datetime
from io import BytesIO
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Tuple, Union, Type, Callable
import asyncio
import aiohttp
from asyncio_pool import AioPool
from bs4 import BeautifulSoup
from motorized import (Document, EmbeddedDocument, Field, PrivatesAttrsMixin,
QuerySet, Q)
from pydantic import HttpUrl, validator
from selenium import webdriver
import undetected_chromedriver as uc
from functools import wraps
from PIL import Image, UnidentifiedImageError
import ssl
FIREFOX = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
CHROME_LINUX = 'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
CHROME = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
def raise_on_any_error_from_pool(pool_result: List[Optional[Exception]]):
errors = list(filter(None, pool_result))
for error in errors:
if isinstance(error, Exception):
raise error
def retry(count: int, *exceptions: List[Type[Exception]], delay: int = 0):
def wrapper(func):
@wraps(func)
async def decorator(*args, **kwargs):
for retry_index in range(count + 1):
try:
return await func(*args, **kwargs)
                except Exception as error:
                    if type(error) not in exceptions:
                        raise
                    if delay:
                        await asyncio.sleep(delay)
return decorator
return wrapper
def error_handler(error: Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
    - nexts (return the list of next chapters after the current instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
        Download the files related to this chapter.
        Return True if the chapter archive is present on disk,
        False otherwise.
        Note that presence on disk can be True in multiple cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages): | filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
return self.episode > other.episode
def __lt__(self, other: "Chapter") -> bool:
return self.episode < other.episode
def __eq__(self, other: "Chapter") -> bool:
return self.episode == other.episode
class ToonManager(QuerySet):
async def leech(self, pool_size: int = 1, driver: Optional[uc.Chrome] = None) -> None:
        # we want to iterate over all toons that are not explicitly finished.
async for toon in self.filter(Q(finished=False) | Q(finished__exists=False)):
# if this can have a driver
if isinstance(toon, SeleniumMixin):
# and the driver in toon is set but not in global, we set it
if toon._driver and not driver:
driver = toon._driver
                # otherwise if we have a driver but the toon does not, we set it
elif driver:
toon._driver = driver
await toon.leech(pool_size)
async def drop(self):
        # prevent dropping the whole table, just drop the current filtering
await self.delete()
class WebToonPacked(Document):
"""
Things to override:
properties:
- url
"""
name: str
lang: str = Field(max_length=2)
finished: bool = False
domain: str
created: datetime = Field(default_factory=datetime.utcnow)
updated: Optional[datetime] = None
gender: Optional[str]
corporate: bool = True
chapters: List[Chapter] = []
    # internal use, for future development.
version: int = 2
_quote_cookies: bool = False
_lowerize_headers: bool = False
class Mongo:
manager_class = ToonManager
def __str__(self):
return self.name
@property
def url(self) -> str:
raise NotImplementedError
@property
def path(self) -> str:
if not self.corporate:
return f'/mnt/aiur/Scans/Toons/Ero/{self.name}'
return f'/mnt/aiur/Scans/Toons/{self.name}'
def update_chapters_parent(self) -> None:
for chapter in self.chapters:
chapter._parent = self
async def create_folder(self) -> None:
if not os.path.exists(self.path):
os.mkdir(self.path)
async def save(self, *args, **kwargs):
self.updated = datetime.utcnow()
return await super().save(*args, **kwargs)
async def leech(self, pool_size: int = 1):
await self.create_folder()
print(f'--- {self.name} ---')
# check for missing chapters on local
for chapter in self.chapters:
chapter._parent = self
await chapter.pull(pool_size)
if not self.chapters:
return
nexts = await self.chapters[-1].nexts()
if not nexts:
return
self.chapters.extend(nexts)
for chapter in nexts:
chapter._parent = self
await chapter.pull(pool_size)
await self.save()
def get_headers(self) -> Dict[str, str]:
headers = {
'Referer': self.url,
'User-Agent': FIREFOX,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'max-age=0'
}
if getattr(self, '_lowerize_headers', False):
headers = dict({k.lower(): v for k, v in headers.items()})
return headers
def get_cookies(self) -> Dict[str, str]:
return {}
@property
def ssl_context(self) -> ssl.SSLContext | None:
return None
@asynccontextmanager
async def get_client(self):
session = aiohttp.ClientSession(
headers=self.get_headers(),
cookie_jar=self.get_cookie_jar(),
)
async with session as client:
yield client
async def parse_url(self, url: str, method: str = 'get') -> BeautifulSoup:
async with self.get_client() as client:
func = getattr(client, method)
request = func(url)
async with request as response:
response.raise_for_status()
page_content = await response.read()
page = BeautifulSoup(page_content, 'lxml')
return page
def get_cookie_jar(self) -> aiohttp.CookieJar:
loop = aiohttp.helpers.get_running_loop()
jar = aiohttp.CookieJar(loop=loop, unsafe=True, quote_cookie=self._quote_cookies)
cookies: Optional[Dict] = self.get_cookies()
if cookies is not None:
jar.update_cookies(cookies)
return jar | random_line_split |
|
newtoon.py | import os
import re
import sys
from textwrap import wrap
import zipfile
from contextlib import asynccontextmanager
from datetime import datetime
from io import BytesIO
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Tuple, Union, Type, Callable
import asyncio
import aiohttp
from asyncio_pool import AioPool
from bs4 import BeautifulSoup
from motorized import (Document, EmbeddedDocument, Field, PrivatesAttrsMixin,
QuerySet, Q)
from pydantic import HttpUrl, validator
from selenium import webdriver
import undetected_chromedriver as uc
from functools import wraps
from PIL import Image, UnidentifiedImageError
import ssl
FIREFOX = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
CHROME_LINUX = 'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
CHROME = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
def raise_on_any_error_from_pool(pool_result: List[Optional[Exception]]):
errors = list(filter(None, pool_result))
for error in errors:
if isinstance(error, Exception):
raise error
def retry(count: int, *exceptions: List[Type[Exception]], delay: int = 0):
def wrapper(func):
@wraps(func)
async def decorator(*args, **kwargs):
for retry_index in range(count + 1):
try:
return await func(*args, **kwargs)
                except Exception as error:
                    if type(error) not in exceptions:
                        raise
                    if delay:
                        await asyncio.sleep(delay)
return decorator
return wrapper
def error_handler(error: Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
    - nexts (return the list of next chapters after the current instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
        Download the files related to this chapter.
        Return True if the chapter archive is present on disk,
        False otherwise.
        Note that presence on disk can be True in multiple cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
|
def __lt__(self, other: "Chapter") -> bool:
return self.episode < other.episode
def __eq__(self, other: "Chapter") -> bool:
return self.episode == other.episode
class ToonManager(QuerySet):
async def leech(self, pool_size: int = 1, driver: Optional[uc.Chrome] = None) -> None:
# we want to iterate over all toons that are not explictly finished.
async for toon in self.filter(Q(finished=False) | Q(finished__exists=False)):
# if this can have a driver
if isinstance(toon, SeleniumMixin):
# and the driver in toon is set but not in global, we set it
if toon._driver and not driver:
driver = toon._driver
                # otherwise if we have a driver but the toon does not, we set it
elif driver:
toon._driver = driver
await toon.leech(pool_size)
async def drop(self):
        # prevent dropping the whole table, just drop the current filtering
await self.delete()
class WebToonPacked(Document):
"""
Things to override:
properties:
- url
"""
name: str
lang: str = Field(max_length=2)
finished: bool = False
domain: str
created: datetime = Field(default_factory=datetime.utcnow)
updated: Optional[datetime] = None
gender: Optional[str]
corporate: bool = True
chapters: List[Chapter] = []
    # internal use, for future development.
version: int = 2
_quote_cookies: bool = False
_lowerize_headers: bool = False
class Mongo:
manager_class = ToonManager
def __str__(self):
return self.name
@property
def url(self) -> str:
raise NotImplementedError
@property
def path(self) -> str:
if not self.corporate:
return f'/mnt/aiur/Scans/Toons/Ero/{self.name}'
return f'/mnt/aiur/Scans/Toons/{self.name}'
def update_chapters_parent(self) -> None:
for chapter in self.chapters:
chapter._parent = self
async def create_folder(self) -> None:
if not os.path.exists(self.path):
os.mkdir(self.path)
async def save(self, *args, **kwargs):
self.updated = datetime.utcnow()
return await super().save(*args, **kwargs)
async def leech(self, pool_size: int = 1):
await self.create_folder()
print(f'--- {self.name} ---')
# check for missing chapters on local
for chapter in self.chapters:
chapter._parent = self
await chapter.pull(pool_size)
if not self.chapters:
return
nexts = await self.chapters[-1].nexts()
if not nexts:
return
self.chapters.extend(nexts)
for chapter in nexts:
chapter._parent = self
await chapter.pull(pool_size)
await self.save()
def get_headers(self) -> Dict[str, str]:
headers = {
'Referer': self.url,
'User-Agent': FIREFOX,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'max-age=0'
}
if getattr(self, '_lowerize_headers', False):
headers = dict({k.lower(): v for k, v in headers.items()})
return headers
def get_cookies(self) -> Dict[str, str]:
return {}
@property
def ssl_context(self) -> ssl.SSLContext | None:
return None
@asynccontextmanager
async def get_client(self):
session = aiohttp.ClientSession(
headers=self.get_headers(),
cookie_jar=self.get_cookie_jar(),
)
async with session as client:
yield client
async def parse_url(self, url: str, method: str = 'get') -> BeautifulSoup:
async with self.get_client() as client:
func = getattr(client, method)
request = func(url)
async with request as response:
response.raise_for_status()
page_content = await response.read()
page = BeautifulSoup(page_content, 'lxml')
return page
def get_cookie_jar(self) -> aiohttp.CookieJar:
loop = aiohttp.helpers.get_running_loop()
jar = aiohttp.CookieJar(loop=loop, unsafe=True, quote_cookie=self._quote_cookies)
cookies: Optional[Dict] = self.get_cookies()
if cookies is not None:
jar.update_cookies(cookies)
return jar
| return self.episode > other.episode | identifier_body |
newtoon.py | import os
import re
import sys
from textwrap import wrap
import zipfile
from contextlib import asynccontextmanager
from datetime import datetime
from io import BytesIO
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Tuple, Union, Type, Callable
import asyncio
import aiohttp
from asyncio_pool import AioPool
from bs4 import BeautifulSoup
from motorized import (Document, EmbeddedDocument, Field, PrivatesAttrsMixin,
QuerySet, Q)
from pydantic import HttpUrl, validator
from selenium import webdriver
import undetected_chromedriver as uc
from functools import wraps
from PIL import Image, UnidentifiedImageError
import ssl
FIREFOX = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
CHROME_LINUX = 'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
CHROME = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
def raise_on_any_error_from_pool(pool_result: List[Optional[Exception]]):
errors = list(filter(None, pool_result))
for error in errors:
if isinstance(error, Exception):
raise error
def retry(count: int, *exceptions: List[Type[Exception]], delay: int = 0):
def wrapper(func):
@wraps(func)
async def decorator(*args, **kwargs):
for retry_index in range(count + 1):
try:
return await func(*args, **kwargs)
except Exception as error:
if type(error) in exceptions:
if delay:
await asyncio.sleep(delay)
continue
return decorator
return wrapper
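# Hedged usage sketch (added for illustration, not part of the original module):
# shows how the retry decorator above is meant to be applied. The function name,
# exception type and delay are assumptions.
@retry(3, aiohttp.ClientError, delay=2)
async def _example_fetch_with_retry(client: aiohttp.ClientSession, url: str) -> bytes:
    # The wrapped coroutine is re-awaited up to 3 extra times when an
    # aiohttp.ClientError is raised, sleeping 2 seconds between attempts.
    async with client.get(url) as response:
        response.raise_for_status()
        return await response.read()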
def error_handler(error: Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
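# Hedged usage sketch (illustrative only): InMemoryZipFile builds the .cbz
# archive entirely in memory and only touches the filesystem in save().
# The helper below and its arguments are assumptions, not original code.
def _example_build_cbz(pages: Dict[str, bytes], destination: str) -> None:
    archive = InMemoryZipFile(destination)
    for filename, data in pages.items():
        archive.write(filename, data)
    # save() closes the underlying ZipFile, rewinds the buffer and writes it to disk
    archive.save()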
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
|
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the current instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&amp;', '&') \
.replace('&#39;', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
Download the files related to this chapter.
Return True if the chapter archive is present on disk,
False otherwise.
Note that presence on disk can be True in multiple cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website', None)
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
return self.episode > other.episode
def __lt__(self, other: "Chapter") -> bool:
return self.episode < other.episode
def __eq__(self, other: "Chapter") -> bool:
return self.episode == other.episode
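# Hedged sketch of a site-specific subclass (illustrative only, not part of the
# original module): per the Chapter docstring above, a concrete chapter provides
# `url` and `get_pages_urls`. The URL scheme and CSS selector are assumptions.
class _ExampleChapter(Chapter):
    @property
    def url(self) -> str:
        return f'https://example.invalid/chapter/{self.episode}'
    async def get_pages_urls(self) -> List[HttpUrl]:
        # delegate the HTTP + parsing work to the parent WebToonPacked instance
        page = await self._parent.parse_url(self.url)
        return [img['src'] for img in page.find_all('img', {'class': 'page'})]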
class ToonManager(QuerySet):
async def leech(self, pool_size: int = 1, driver: Optional[uc.Chrome] = None) -> None:
# we want to iterate over all toons that are not explicitly finished.
async for toon in self.filter(Q(finished=False) | Q(finished__exists=False)):
# if this can have a driver
if isinstance(toon, SeleniumMixin):
# and the toon's driver is set but the global one is not, adopt it as the global driver
if toon._driver and not driver:
driver = toon._driver
# otherwise if we have a global driver but the toon does not, set it on the toon
elif driver:
toon._driver = driver
await toon.leech(pool_size)
async def drop(self):
# prevent dropping the whole table; only drop the current filtering
await self.delete()
class WebToonPacked(Document):
"""
Things to override:
properties:
- url
"""
name: str
lang: str = Field(max_length=2)
finished: bool = False
domain: str
created: datetime = Field(default_factory=datetime.utcnow)
updated: Optional[datetime] = None
gender: Optional[str]
corporate: bool = True
chapters: List[Chapter] = []
# internal use, for future development.
version: int = 2
_quote_cookies: bool = False
_lowerize_headers: bool = False
class Mongo:
manager_class = ToonManager
def __str__(self):
return self.name
@property
def url(self) -> str:
raise NotImplementedError
@property
def path(self) -> str:
if not self.corporate:
return f'/mnt/aiur/Scans/Toons/Ero/{self.name}'
return f'/mnt/aiur/Scans/Toons/{self.name}'
def update_chapters_parent(self) -> None:
for chapter in self.chapters:
chapter._parent = self
async def create_folder(self) -> None:
if not os.path.exists(self.path):
os.mkdir(self.path)
async def save(self, *args, **kwargs):
self.updated = datetime.utcnow()
return await super().save(*args, **kwargs)
async def leech(self, pool_size: int = 1):
await self.create_folder()
print(f'--- {self.name} ---')
# check for missing chapters on local
for chapter in self.chapters:
chapter._parent = self
await chapter.pull(pool_size)
if not self.chapters:
return
nexts = await self.chapters[-1].nexts()
if not nexts:
return
self.chapters.extend(nexts)
for chapter in nexts:
chapter._parent = self
await chapter.pull(pool_size)
await self.save()
def get_headers(self) -> Dict[str, str]:
headers = {
'Referer': self.url,
'User-Agent': FIREFOX,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'max-age=0'
}
if getattr(self, '_lowerize_headers', False):
headers = dict({k.lower(): v for k, v in headers.items()})
return headers
def get_cookies(self) -> Dict[str, str]:
return {}
@property
def ssl_context(self) -> ssl.SSLContext | None:
return None
@asynccontextmanager
async def get_client(self):
session = aiohttp.ClientSession(
headers=self.get_headers(),
cookie_jar=self.get_cookie_jar(),
)
async with session as client:
yield client
async def parse_url(self, url: str, method: str = 'get') -> BeautifulSoup:
async with self.get_client() as client:
func = getattr(client, method)
request = func(url)
async with request as response:
response.raise_for_status()
page_content = await response.read()
page = BeautifulSoup(page_content, 'lxml')
return page
def get_cookie_jar(self) -> aiohttp.CookieJar:
loop = aiohttp.helpers.get_running_loop()
jar = aiohttp.CookieJar(loop=loop, unsafe=True, quote_cookie=self._quote_cookies)
cookies: Optional[Dict] = self.get_cookies()
if cookies is not None:
jar.update_cookies(cookies)
return jar
| options.headless = True
options.add_argument('--headless') | conditional_block |
newtoon.py | import os
import re
import sys
from textwrap import wrap
import zipfile
from contextlib import asynccontextmanager
from datetime import datetime
from io import BytesIO
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Tuple, Union, Type, Callable
import asyncio
import aiohttp
from asyncio_pool import AioPool
from bs4 import BeautifulSoup
from motorized import (Document, EmbeddedDocument, Field, PrivatesAttrsMixin,
QuerySet, Q)
from pydantic import HttpUrl, validator
from selenium import webdriver
import undetected_chromedriver as uc
from functools import wraps
from PIL import Image, UnidentifiedImageError
import ssl
FIREFOX = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
CHROME_LINUX = 'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
CHROME = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
def raise_on_any_error_from_pool(pool_result: List[Optional[Exception]]):
errors = list(filter(None, pool_result))
for error in errors:
if isinstance(error, Exception):
raise error
def retry(count: int, *exceptions: List[Type[Exception]], delay: int = 0):
def wrapper(func):
@wraps(func)
async def decorator(*args, **kwargs):
for retry_index in range(count + 1):
try:
return await func(*args, **kwargs)
except Exception as error:
if type(error) in exceptions:
if delay:
await asyncio.sleep(delay)
continue
return decorator
return wrapper
def error_handler(error: Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
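# Hedged usage sketch (illustrative only): error_handler turns the chosen exception
# into a printed message and a None return value. The names below are assumptions.
@error_handler(aiohttp.ClientResponseError, 'page could not be fetched')
async def _example_fetch_or_none(client: aiohttp.ClientSession, url: str) -> Optional[bytes]:
    async with client.get(url) as response:
        response.raise_for_status()
        return await response.read()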
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the current instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&amp;', '&') \
.replace('&#39;', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
Download the files related to this chapter.
Return True if the chapter archive is present on disk,
False otherwise.
Note that presence on disk can be True in multiple cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website', None)
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
return self.episode > other.episode
def __lt__(self, other: "Chapter") -> bool:
return self.episode < other.episode
def __eq__(self, other: "Chapter") -> bool:
return self.episode == other.episode
class | (QuerySet):
async def leech(self, pool_size: int = 1, driver: Optional[uc.Chrome] = None) -> None:
# we want to iterate over all toons that are not explicitly finished.
async for toon in self.filter(Q(finished=False) | Q(finished__exists=False)):
# if this can have a driver
if isinstance(toon, SeleniumMixin):
# and the toon's driver is set but the global one is not, adopt it as the global driver
if toon._driver and not driver:
driver = toon._driver
# otherwise if we have a global driver but the toon does not, set it on the toon
elif driver:
toon._driver = driver
await toon.leech(pool_size)
async def drop(self):
# prevent dropping the whole table; only drop the current filtering
await self.delete()
class WebToonPacked(Document):
"""
Things to override:
properties:
- url
"""
name: str
lang: str = Field(max_length=2)
finished: bool = False
domain: str
created: datetime = Field(default_factory=datetime.utcnow)
updated: Optional[datetime] = None
gender: Optional[str]
corporate: bool = True
chapters: List[Chapter] = []
# internal use, for future development.
version: int = 2
_quote_cookies: bool = False
_lowerize_headers: bool = False
class Mongo:
manager_class = ToonManager
def __str__(self):
return self.name
@property
def url(self) -> str:
raise NotImplementedError
@property
def path(self) -> str:
if not self.corporate:
return f'/mnt/aiur/Scans/Toons/Ero/{self.name}'
return f'/mnt/aiur/Scans/Toons/{self.name}'
def update_chapters_parent(self) -> None:
for chapter in self.chapters:
chapter._parent = self
async def create_folder(self) -> None:
if not os.path.exists(self.path):
os.mkdir(self.path)
async def save(self, *args, **kwargs):
self.updated = datetime.utcnow()
return await super().save(*args, **kwargs)
async def leech(self, pool_size: int = 1):
await self.create_folder()
print(f'--- {self.name} ---')
# check for missing chapters on local
for chapter in self.chapters:
chapter._parent = self
await chapter.pull(pool_size)
if not self.chapters:
return
nexts = await self.chapters[-1].nexts()
if not nexts:
return
self.chapters.extend(nexts)
for chapter in nexts:
chapter._parent = self
await chapter.pull(pool_size)
await self.save()
def get_headers(self) -> Dict[str, str]:
headers = {
'Referer': self.url,
'User-Agent': FIREFOX,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'max-age=0'
}
if getattr(self, '_lowerize_headers', False):
headers = dict({k.lower(): v for k, v in headers.items()})
return headers
def get_cookies(self) -> Dict[str, str]:
return {}
@property
def ssl_context(self) -> ssl.SSLContext | None:
return None
@asynccontextmanager
async def get_client(self):
session = aiohttp.ClientSession(
headers=self.get_headers(),
cookie_jar=self.get_cookie_jar(),
)
async with session as client:
yield client
async def parse_url(self, url: str, method: str = 'get') -> BeautifulSoup:
async with self.get_client() as client:
func = getattr(client, method)
request = func(url)
async with request as response:
response.raise_for_status()
page_content = await response.read()
page = BeautifulSoup(page_content, 'lxml')
return page
def get_cookie_jar(self) -> aiohttp.CookieJar:
loop = aiohttp.helpers.get_running_loop()
jar = aiohttp.CookieJar(loop=loop, unsafe=True, quote_cookie=self._quote_cookies)
cookies: Optional[Dict] = self.get_cookies()
if cookies is not None:
jar.update_cookies(cookies)
return jar
| ToonManager | identifier_name |
overlay.js | /* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Gmail Conversation View
*
* The Initial Developer of the Original Code is
* Mozilla messaging
* Portions created by the Initial Developer are Copyright (C) 2010
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/**
* tbParanoia namespace
*/
if (typeof(tbParanoia) === "undefined") {
var tbParanoia = {
/* Parse RFC-2822 header */
paranoiaParseHeaderString: function(headersStr) {
var headers = tbParanoia.paranoiaParseHeaderStringWithSplitter("\r\n",headersStr);
if (headers.length == 0) {
headers = tbParanoia.paranoiaParseHeaderStringWithSplitter("\n",headersStr);
}
return headers;
},
paranoiaParseHeaderStringWithSplitter: function(splitStr, headersStr) {
var hdrLines = headersStr.split(splitStr);
var headers = Array();
var currentHeader = "";
for(var i = 0; i < hdrLines.length; i++) {
line = hdrLines[i];
/* Strip spaces from start and end of line */
if(line[0] == " " || line[0] == "\t") {
currentHeader += " " + line.replace(/^\s+|\s+$/g, '');
}
else
{
/* No spaces - this is start of a new header */
if(currentHeader.length > 0) headers.push(currentHeader);
var currentHeader = line;
}
};
return headers;
},
/* Return only 'Received:' headers, parsed to objects */
paranoiaGetReceivedHeaders: function(parsedHeaders) {
var received = Array();
var secureMethods = ['SMTPS', 'ESMTPS', 'SMTPSA', 'ESMTPSA', 'AES256'];
for(var i = 0; i < parsedHeaders.length; i++) {
/* Regexp definition must stay in the loop - stupid JS won't match the same regexp twice */
var rcvdRegexp = /^Received:.*from\s+([^ ]+)\s+.*by ([^ ]+)\s+.*with\s+([A-Za-z0-9]+).*;.*$/g;
var rcvdIPRegexp = /^Received:.*from\s+([^ ]+)\s+[^\[]+\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\].*by ([^ ]+)\s+.*with\s+([A-Za-z0-9]+).*;.*$/g;
var header = parsedHeaders[i];
var matchedFrom = null;
var matchedTo = null;
var matchedMethod = null;
var matchedFromIP = null;
/* Try one regexp first */
var match = rcvdIPRegexp.exec(header);
if(match) {
matchedFrom = match[1];
matchedFromIP = match[2];
matchedTo = match[3];
matchedMethod = match[4];
}
/* Try another, if the first one failed */
if(!matchedFrom) {
var match = rcvdRegexp.exec(header);
if(match) {
matchedFrom = match[1];
matchedTo = match[2];
matchedMethod = match[3];
}
}
if(matchedFrom === null || matchedTo === null || matchedMethod === null) continue;
var local = tbParanoia.paranoiaIsHostLocal(matchedFrom) ||
tbParanoia.paranoiaIsHostLocal(matchedTo) ||
tbParanoia.paranoiaIsHostLocal(matchedFromIP) ||
tbParanoia.paranoiaGetDomainName(matchedFrom) == tbParanoia.paranoiaGetDomainName(matchedTo) ||
matchedMethod == 'local' ||
matchedFrom.replace(/^\s+|\s+$/g, '') == matchedTo.replace(/^\s+|\s+$/g, ''); // trim
received.push({
from: matchedFrom,
fromIP: matchedFromIP,
to: matchedTo,
method: matchedMethod,
local: local,
secure: (secureMethods.indexOf(matchedMethod.toUpperCase()) != -1),
toString: function() {
var secureSign = this.secure ? '✓' : '✗';
if(this.local) secureSign = '⌂';
return secureSign + ' ' + this.method + ": " + this.from + " ==> " + this.to;
}
});
}
return received;
},
/* Changes 'yandex' to 'Яндекс' */
paranoiaGetProviderDisplayName: function(provider) {
var providerDisplayNames = {
'yandex' : 'Яндекс',
'o2pl' : 'Grupa o2.pl',
'onet' : 'Onet.pl',
'wp': 'Wirtualna Polska',
'gadu': 'Gadu Gadu',
'qq': 'QQ',
'home': 'Home.pl',
}
if(providerDisplayNames[provider]) {
return providerDisplayNames[provider];
}
else {
return provider.charAt(0).toUpperCase() + provider.slice(1);
}
},
/* Finds known email provider from an array of 'Received:' headers */
paranoiaGetKnownProviders: function(receivedHeaders) {
var known = {
'yandex.net' : 'yandex',
'yandex.ru' : 'yandex',
'go2.pl' : 'o2pl',
'tlen.pl' : 'o2pl',
'o2.pl' : 'o2pl',
'google.com' : 'google',
'twitter.com' : 'twitter',
'facebook.com' : 'facebook',
'mailgun.us' : 'rackspace',
'mailgun.org' : 'rackspace',
'emailsrvr.com' : 'rackspace',
'rackspace.com' : 'rackspace',
'dreamhost.com' : 'dreamhost',
'linode.com' : 'linode',
'messagingengine.com' : 'opera',
'fastmail.fm' : 'opera',
'fastmail.net' : 'opera',
'onet.pl' : 'onet',
'sendgrid.com' : 'sendgrid',
'sendgrid.net' : 'sendgrid',
'wp.pl' : 'wp',
'hostgator.com' : 'hostgator',
'hostgator.net' : 'hostgator',
'interia.pl' : 'interia',
'yahoo.com' : 'yahoo',
'hotmail.com' : 'hotmail',
'outlook.com' : 'hotmail',
'live.com' : 'hotmail',
'qq.com' : 'qq',
'gadu-gadu.pl' : 'gadu',
'amazonses.com' : 'amazon',
'amazon.com' : 'amazon',
'home.pl' : 'home',
'home.net.pl' : 'home',
};
var found = new Array();
receivedHeaders.forEach(function(hdr) {
var domainRegex = /(?:\.|^)([a-z0-9\-]+\.[a-z0-9\-]+)$/g;
var thirdLevelDomain = /^(net|com|org|biz)\.[a-z0-9]+$/g;
var thirdLevelDomainRegex = /(?:\.|^)([a-z0-9\-]+\.[a-z0-9\-]+\.[a-z0-9\-]+)$/g;
var match = domainRegex.exec(hdr.from.toLowerCase());
if(match)
{
var domain = match[1];
// special case - .net.pl etc
if(thirdLevelDomain.test(domain)) {
match = thirdLevelDomainRegex.exec(hdr.from.toLowerCase());
if(match) {
domain = match[1];
}
}
if(known[domain] && found.indexOf(known[domain]) == -1) {
found.push(known[domain]);
}
}
});
return found;
},
/* Return number of insecure hosts in the path */
paranoiaAreReceivedHeadersInsecure: function(receivedHeaders) {
var insecure = 0;
var unencryptedLocal = 0;
var encrypted = 0;
receivedHeaders.forEach(function(header) {
// Application.console.log(header.from + " - " + header.secure);
if(!header.secure && !header.local) insecure++;
if(!header.secure && header.local) unencryptedLocal++;
if(header.secure) encrypted++;
});
return {
'insecure': insecure,
'unencryptedLocal': unencryptedLocal,
'encrypted': encrypted
};
},
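/* Hedged usage sketch (illustrative values, not part of the original add-on):
   tbParanoia.paranoiaAreReceivedHeadersInsecure([
     { secure: true,  local: false },
     { secure: false, local: true  },
     { secure: false, local: false }
   ])
   // => { insecure: 1, unencryptedLocal: 1, encrypted: 1 }
*/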
/* Create a popup menu with all 'Received:' headers */
paranoiaCreateReceivedPopup: function(receivedHeaders) {
var popup = document.createElement('menupopup');
popup.setAttribute('id', 'paranoiaConnectionList');
receivedHeaders.forEach(function(hdr) {
var item = document.createElement('menuitem');
item.setAttribute('label', hdr.toString());
popup.appendChild(item);
});
return popup;
},
/* Remove popup from DOM tree, if found */
paranoiaRemoveReceivedPopup: function() {
var elem = document.getElementById('paranoiaConnectionList');
if(elem) elem.parentNode.removeChild(elem);
},
/* Return XULElement with icon - create one if necessary */
paranoiaGetHdrIconDOM: function() {
var id = 'paranoiaHdrIcon';
if(document.getElementById(id))
{
return document.getElementById(id);
}
var parentBox = document.getElementById('dateValueBox'); ///////
var previousBox = document.getElementById('smimeBox');
if(!parentBox || !previousBox) {
Application.console.log('Chrome element not found');
}
var elem = document.createElement('image');
elem.setAttribute('id', id);
elem.onclick = function() {
document.getElementById('paranoiaConnectionList').openPopup(this, 'after_start', 0, 0, false, false);
}
parentBox.insertBefore(elem, previousBox);
return elem;
},
paranoiaSetPerfectIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/perfect.png")');
icon.setAttribute('tooltiptext', 'Perfect - no known email providers and encryption between all hops');
return icon;
},
paranoiaSetGoodIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/good.png")');
icon.setAttribute('tooltiptext', 'Good - Email passed known providers or was unencrypted only on a local connection');
return icon;
},
paranoiaSetBadIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/bad.png")');
icon.setAttribute('tooltiptext', '1 non-local connection on the way was unencrypted');
return icon;
},
paranoiaSetTragicIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/tragic.png")');
icon.setAttribute('tooltiptext', 'More than 1 connection on the way was unencrypted');
return icon;
},
paranoiaAddProviderIcon: function(providerName, parentBox) {
var previousBox = tbParanoia.paranoiaGetHdrIconDOM();
var elem = document.createElement('image');
elem.setAttribute('class', 'paranoiaProvider');
elem.setAttribute('style', 'list-style-image: url("chrome://demo/skin/providers/' + providerName + '.png")');
elem.setAttribute('tooltiptext', tbParanoia.paranoiaGetProviderDisplayName(providerName));
parentBox.appendChild(elem);
},
paranoiaAddProviderIcons: function(providers)
{
var oldIcons = document.getElementsByClassName('paranoiaProviderVbox');
var i, len = oldIcons.length;
var vbox;
for(i = 0; i < len; i++) {
var elem = oldIcons[i];
elem.parentNode.removeChild(elem);
}
for(var i = 0; i < providers.length; i++) {
var item = providers[i];
if(i % 2 == 0) {
if(vbox) document.getElementById('dateValueBox').insertBefore(vbox, tbParanoia.paranoiaGetHdrIconDOM());
var vbox = document.createElement('vbox');
vbox.setAttribute('class', 'paranoiaProviderVbox');
}
tbParanoia.paranoiaAddProviderIcon(item, vbox);
};
if(vbox) document.getElementById('dateValueBox').insertBefore(vbox, tbParanoia.paranoiaGetHdrIconDOM());
},
/* Return true if host is on a local network */
paranoiaIsHostLocal: function(hostname) {
if(hostname == 'localhost') return true;
if(hostname == '[127.0.0.1]') return true;
if(hostname == 'Internal') return true;
if(hostname == 'www-data') return true;
if(/\.internal$/g.test(hostname)) return true;
if(/(^\[10\.)|(^\[172\.1[6-9]\.)|(^\[172\.2[0-9]\.)|(^\[172\.3[0-1]\.)|(^\[192\.168\.)/g.test(hostname)) return true;
return false;
},
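/* Hedged examples (illustrative hostnames): '[192.168.1.10]', '[10.0.0.5]' and
   'localhost' are treated as local, 'mx.example.com' is not. */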
/* mx123.mail.corpo.com -> corpo.com */
paranoiaGetDomainName: function(hostname) {
if(hostname.indexOf('.') < 0) {
return hostname;
}
try {
return hostname.match(/[a-z0-9][a-z0-9\-]+\.[a-z]+$/)[0];
} catch(e) {
return hostname;
}
},
/* Checks if given nsMsgFolder is a RSS/Atom feed folder */
paranoiaIsFeedFolder: function(folder) {
return /^mailbox:\/\/[^@\/]+@Feeds/.exec(folder.URI); | /* Add a listener for changed message */
gMessageListeners.push({
onStartHeaders: function() {
var msg = gMessageDisplay.displayedMessage;
if(!msg) return;
var folder = msg.folder;
if(tbParanoia.paranoiaIsFeedFolder(folder)) return;
var offset = new Object();
var messageSize = new Object();
// https://github.com/clear-code/changequote/blob/0f5a09d3887d97446553d6225cc9f71dc2a75039/content/changequote/changequote.jsh
// http://thunderbirddocs.blogspot.com/2005/02/thunderbird-extensions-how-to-get-body.html
try {
var stream = folder.getOfflineFileStream(msg.messageKey, offset, messageSize);
var scriptableStream=Components.classes["@mozilla.org/scriptableinputstream;1"].getService(Components.interfaces.nsIScriptableInputStream);
scriptableStream.init(stream);
var fullBody = scriptableStream.read(msg.messageSize);
var headersStr = fullBody.substring(0, fullBody.indexOf("\r\n\r\n"));
if (headersStr.length == 0) {
headersStr = fullBody.substring(0, fullBody.indexOf("\n\n"));
}
scriptableStream.close();
stream.close();
/* We've got the headers string, let's parse it */
var headers = tbParanoia.paranoiaParseHeaderString(headersStr);
var receivedHeaders = tbParanoia.paranoiaGetReceivedHeaders(headers);
var providers = tbParanoia.paranoiaGetKnownProviders(receivedHeaders);
try {
var security = tbParanoia.paranoiaAreReceivedHeadersInsecure(receivedHeaders);
if(!security.insecure && !security.unencryptedLocal && providers.length == 0) {
tbParanoia.paranoiaSetPerfectIcon();
}
else if(!security.insecure) {
var icon = tbParanoia.paranoiaSetGoodIcon();
if(providers.length > 0 && security.unencryptedLocal > 0) {
icon.setAttribute('tooltiptext', 'Good: Passed known email providers and the only unencrypted connections were local');
}
else {
if(providers.length > 0) {
icon.setAttribute('tooltiptext', 'Good: Passed known email providers');
}
if(security.unencryptedLocal > 0) {
icon.setAttribute('tooltiptext', 'Good: The only unencrypted connections were local');
}
}
}
else if(security.insecure == 1) {
tbParanoia.paranoiaSetBadIcon();
}
else {
tbParanoia.paranoiaSetTragicIcon();
}
tbParanoia.paranoiaRemoveReceivedPopup();
var popup = tbParanoia.paranoiaCreateReceivedPopup(receivedHeaders);
document.getElementById('dateValueBox').appendChild(popup);
// receivedHeaders.forEach(function(hdr) {Application.console.log(hdr);});
tbParanoia.paranoiaAddProviderIcons(providers);
}
catch(e) {
/* Message title bar modified - Compact Headers or other extension */
if(e.name.toString() == "NotFoundError") {
Application.console.log('XUL element not found: ' + e.message);
} else {
throw e;
}
}
}
catch(e) {
Application.console.log("PROBLEM with Paranoia: " + e.message);
}
},
onEndHeaders: function() {
},
onEndAttachments: function () {
},
onBeforeShowHeaderPane: function () {
}
});
} // init()
} // tbParanoia
}; // if
window.addEventListener("load", tbParanoia.init(), false); | },
init: function() {
// http://stackoverflow.com/questions/5089405/thunderbird-extension-add-field-to-messagepane-how-to-deal-with-windows-instan | random_line_split |
cassandraMetadataPersistence.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"github.com/gocql/gocql"
workflow "github.com/uber/cadence/.gen/go/shared"
"github.com/uber/cadence/common"
"github.com/uber/cadence/common/log"
"github.com/uber/cadence/common/log/tag"
p "github.com/uber/cadence/common/persistence"
"github.com/uber/cadence/common/service/config"
)
const (
templateDomainInfoType = `{` +
`id: ?, ` +
`name: ?, ` +
`status: ?, ` +
`description: ?, ` +
`owner_email: ?, ` +
`data: ? ` +
`}`
templateDomainConfigType = `{` +
`retention: ?, ` +
`emit_metric: ?, ` +
`archival_bucket: ?, ` +
`archival_status: ?,` +
`history_archival_status: ?, ` +
`history_archival_uri: ?, ` +
`visibility_archival_status: ?, ` +
`visibility_archival_uri: ?, ` +
`bad_binaries: ?,` +
`bad_binaries_encoding: ?` +
`}`
templateDomainReplicationConfigType = `{` +
`active_cluster_name: ?, ` +
`clusters: ? ` +
`}`
templateCreateDomainQuery = `INSERT INTO domains (` +
`id, domain) ` +
`VALUES(?, {name: ?}) IF NOT EXISTS`
templateCreateDomainByNameQuery = `INSERT INTO domains_by_name (` +
`name, domain, config, replication_config, is_global_domain, config_version, failover_version) ` +
`VALUES(?, ` + templateDomainInfoType + `, ` + templateDomainConfigType + `, ` + templateDomainReplicationConfigType + `, ?, ?, ?) IF NOT EXISTS`
templateGetDomainQuery = `SELECT domain.name ` +
`FROM domains ` +
`WHERE id = ?` | `config.archival_bucket, config.archival_status, ` +
`config.history_archival_status, config.history_archival_uri, ` +
`config.visibility_archival_status, config.visibility_archival_uri, ` +
`config.bad_binaries, config.bad_binaries_encoding, ` +
`replication_config.active_cluster_name, replication_config.clusters, ` +
`is_global_domain, ` +
`config_version, ` +
`failover_version, ` +
`db_version ` +
`FROM domains_by_name ` +
`WHERE name = ?`
templateUpdateDomainByNameQuery = `UPDATE domains_by_name ` +
`SET domain = ` + templateDomainInfoType + `, ` +
`config = ` + templateDomainConfigType + `, ` +
`replication_config = ` + templateDomainReplicationConfigType + `, ` +
`config_version = ? ,` +
`failover_version = ? ,` +
`db_version = ? ` +
`WHERE name = ? ` +
`IF db_version = ? `
templateDeleteDomainQuery = `DELETE FROM domains ` +
`WHERE id = ?`
templateDeleteDomainByNameQuery = `DELETE FROM domains_by_name ` +
`WHERE name = ?`
)
type (
cassandraMetadataPersistence struct {
cassandraStore
currentClusterName string
}
)
// newMetadataPersistence is used to create an instance of the MetadataStore implementation
func newMetadataPersistence(cfg config.Cassandra, clusterName string, logger log.Logger) (p.MetadataStore,
error) {
cluster := NewCassandraCluster(cfg.Hosts, cfg.Port, cfg.User, cfg.Password, cfg.Datacenter)
cluster.Keyspace = cfg.Keyspace
cluster.ProtoVersion = cassandraProtoVersion
cluster.Consistency = gocql.LocalQuorum
cluster.SerialConsistency = gocql.LocalSerial
cluster.Timeout = defaultSessionTimeout
session, err := cluster.CreateSession()
if err != nil {
return nil, err
}
return &cassandraMetadataPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
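// exampleNewMetadataStore is a hedged usage sketch added for illustration; it is
// not part of the original file and the literal configuration values are
// assumptions. It shows how the constructor above is meant to be wired up.
func exampleNewMetadataStore(logger log.Logger) (p.MetadataStore, error) {
	cfg := config.Cassandra{
		Hosts:    "127.0.0.1",
		Port:     9042,
		Keyspace: "cadence",
	}
	// "active" plays the role of the current cluster name in this sketch
	return newMetadataPersistence(cfg, "active", logger)
}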
// Close releases the resources held by this object
func (m *cassandraMetadataPersistence) Close() {
if m.session != nil {
m.session.Close()
}
}
// Cassandra does not support conditional updates across multiple tables. For this reason we have to first insert into
// 'Domains' table and then do a conditional insert into domains_by_name table. If the conditional write fails we
// delete the orphaned entry from the domains table. There is a chance the delete could fail and we would never
// remove the orphaned entry from the domains table. We might need a background job to delete those orphaned records.
func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {
query := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)
applied, err := query.MapScanCAS(make(map[string]interface{}))
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains table. Error: %v", err),
}
}
if !applied {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed because of uuid collision."),
}
}
query = m.session.Query(templateCreateDomainByNameQuery,
request.Info.Name,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.IsGlobalDomain,
request.ConfigVersion,
request.FailoverVersion,
)
previous := make(map[string]interface{})
applied, err = query.MapScanCAS(previous)
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains_by_name table. Error: %v", err),
}
}
if !applied {
// Domain already exist. Delete orphan domain record before returning back to user
if errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {
m.logger.Warn("Unable to delete orphan domain record", tag.Error(errDelete))
}
if domain, ok := previous["domain"].(map[string]interface{}); ok {
msg := fmt.Sprintf("Domain already exists. DomainId: %v", domain["id"])
return nil, &workflow.DomainAlreadyExistsError{
Message: msg,
}
}
return nil, &workflow.DomainAlreadyExistsError{
Message: fmt.Sprintf("CreateDomain operation failed because of conditional failure."),
}
}
return &p.CreateDomainResponse{ID: request.Info.ID}, nil
}
func (m *cassandraMetadataPersistence) GetDomain(request *p.GetDomainRequest) (*p.InternalGetDomainResponse, error) {
var query *gocql.Query
var err error
info := &p.DomainInfo{}
config := &p.InternalDomainConfig{}
replicationConfig := &p.DomainReplicationConfig{}
var replicationClusters []map[string]interface{}
var dbVersion int64
var failoverVersion int64
var configVersion int64
var isGlobalDomain bool
if len(request.ID) > 0 && len(request.Name) > 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name specified in request.",
}
} else if len(request.ID) == 0 && len(request.Name) == 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name are empty.",
}
}
handleError := func(name, ID string, err error) error {
identity := name
if len(ID) > 0 {
identity = ID
}
if err == gocql.ErrNotFound {
return &workflow.EntityNotExistsError{
Message: fmt.Sprintf("Domain %s does not exist.", identity),
}
}
return &workflow.InternalServiceError{
Message: fmt.Sprintf("GetDomain operation failed. Error %v", err),
}
}
domainName := request.Name
if len(request.ID) > 0 {
query = m.session.Query(templateGetDomainQuery, request.ID)
err = query.Scan(&domainName)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
}
var badBinariesData []byte
var badBinariesDataEncoding string
query = m.session.Query(templateGetDomainByNameQuery, domainName)
err = query.Scan(
&info.ID,
&info.Name,
&info.Status,
&info.Description,
&info.OwnerEmail,
&info.Data,
&config.Retention,
&config.EmitMetric,
&config.ArchivalBucket,
&config.ArchivalStatus,
&config.HistoryArchivalStatus,
&config.HistoryArchivalURI,
&config.VisibilityArchivalStatus,
&config.VisibilityArchivalURI,
&badBinariesData,
&badBinariesDataEncoding,
&replicationConfig.ActiveClusterName,
&replicationClusters,
&isGlobalDomain,
&configVersion,
&failoverVersion,
&dbVersion,
)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
if info.Data == nil {
info.Data = map[string]string{}
}
config.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))
replicationConfig.ActiveClusterName = p.GetOrUseDefaultActiveCluster(m.currentClusterName, replicationConfig.ActiveClusterName)
replicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)
replicationConfig.Clusters = p.GetOrUseDefaultClusters(m.currentClusterName, replicationConfig.Clusters)
return &p.InternalGetDomainResponse{
Info: info,
Config: config,
ReplicationConfig: replicationConfig,
IsGlobalDomain: isGlobalDomain,
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
NotificationVersion: dbVersion,
TableVersion: p.DomainTableVersionV1,
}, nil
}
func (m *cassandraMetadataPersistence) UpdateDomain(request *p.InternalUpdateDomainRequest) error {
var nextVersion int64 = 1
var currentVersion *int64
if request.NotificationVersion > 0 {
nextVersion = request.NotificationVersion + 1
currentVersion = &request.NotificationVersion
}
query := m.session.Query(templateUpdateDomainByNameQuery,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.ConfigVersion,
request.FailoverVersion,
nextVersion,
request.Info.Name,
currentVersion,
)
applied, err := query.ScanCAS()
if !applied {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation encounter concurrent write."),
}
}
if err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation failed. Error %v", err),
}
}
return nil
}
func (m *cassandraMetadataPersistence) DeleteDomain(request *p.DeleteDomainRequest) error {
var name string
query := m.session.Query(templateGetDomainQuery, request.ID)
err := query.Scan(&name)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(name, request.ID)
}
func (m *cassandraMetadataPersistence) DeleteDomainByName(request *p.DeleteDomainByNameRequest) error {
var ID string
query := m.session.Query(templateGetDomainByNameQuery, request.Name)
err := query.Scan(&ID, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(request.Name, ID)
}
func (m *cassandraMetadataPersistence) ListDomains(request *p.ListDomainsRequest) (*p.InternalListDomainsResponse, error) {
panic("cassandraMetadataPersistence do not support list domain operation.")
}
func (m *cassandraMetadataPersistence) GetMetadata() (*p.GetMetadataResponse, error) {
panic("cassandraMetadataPersistence do not support get metadata operation.")
}
func (m *cassandraMetadataPersistence) deleteDomain(name, ID string) error {
query := m.session.Query(templateDeleteDomainByNameQuery, name)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomainByName operation failed. Error %v", err),
}
}
query = m.session.Query(templateDeleteDomainQuery, ID)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomain operation failed. Error %v", err),
}
}
return nil
} |
templateGetDomainByNameQuery = `SELECT domain.id, domain.name, domain.status, domain.description, ` +
`domain.owner_email, domain.data, config.retention, config.emit_metric, ` + | random_line_split |
cassandraMetadataPersistence.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"github.com/gocql/gocql"
workflow "github.com/uber/cadence/.gen/go/shared"
"github.com/uber/cadence/common"
"github.com/uber/cadence/common/log"
"github.com/uber/cadence/common/log/tag"
p "github.com/uber/cadence/common/persistence"
"github.com/uber/cadence/common/service/config"
)
const (
templateDomainInfoType = `{` +
`id: ?, ` +
`name: ?, ` +
`status: ?, ` +
`description: ?, ` +
`owner_email: ?, ` +
`data: ? ` +
`}`
templateDomainConfigType = `{` +
`retention: ?, ` +
`emit_metric: ?, ` +
`archival_bucket: ?, ` +
`archival_status: ?,` +
`history_archival_status: ?, ` +
`history_archival_uri: ?, ` +
`visibility_archival_status: ?, ` +
`visibility_archival_uri: ?, ` +
`bad_binaries: ?,` +
`bad_binaries_encoding: ?` +
`}`
templateDomainReplicationConfigType = `{` +
`active_cluster_name: ?, ` +
`clusters: ? ` +
`}`
templateCreateDomainQuery = `INSERT INTO domains (` +
`id, domain) ` +
`VALUES(?, {name: ?}) IF NOT EXISTS`
templateCreateDomainByNameQuery = `INSERT INTO domains_by_name (` +
`name, domain, config, replication_config, is_global_domain, config_version, failover_version) ` +
`VALUES(?, ` + templateDomainInfoType + `, ` + templateDomainConfigType + `, ` + templateDomainReplicationConfigType + `, ?, ?, ?) IF NOT EXISTS`
templateGetDomainQuery = `SELECT domain.name ` +
`FROM domains ` +
`WHERE id = ?`
templateGetDomainByNameQuery = `SELECT domain.id, domain.name, domain.status, domain.description, ` +
`domain.owner_email, domain.data, config.retention, config.emit_metric, ` +
`config.archival_bucket, config.archival_status, ` +
`config.history_archival_status, config.history_archival_uri, ` +
`config.visibility_archival_status, config.visibility_archival_uri, ` +
`config.bad_binaries, config.bad_binaries_encoding, ` +
`replication_config.active_cluster_name, replication_config.clusters, ` +
`is_global_domain, ` +
`config_version, ` +
`failover_version, ` +
`db_version ` +
`FROM domains_by_name ` +
`WHERE name = ?`
templateUpdateDomainByNameQuery = `UPDATE domains_by_name ` +
`SET domain = ` + templateDomainInfoType + `, ` +
`config = ` + templateDomainConfigType + `, ` +
`replication_config = ` + templateDomainReplicationConfigType + `, ` +
`config_version = ? ,` +
`failover_version = ? ,` +
`db_version = ? ` +
`WHERE name = ? ` +
`IF db_version = ? `
templateDeleteDomainQuery = `DELETE FROM domains ` +
`WHERE id = ?`
templateDeleteDomainByNameQuery = `DELETE FROM domains_by_name ` +
`WHERE name = ?`
)
type (
cassandraMetadataPersistence struct {
cassandraStore
currentClusterName string
}
)
// newMetadataPersistence is used to create an instance of the Cassandra-backed MetadataStore implementation
func newMetadataPersistence(cfg config.Cassandra, clusterName string, logger log.Logger) (p.MetadataStore,
error) {
cluster := NewCassandraCluster(cfg.Hosts, cfg.Port, cfg.User, cfg.Password, cfg.Datacenter)
cluster.Keyspace = cfg.Keyspace
cluster.ProtoVersion = cassandraProtoVersion
cluster.Consistency = gocql.LocalQuorum
cluster.SerialConsistency = gocql.LocalSerial
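	// Serial consistency governs the conditional (IF NOT EXISTS / IF db_version = ?) statements used below.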
cluster.Timeout = defaultSessionTimeout
session, err := cluster.CreateSession()
if err != nil {
return nil, err
}
return &cassandraMetadataPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
// Close releases the resources held by this object
func (m *cassandraMetadataPersistence) Close() {
if m.session != nil {
m.session.Close()
}
}
// Cassandra does not support conditional updates across multiple tables. For this reason we first insert into
// the domains table and then do a conditional insert into the domains_by_name table. If the conditional write fails we
// delete the orphaned entry from the domains table. That delete could itself fail and leave the orphaned entry behind,
// so a background job may be needed to clean up such orphaned records.
func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {
query := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)
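	// MapScanCAS runs the lightweight transaction and reports whether the IF NOT EXISTS condition was applied.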
applied, err := query.MapScanCAS(make(map[string]interface{}))
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains table. Error: %v", err),
}
}
if !applied {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed because of uuid collision."),
}
}
query = m.session.Query(templateCreateDomainByNameQuery,
request.Info.Name,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.IsGlobalDomain,
request.ConfigVersion,
request.FailoverVersion,
)
previous := make(map[string]interface{})
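	// When the conditional insert is not applied, gocql fills `previous` with the existing row so the conflicting domain can be reported.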
applied, err = query.MapScanCAS(previous)
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains_by_name table. Error: %v", err),
}
}
if !applied {
		// Domain already exists. Delete the orphaned domain record before returning to the caller.
if errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {
m.logger.Warn("Unable to delete orphan domain record", tag.Error(errDelete))
}
if domain, ok := previous["domain"].(map[string]interface{}); ok {
msg := fmt.Sprintf("Domain already exists. DomainId: %v", domain["id"])
return nil, &workflow.DomainAlreadyExistsError{
Message: msg,
}
}
return nil, &workflow.DomainAlreadyExistsError{
Message: fmt.Sprintf("CreateDomain operation failed because of conditional failure."),
}
}
return &p.CreateDomainResponse{ID: request.Info.ID}, nil
}
func (m *cassandraMetadataPersistence) GetDomain(request *p.GetDomainRequest) (*p.InternalGetDomainResponse, error) {
var query *gocql.Query
var err error
info := &p.DomainInfo{}
config := &p.InternalDomainConfig{}
replicationConfig := &p.DomainReplicationConfig{}
var replicationClusters []map[string]interface{}
var dbVersion int64
var failoverVersion int64
var configVersion int64
var isGlobalDomain bool
if len(request.ID) > 0 && len(request.Name) > 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name specified in request.",
}
} else if len(request.ID) == 0 && len(request.Name) == 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name are empty.",
}
}
handleError := func(name, ID string, err error) error {
identity := name
if len(ID) > 0 {
identity = ID
}
if err == gocql.ErrNotFound {
return &workflow.EntityNotExistsError{
Message: fmt.Sprintf("Domain %s does not exist.", identity),
}
}
return &workflow.InternalServiceError{
Message: fmt.Sprintf("GetDomain operation failed. Error %v", err),
}
}
domainName := request.Name
if len(request.ID) > 0 {
query = m.session.Query(templateGetDomainQuery, request.ID)
err = query.Scan(&domainName)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
}
var badBinariesData []byte
var badBinariesDataEncoding string
query = m.session.Query(templateGetDomainByNameQuery, domainName)
err = query.Scan(
&info.ID,
&info.Name,
&info.Status,
&info.Description,
&info.OwnerEmail,
&info.Data,
&config.Retention,
&config.EmitMetric,
&config.ArchivalBucket,
&config.ArchivalStatus,
&config.HistoryArchivalStatus,
&config.HistoryArchivalURI,
&config.VisibilityArchivalStatus,
&config.VisibilityArchivalURI,
&badBinariesData,
&badBinariesDataEncoding,
&replicationConfig.ActiveClusterName,
&replicationClusters,
&isGlobalDomain,
&configVersion,
&failoverVersion,
&dbVersion,
)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
if info.Data == nil {
info.Data = map[string]string{}
}
config.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))
replicationConfig.ActiveClusterName = p.GetOrUseDefaultActiveCluster(m.currentClusterName, replicationConfig.ActiveClusterName)
replicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)
replicationConfig.Clusters = p.GetOrUseDefaultClusters(m.currentClusterName, replicationConfig.Clusters)
return &p.InternalGetDomainResponse{
Info: info,
Config: config,
ReplicationConfig: replicationConfig,
IsGlobalDomain: isGlobalDomain,
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
NotificationVersion: dbVersion,
TableVersion: p.DomainTableVersionV1,
}, nil
}
func (m *cassandraMetadataPersistence) UpdateDomain(request *p.InternalUpdateDomainRequest) error {
var nextVersion int64 = 1
var currentVersion *int64
if request.NotificationVersion > 0 {
nextVersion = request.NotificationVersion + 1
currentVersion = &request.NotificationVersion
}
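	// Optimistic concurrency control: the UPDATE carries IF db_version = ?, so it only applies when the stored version still matches currentVersion.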
query := m.session.Query(templateUpdateDomainByNameQuery,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.ConfigVersion,
request.FailoverVersion,
nextVersion,
request.Info.Name,
currentVersion,
)
applied, err := query.ScanCAS()
if !applied {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation encounter concurrent write."),
}
}
if err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation failed. Error %v", err),
}
}
return nil
}
func (m *cassandraMetadataPersistence) DeleteDomain(request *p.DeleteDomainRequest) error {
var name string
query := m.session.Query(templateGetDomainQuery, request.ID)
err := query.Scan(&name)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(name, request.ID)
}
func (m *cassandraMetadataPersistence) DeleteDomainByName(request *p.DeleteDomainByNameRequest) error {
var ID string
query := m.session.Query(templateGetDomainByNameQuery, request.Name)
err := query.Scan(&ID, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(request.Name, ID)
}
func (m *cassandraMetadataPersistence) ListDomains(request *p.ListDomainsRequest) (*p.InternalListDomainsResponse, error) {
panic("cassandraMetadataPersistence do not support list domain operation.")
}
func (m *cassandraMetadataPersistence) GetMetadata() (*p.GetMetadataResponse, error) {
panic("cassandraMetadataPersistence do not support get metadata operation.")
}
func (m *cassandraMetadataPersistence) deleteDomain(name, ID string) error {
query := m.session.Query(templateDeleteDomainByNameQuery, name)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomainByName operation failed. Error %v", err),
}
}
query = m.session.Query(templateDeleteDomainQuery, ID)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomain operation failed. Error %v", err),
}
}
return nil
}
cassandraMetadataPersistence.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"github.com/gocql/gocql"
workflow "github.com/uber/cadence/.gen/go/shared"
"github.com/uber/cadence/common"
"github.com/uber/cadence/common/log"
"github.com/uber/cadence/common/log/tag"
p "github.com/uber/cadence/common/persistence"
"github.com/uber/cadence/common/service/config"
)
const (
templateDomainInfoType = `{` +
`id: ?, ` +
`name: ?, ` +
`status: ?, ` +
`description: ?, ` +
`owner_email: ?, ` +
`data: ? ` +
`}`
templateDomainConfigType = `{` +
`retention: ?, ` +
`emit_metric: ?, ` +
`archival_bucket: ?, ` +
`archival_status: ?,` +
`history_archival_status: ?, ` +
`history_archival_uri: ?, ` +
`visibility_archival_status: ?, ` +
`visibility_archival_uri: ?, ` +
`bad_binaries: ?,` +
`bad_binaries_encoding: ?` +
`}`
templateDomainReplicationConfigType = `{` +
`active_cluster_name: ?, ` +
`clusters: ? ` +
`}`
templateCreateDomainQuery = `INSERT INTO domains (` +
`id, domain) ` +
`VALUES(?, {name: ?}) IF NOT EXISTS`
templateCreateDomainByNameQuery = `INSERT INTO domains_by_name (` +
`name, domain, config, replication_config, is_global_domain, config_version, failover_version) ` +
`VALUES(?, ` + templateDomainInfoType + `, ` + templateDomainConfigType + `, ` + templateDomainReplicationConfigType + `, ?, ?, ?) IF NOT EXISTS`
templateGetDomainQuery = `SELECT domain.name ` +
`FROM domains ` +
`WHERE id = ?`
templateGetDomainByNameQuery = `SELECT domain.id, domain.name, domain.status, domain.description, ` +
`domain.owner_email, domain.data, config.retention, config.emit_metric, ` +
`config.archival_bucket, config.archival_status, ` +
`config.history_archival_status, config.history_archival_uri, ` +
`config.visibility_archival_status, config.visibility_archival_uri, ` +
`config.bad_binaries, config.bad_binaries_encoding, ` +
`replication_config.active_cluster_name, replication_config.clusters, ` +
`is_global_domain, ` +
`config_version, ` +
`failover_version, ` +
`db_version ` +
`FROM domains_by_name ` +
`WHERE name = ?`
templateUpdateDomainByNameQuery = `UPDATE domains_by_name ` +
`SET domain = ` + templateDomainInfoType + `, ` +
`config = ` + templateDomainConfigType + `, ` +
`replication_config = ` + templateDomainReplicationConfigType + `, ` +
`config_version = ? ,` +
`failover_version = ? ,` +
`db_version = ? ` +
`WHERE name = ? ` +
`IF db_version = ? `
templateDeleteDomainQuery = `DELETE FROM domains ` +
`WHERE id = ?`
templateDeleteDomainByNameQuery = `DELETE FROM domains_by_name ` +
`WHERE name = ?`
)
type (
cassandraMetadataPersistence struct {
cassandraStore
currentClusterName string
}
)
// newMetadataPersistence is used to create an instance of the Cassandra-backed MetadataStore implementation
func newMetadataPersistence(cfg config.Cassandra, clusterName string, logger log.Logger) (p.MetadataStore,
error) {
cluster := NewCassandraCluster(cfg.Hosts, cfg.Port, cfg.User, cfg.Password, cfg.Datacenter)
cluster.Keyspace = cfg.Keyspace
cluster.ProtoVersion = cassandraProtoVersion
cluster.Consistency = gocql.LocalQuorum
cluster.SerialConsistency = gocql.LocalSerial
cluster.Timeout = defaultSessionTimeout
session, err := cluster.CreateSession()
if err != nil {
return nil, err
}
return &cassandraMetadataPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
// Close releases the resources held by this object
func (m *cassandraMetadataPersistence) Close() {
if m.session != nil {
m.session.Close()
}
}
// Cassandra does not support conditional updates across multiple tables. For this reason we first insert into
// the domains table and then do a conditional insert into the domains_by_name table. If the conditional write fails we
// delete the orphaned entry from the domains table. That delete could itself fail and leave the orphaned entry behind,
// so a background job may be needed to clean up such orphaned records.
func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {
query := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)
applied, err := query.MapScanCAS(make(map[string]interface{}))
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains table. Error: %v", err),
}
}
if !applied {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed because of uuid collision."),
}
}
query = m.session.Query(templateCreateDomainByNameQuery,
request.Info.Name,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.IsGlobalDomain,
request.ConfigVersion,
request.FailoverVersion,
)
previous := make(map[string]interface{})
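	// When the conditional insert is not applied, gocql fills `previous` with the existing row so the conflicting domain can be reported.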
applied, err = query.MapScanCAS(previous)
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains_by_name table. Error: %v", err),
}
}
if !applied {
		// Domain already exists. Delete the orphaned domain record before returning to the caller.
if errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {
m.logger.Warn("Unable to delete orphan domain record", tag.Error(errDelete))
}
if domain, ok := previous["domain"].(map[string]interface{}); ok {
msg := fmt.Sprintf("Domain already exists. DomainId: %v", domain["id"])
return nil, &workflow.DomainAlreadyExistsError{
Message: msg,
}
}
return nil, &workflow.DomainAlreadyExistsError{
Message: fmt.Sprintf("CreateDomain operation failed because of conditional failure."),
}
}
return &p.CreateDomainResponse{ID: request.Info.ID}, nil
}
func (m *cassandraMetadataPersistence) GetDomain(request *p.GetDomainRequest) (*p.InternalGetDomainResponse, error) {
var query *gocql.Query
var err error
info := &p.DomainInfo{}
config := &p.InternalDomainConfig{}
replicationConfig := &p.DomainReplicationConfig{}
var replicationClusters []map[string]interface{}
var dbVersion int64
var failoverVersion int64
var configVersion int64
var isGlobalDomain bool
if len(request.ID) > 0 && len(request.Name) > 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name specified in request.",
}
} else if len(request.ID) == 0 && len(request.Name) == 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name are empty.",
}
}
handleError := func(name, ID string, err error) error {
identity := name
if len(ID) > 0 {
identity = ID
}
if err == gocql.ErrNotFound {
return &workflow.EntityNotExistsError{
Message: fmt.Sprintf("Domain %s does not exist.", identity),
}
}
return &workflow.InternalServiceError{
Message: fmt.Sprintf("GetDomain operation failed. Error %v", err),
}
}
domainName := request.Name
if len(request.ID) > 0 {
query = m.session.Query(templateGetDomainQuery, request.ID)
err = query.Scan(&domainName)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
}
var badBinariesData []byte
var badBinariesDataEncoding string
query = m.session.Query(templateGetDomainByNameQuery, domainName)
err = query.Scan(
&info.ID,
&info.Name,
&info.Status,
&info.Description,
&info.OwnerEmail,
&info.Data,
&config.Retention,
&config.EmitMetric,
&config.ArchivalBucket,
&config.ArchivalStatus,
&config.HistoryArchivalStatus,
&config.HistoryArchivalURI,
&config.VisibilityArchivalStatus,
&config.VisibilityArchivalURI,
&badBinariesData,
&badBinariesDataEncoding,
&replicationConfig.ActiveClusterName,
&replicationClusters,
&isGlobalDomain,
&configVersion,
&failoverVersion,
&dbVersion,
)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
if info.Data == nil {
info.Data = map[string]string{}
}
config.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))
replicationConfig.ActiveClusterName = p.GetOrUseDefaultActiveCluster(m.currentClusterName, replicationConfig.ActiveClusterName)
replicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)
replicationConfig.Clusters = p.GetOrUseDefaultClusters(m.currentClusterName, replicationConfig.Clusters)
return &p.InternalGetDomainResponse{
Info: info,
Config: config,
ReplicationConfig: replicationConfig,
IsGlobalDomain: isGlobalDomain,
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
NotificationVersion: dbVersion,
TableVersion: p.DomainTableVersionV1,
}, nil
}
func (m *cassandraMetadataPersistence) UpdateDomain(request *p.InternalUpdateDomainRequest) error {
var nextVersion int64 = 1
var currentVersion *int64
if request.NotificationVersion > 0 {
nextVersion = request.NotificationVersion + 1
currentVersion = &request.NotificationVersion
}
query := m.session.Query(templateUpdateDomainByNameQuery,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.ConfigVersion,
request.FailoverVersion,
nextVersion,
request.Info.Name,
currentVersion,
)
applied, err := query.ScanCAS()
if !applied {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation encounter concurrent write."),
}
}
if err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation failed. Error %v", err),
}
}
return nil
}
func (m *cassandraMetadataPersistence) DeleteDomain(request *p.DeleteDomainRequest) error {
var name string
query := m.session.Query(templateGetDomainQuery, request.ID)
err := query.Scan(&name)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(name, request.ID)
}
func (m *cassandraMetadataPersistence) DeleteDomainByName(request *p.DeleteDomainByNameRequest) error {
var ID string
query := m.session.Query(templateGetDomainByNameQuery, request.Name)
err := query.Scan(&ID, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
if err != nil {
		if err == gocql.ErrNotFound {
			return nil
		}
return err
}
return m.deleteDomain(request.Name, ID)
}
func (m *cassandraMetadataPersistence) ListDomains(request *p.ListDomainsRequest) (*p.InternalListDomainsResponse, error) {
panic("cassandraMetadataPersistence do not support list domain operation.")
}
func (m *cassandraMetadataPersistence) GetMetadata() (*p.GetMetadataResponse, error) {
panic("cassandraMetadataPersistence do not support get metadata operation.")
}
func (m *cassandraMetadataPersistence) deleteDomain(name, ID string) error {
query := m.session.Query(templateDeleteDomainByNameQuery, name)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomainByName operation failed. Error %v", err),
}
}
query = m.session.Query(templateDeleteDomainQuery, ID)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomain operation failed. Error %v", err),
}
}
return nil
}
cassandraMetadataPersistence.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"github.com/gocql/gocql"
workflow "github.com/uber/cadence/.gen/go/shared"
"github.com/uber/cadence/common"
"github.com/uber/cadence/common/log"
"github.com/uber/cadence/common/log/tag"
p "github.com/uber/cadence/common/persistence"
"github.com/uber/cadence/common/service/config"
)
const (
templateDomainInfoType = `{` +
`id: ?, ` +
`name: ?, ` +
`status: ?, ` +
`description: ?, ` +
`owner_email: ?, ` +
`data: ? ` +
`}`
templateDomainConfigType = `{` +
`retention: ?, ` +
`emit_metric: ?, ` +
`archival_bucket: ?, ` +
`archival_status: ?,` +
`history_archival_status: ?, ` +
`history_archival_uri: ?, ` +
`visibility_archival_status: ?, ` +
`visibility_archival_uri: ?, ` +
`bad_binaries: ?,` +
`bad_binaries_encoding: ?` +
`}`
templateDomainReplicationConfigType = `{` +
`active_cluster_name: ?, ` +
`clusters: ? ` +
`}`
templateCreateDomainQuery = `INSERT INTO domains (` +
`id, domain) ` +
`VALUES(?, {name: ?}) IF NOT EXISTS`
templateCreateDomainByNameQuery = `INSERT INTO domains_by_name (` +
`name, domain, config, replication_config, is_global_domain, config_version, failover_version) ` +
`VALUES(?, ` + templateDomainInfoType + `, ` + templateDomainConfigType + `, ` + templateDomainReplicationConfigType + `, ?, ?, ?) IF NOT EXISTS`
templateGetDomainQuery = `SELECT domain.name ` +
`FROM domains ` +
`WHERE id = ?`
templateGetDomainByNameQuery = `SELECT domain.id, domain.name, domain.status, domain.description, ` +
`domain.owner_email, domain.data, config.retention, config.emit_metric, ` +
`config.archival_bucket, config.archival_status, ` +
`config.history_archival_status, config.history_archival_uri, ` +
`config.visibility_archival_status, config.visibility_archival_uri, ` +
`config.bad_binaries, config.bad_binaries_encoding, ` +
`replication_config.active_cluster_name, replication_config.clusters, ` +
`is_global_domain, ` +
`config_version, ` +
`failover_version, ` +
`db_version ` +
`FROM domains_by_name ` +
`WHERE name = ?`
templateUpdateDomainByNameQuery = `UPDATE domains_by_name ` +
`SET domain = ` + templateDomainInfoType + `, ` +
`config = ` + templateDomainConfigType + `, ` +
`replication_config = ` + templateDomainReplicationConfigType + `, ` +
`config_version = ? ,` +
`failover_version = ? ,` +
`db_version = ? ` +
`WHERE name = ? ` +
`IF db_version = ? `
templateDeleteDomainQuery = `DELETE FROM domains ` +
`WHERE id = ?`
templateDeleteDomainByNameQuery = `DELETE FROM domains_by_name ` +
`WHERE name = ?`
)
type (
cassandraMetadataPersistence struct {
cassandraStore
currentClusterName string
}
)
// newMetadataPersistence is used to create an instance of the Cassandra-backed MetadataStore implementation
func newMetadataPersistence(cfg config.Cassandra, clusterName string, logger log.Logger) (p.MetadataStore,
error) {
cluster := NewCassandraCluster(cfg.Hosts, cfg.Port, cfg.User, cfg.Password, cfg.Datacenter)
cluster.Keyspace = cfg.Keyspace
cluster.ProtoVersion = cassandraProtoVersion
cluster.Consistency = gocql.LocalQuorum
cluster.SerialConsistency = gocql.LocalSerial
cluster.Timeout = defaultSessionTimeout
session, err := cluster.CreateSession()
if err != nil {
return nil, err
}
return &cassandraMetadataPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
// Close releases the resources held by this object
func (m *cassandraMetadataPersistence) Close() {
if m.session != nil {
m.session.Close()
}
}
// Cassandra does not support conditional updates across multiple tables. For this reason we first insert into
// the domains table and then do a conditional insert into the domains_by_name table. If the conditional write fails we
// delete the orphaned entry from the domains table. That delete could itself fail and leave the orphaned entry behind,
// so a background job may be needed to clean up such orphaned records.
func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {
query := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)
applied, err := query.MapScanCAS(make(map[string]interface{}))
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains table. Error: %v", err),
}
}
if !applied {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed because of uuid collision."),
}
}
query = m.session.Query(templateCreateDomainByNameQuery,
request.Info.Name,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.IsGlobalDomain,
request.ConfigVersion,
request.FailoverVersion,
)
previous := make(map[string]interface{})
applied, err = query.MapScanCAS(previous)
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains_by_name table. Error: %v", err),
}
}
if !applied {
		// Domain already exists. Delete the orphaned domain record before returning to the caller.
if errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {
m.logger.Warn("Unable to delete orphan domain record", tag.Error(errDelete))
}
if domain, ok := previous["domain"].(map[string]interface{}); ok {
msg := fmt.Sprintf("Domain already exists. DomainId: %v", domain["id"])
return nil, &workflow.DomainAlreadyExistsError{
Message: msg,
}
}
return nil, &workflow.DomainAlreadyExistsError{
Message: fmt.Sprintf("CreateDomain operation failed because of conditional failure."),
}
}
return &p.CreateDomainResponse{ID: request.Info.ID}, nil
}
func (m *cassandraMetadataPersistence) GetDomain(request *p.GetDomainRequest) (*p.InternalGetDomainResponse, error) {
var query *gocql.Query
var err error
info := &p.DomainInfo{}
config := &p.InternalDomainConfig{}
replicationConfig := &p.DomainReplicationConfig{}
var replicationClusters []map[string]interface{}
var dbVersion int64
var failoverVersion int64
var configVersion int64
var isGlobalDomain bool
if len(request.ID) > 0 && len(request.Name) > 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name specified in request.",
}
} else if len(request.ID) == 0 && len(request.Name) == 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name are empty.",
}
}
handleError := func(name, ID string, err error) error {
identity := name
if len(ID) > 0 {
identity = ID
}
if err == gocql.ErrNotFound {
return &workflow.EntityNotExistsError{
Message: fmt.Sprintf("Domain %s does not exist.", identity),
}
}
return &workflow.InternalServiceError{
Message: fmt.Sprintf("GetDomain operation failed. Error %v", err),
}
}
domainName := request.Name
if len(request.ID) > 0 {
query = m.session.Query(templateGetDomainQuery, request.ID)
err = query.Scan(&domainName)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
}
var badBinariesData []byte
var badBinariesDataEncoding string
query = m.session.Query(templateGetDomainByNameQuery, domainName)
err = query.Scan(
&info.ID,
&info.Name,
&info.Status,
&info.Description,
&info.OwnerEmail,
&info.Data,
&config.Retention,
&config.EmitMetric,
&config.ArchivalBucket,
&config.ArchivalStatus,
&config.HistoryArchivalStatus,
&config.HistoryArchivalURI,
&config.VisibilityArchivalStatus,
&config.VisibilityArchivalURI,
&badBinariesData,
&badBinariesDataEncoding,
&replicationConfig.ActiveClusterName,
&replicationClusters,
&isGlobalDomain,
&configVersion,
&failoverVersion,
&dbVersion,
)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
if info.Data == nil {
info.Data = map[string]string{}
}
config.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))
replicationConfig.ActiveClusterName = p.GetOrUseDefaultActiveCluster(m.currentClusterName, replicationConfig.ActiveClusterName)
replicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)
replicationConfig.Clusters = p.GetOrUseDefaultClusters(m.currentClusterName, replicationConfig.Clusters)
return &p.InternalGetDomainResponse{
Info: info,
Config: config,
ReplicationConfig: replicationConfig,
IsGlobalDomain: isGlobalDomain,
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
NotificationVersion: dbVersion,
TableVersion: p.DomainTableVersionV1,
}, nil
}
func (m *cassandraMetadataPersistence) UpdateDomain(request *p.InternalUpdateDomainRequest) error {
	var nextVersion int64 = 1
	var currentVersion *int64
	if request.NotificationVersion > 0 {
		nextVersion = request.NotificationVersion + 1
		currentVersion = &request.NotificationVersion
	}
	query := m.session.Query(templateUpdateDomainByNameQuery,
		request.Info.ID,
		request.Info.Name,
		request.Info.Status,
		request.Info.Description,
		request.Info.OwnerEmail,
		request.Info.Data,
		request.Config.Retention,
		request.Config.EmitMetric,
		request.Config.ArchivalBucket,
		request.Config.ArchivalStatus,
		request.Config.HistoryArchivalStatus,
		request.Config.HistoryArchivalURI,
		request.Config.VisibilityArchivalStatus,
		request.Config.VisibilityArchivalURI,
		request.Config.BadBinaries.Data,
		string(request.Config.BadBinaries.GetEncoding()),
		request.ReplicationConfig.ActiveClusterName,
		p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
		request.ConfigVersion,
		request.FailoverVersion,
		nextVersion,
		request.Info.Name,
		currentVersion,
	)
	applied, err := query.ScanCAS()
	if !applied {
		return &workflow.InternalServiceError{
			Message: fmt.Sprintf("UpdateDomain operation encountered a concurrent write."),
		}
	}
	if err != nil {
		return &workflow.InternalServiceError{
			Message: fmt.Sprintf("UpdateDomain operation failed. Error %v", err),
		}
	}
	return nil
}
func (m *cassandraMetadataPersistence) DeleteDomain(request *p.DeleteDomainRequest) error {
var name string
query := m.session.Query(templateGetDomainQuery, request.ID)
err := query.Scan(&name)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(name, request.ID)
}
func (m *cassandraMetadataPersistence) DeleteDomainByName(request *p.DeleteDomainByNameRequest) error {
var ID string
query := m.session.Query(templateGetDomainByNameQuery, request.Name)
err := query.Scan(&ID, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(request.Name, ID)
}
func (m *cassandraMetadataPersistence) ListDomains(request *p.ListDomainsRequest) (*p.InternalListDomainsResponse, error) {
panic("cassandraMetadataPersistence do not support list domain operation.")
}
func (m *cassandraMetadataPersistence) GetMetadata() (*p.GetMetadataResponse, error) {
panic("cassandraMetadataPersistence do not support get metadata operation.")
}
func (m *cassandraMetadataPersistence) deleteDomain(name, ID string) error {
query := m.session.Query(templateDeleteDomainByNameQuery, name)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomainByName operation failed. Error %v", err),
}
}
query = m.session.Query(templateDeleteDomainQuery, ID)
if err := query.Exec(); err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("DeleteDomain operation failed. Error %v", err),
}
}
return nil
}
bikeshareproject.py | ### ******************************************** Project Assignment on " Explore US Bikeshare Data " ***************************************************************
import time
import datetime
import pandas as pd
import statistics as st
## All the included data filenames
#1.chicago = 'chicago.csv'
#2.new_york_city = 'new_york_city.csv'
#3.washington = 'washington.csv'
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    city = input('\nWhich city would you like to see data for? Simply type the name \n-> Chicago \n-> New York\n-> Washington\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
            break
        else:
            city = input('Sorry, I do not understand your input. Please input either '
                         'Chicago, New York, or Washington.\n(Enter a correct city):\t ').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for month (all, january, february, ... , june)
    month = input('\nWhich month would you like to see data for? Simply type the name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
    # .lower() makes the input case-insensitive
    while(True):
        if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
            break
        else:
            month = input('\nPlease enter a valid month, otherwise no results can be shown:\n').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    day = input('Which day would you like to see data for? Simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data of all days\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
            break
        else:
            day = input('\nPlease enter a valid day, otherwise no results can be shown.\nEnter a correct day:\t ').lower()
            # lower() makes the input case-insensitive
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
    # to_datetime converts the timestamp columns into pandas datetime objects
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
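        # e.g. 'march' -> 3, matching the 1-based month numbers produced by dt.month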
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.weekday_name == day.title()]
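        # Note: Series.dt.weekday_name was removed in newer pandas releases; Series.dt.day_name() is the modern equivalent.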
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
    print('\nCalculating the most frequent times of travel...\nLoading, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
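    # Note: statistics.mode raises StatisticsError on a tie (before Python 3.8); df['Start Station'].mode()[0] is a more robust alternative.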
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
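    # Joining start and end station names into one string lets value_counts() rank complete routes.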
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
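    # Break the total number of seconds into days, hours, minutes and seconds with integer division.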
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** Number of male users are {} *********************\n'.format(int(count_male)))
print('\n******************** Number of female users are {} *********************\n'.format(int(count_female)))
# Display earliest, most recent, and most common year of birth
if('Birth Year' in df):
earliest_year = df['Birth Year'].min()
recent_year = df['Birth Year'].max()
most_similar_birth_year = st.mode(df['Birth Year'])
print('\n Oldest Birth Year is: {}\n Youngest Birth Year is: {}\n Most popular Birth Year is: {}'.format(int(earliest_year), int(recent_year), int(most_similar_birth_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df, month, day)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\n******************** Would you like to restart? Enter yes or no. *********************\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
    main()
bikeshareproject.py | ### ******************************************** Project Assignment on " Explore US Bikeshare Data " ***************************************************************
import time
import datetime
import pandas as pd
import statistics as st
## All the included data filenames
#1.chicago = 'chicago.csv'
#2.new_york_city = 'new_york_city.csv'
#3.washington = 'washington.csv'
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    city = input('\nWhich city would you like to see data for? Simply type the name \n-> Chicago \n-> New York\n-> Washington\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
            break
        else:
            city = input('Sorry, I do not understand your input. Please input either '
                         'Chicago, New York, or Washington.\n(Enter a correct city):\t ').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for month (all, january, february, ... , june)
    month = input('\nWhich month would you like to see data for? Simply type the name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
    # .lower() makes the input case-insensitive
    while(True):
        if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
            break
        else:
            month = input('\nPlease enter a valid month, otherwise no results can be shown:\n').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    day = input('Which day would you like to see data for? Simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data of all days\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
            break
        else:
            day = input('\nPlease enter a valid day, otherwise no results can be shown.\nEnter a correct day:\t ').lower()
            # lower() makes the input case-insensitive
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
    # to_datetime converts the timestamp columns into pandas datetime objects
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.weekday_name == day.title()]
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
    print('\nCalculating the most frequent times of travel...\nLoading, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time)) |
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** Number of male users are {} *********************\n'.format(int(count_male)))
print('\n******************** Number of female users are {} *********************\n'.format(int(count_female)))
# Display earliest, most recent, and most common year of birth
if('Birth Year' in df):
earliest_year = df['Birth Year'].min()
recent_year = df['Birth Year'].max()
most_similar_birth_year = st.mode(df['Birth Year'])
print('\n Oldest Birth Year is: {}\n Youngest Birth Year is: {}\n Most popular Birth Year is: {}'.format(int(earliest_year), int(recent_year), int(most_similar_birth_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df, month, day)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\n******************** Would you like to restart? Enter yes or no. *********************\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
    main()
bikeshareproject.py | ### ******************************************** Project Assignment on " Explore US Bikeshare Data " ***************************************************************
import time
import datetime
import pandas as pd
import statistics as st
## All the included data filenames
#1.chicago = 'chicago.csv'
#2.new_york_city = 'new_york_city.csv'
#3.washington = 'washington.csv'
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    city = input('\nWhich city would you like to see data for? Simply type the name \n-> Chicago \n-> New York\n-> Washington\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
            break
        else:
            city = input('Sorry, I do not understand your input. Please input either '
                         'Chicago, New York, or Washington.\n(Enter a correct city):\t ').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for month (all, january, february, ... , june)
    month = input('\nWhich month would you like to see data for? Simply type the name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
    # .lower() makes the input case-insensitive
    while(True):
        if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
            break
        else:
            month = input('\nPlease enter a valid month, otherwise no results can be shown:\n').lower()
            # lower() makes the input case-insensitive
    # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    day = input('Which day would you like to see data for? Simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data of all days\n').lower()
    # lower() makes the input case-insensitive
    while(True):
        if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
            break
        else:
            day = input('\nPlease enter a valid day, otherwise no results can be shown.\nEnter a correct day:\t ').lower()
            # lower() makes the input case-insensitive
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# to_datetime converts the 'Start Time' strings into datetime objects
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.day_name() == day.title()]
#print 5 rows.
print(df.head())
return df
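# Illustrative usage sketch (assumes the chicago.csv data file is present locally):
#   df = load_data('chicago', 'march', 'friday')
# loads chicago.csv, keeps only trips whose Start Time falls in March,
# and then keeps only trips that start on a Friday.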
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating the most frequent times of travel...\nLoading, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.day_name().value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
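# A minimal alternative sketch for the seconds-to-days/hours/minutes arithmetic above,
# using divmod (not part of the original script, shown only for comparison):
#   minutes, seconds = divmod(total_travel_time, 60)
#   hours, minutes = divmod(minutes, 60)
#   days, hours = divmod(hours, 24)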
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** Number of male users are {} *********************\n'.format(int(count_male)))
print('\n******************** Number of female users are {} *********************\n'.format(int(count_female)))
# Display earliest, most recent, and most common year of birth
if('Birth Year' in df):
earliest_year = df['Birth Year'].min()
recent_year = df['Birth Year'].max()
most_similar_birth_year = st.mode(df['Birth Year'])
print('\n Oldest Birth Year is: {}\n Youngest Birth Year is: {}\n Most popular Birth Year is: {}'.format(int(earliest_year), int(recent_year), int(most_similar_birth_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df, month, day)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\n******************** Would you like to restart? Enter yes or no. *********************\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main() | break | conditional_block |
bikeshareproject.py | ### ******************************************** Project Assignment on " Explore US Bikeshare Data " ***************************************************************
import time
import datetime
import pandas as pd
import statistics as st
##All the include Filenames
#1.chicago = 'chicago.csv'
#2.new_york_city = 'new_york_city.csv'
#3.washington = 'washington.csv'
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('\nWhich city would you like to see data for? Simply type the name \n-> Chicago \n-> New York\n-> Washington\n').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
while(True):
if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
break
else:
city = input('Sorry, I do not understand your input. Please input either '
'Chicago, New York, or Washington.\n(Enter Correct city):\t ').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
# TO DO: get user input for month (all, january, february, ... , june)
month = input('\nWhich month would you like data for? Simply type the name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n-> all\n').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
while(True):
if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
break
else:
month = input('\nPlease enter a valid month, otherwise no results can be shown:\n').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Which day would you like data for? Simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data for all days\n').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
while(True):
if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
break
else:
day = input('\nPlease enter a valid day, otherwise no results can be shown:\nEnter correct day:\t ').lower()
#lower() converts the input to lowercase so any capitalisation is accepted
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# to_datetime converts the 'Start Time' strings into datetime objects
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.day_name() == day.title()]
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating the most frequent times of travel...\nLoading, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.day_name().value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
|
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df, month, day)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\n******************** Would you like to restart? Enter yes or no. *********************\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main() | """Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** Number of male users are {} *********************\n'.format(int(count_male)))
print('\n******************** Number of female users are {} *********************\n'.format(int(count_female)))
# Display earliest, most recent, and most common year of birth
if('Birth Year' in df):
earliest_year = df['Birth Year'].min()
recent_year = df['Birth Year'].max()
most_similar_birth_year = st.mode(df['Birth Year'])
print('\n Oldest Birth Year is: {}\n Youngest Birth Year is: {}\n Most popular Birth Year is: {}'.format(int(earliest_year), int(recent_year), int(most_similar_birth_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40) | identifier_body |
bparser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
import io
import re
from bibtexparser.bibdatabase import BibDatabase
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class | (object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for reading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
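# Illustrative usage sketch (the file name is hypothetical):
#   with open('references.bib') as bibtex_file:
#       bib_database = BibTexParser().parse_file(bibtex_file)
#   print(bib_database.entries)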
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
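# Illustrative sketch of the bundling behaviour above (input is hypothetical): given
#   @article{smith2000, title = {A}}
#   @book{jones2001, title = {B}}
# lines are appended to `record` until the next line starting with '@' appears; the
# accumulated record is then handed to _parse_record(), and the final record is
# flushed after the loop.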
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
cnt = 0
for i in range(0, len(val)):
if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break
if i == len(val) - 1:
return True
else:
return False
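# Illustrative examples of the span check above (not part of the original module):
#   _full_span('{Deep Learning}')    -> True   (the opening brace closes at the very end)
#   _full_span('{Deep} {Learning}')  -> False  (the first brace pair closes early)
# so _strip_braces only removes braces that enclose the whole value.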
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Substitute string definitions')
if not val:
return ''
for k in list(self.bib_database.strings.keys()):
if val.lower() == k:
val = self.bib_database.strings[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _string_subst_partial(self, val):
""" Substitute string definitions inside larger expressions
:param val: a value
:type val: string
:returns: string -- value
"""
def repl(m):
k = m.group('id')
replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k
pre = '"' if m.group('pre') != '"' else ''
post = '"' if m.group('post') != '"' else ''
return pre + replacement + post
logger.debug('Substitute string definitions inside larger expressions')
if '#' not in val:
return val
# TODO?: Does not match two subsequent variables or strings, such as "start" # foo # bar # "end" or "start" # "end".
# TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}
# TODO: Does not support strings like: "te#s#t"
return self.replace_all_re.sub(repl, val)
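# Illustrative example of the partial substitution above (values are hypothetical):
# with self.bib_database.strings == {'jan': 'January'}, e.g. from @string{jan = "January"},
# a raw field value such as
#   jan # " 2017"
# is rewritten to
#   "January 2017"
# before _add_val strips the enclosing quotes.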
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogenise alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if self.homogenise_fields:
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
return key
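# Illustrative examples of the key homogenisation above (default settings assumed):
#   _add_key('@ARTICLE')  -> 'article'
#   _add_key('Keywords')  -> 'keyword'   (via alt_dict)
#   _add_key('URL')       -> 'link'      (via alt_dict)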
| BibTexParser | identifier_name |
bparser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
import io
import re
from bibtexparser.bibdatabase import BibDatabase
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class BibTexParser(object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for reading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
|
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Substitute string definitions')
if not val:
return ''
for k in list(self.bib_database.strings.keys()):
if val.lower() == k:
val = self.bib_database.strings[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _string_subst_partial(self, val):
""" Substitute string definitions inside larger expressions
:param val: a value
:type val: string
:returns: string -- value
"""
def repl(m):
k = m.group('id')
replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k
pre = '"' if m.group('pre') != '"' else ''
post = '"' if m.group('post') != '"' else ''
return pre + replacement + post
logger.debug('Substitute string definitions inside larger expressions')
if '#' not in val:
return val
# TODO?: Does not match two subsequent variables or strings, such as "start" # foo # bar # "end" or "start" # "end".
# TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}
# TODO: Does not support strings like: "te#s#t"
return self.replace_all_re.sub(repl, val)
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogenise alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if self.homogenise_fields:
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
return key
| cnt = 0
for i in range(0, len(val)):
if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break
if i == len(val) - 1:
return True
else:
return False | identifier_body |
bparser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
import io
import re
from bibtexparser.bibdatabase import BibDatabase
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class BibTexParser(object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for reading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
cnt = 0
for i in range(0, len(val)):
|
if i == len(val) - 1:
return True
else:
return False
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Substitute string definitions')
if not val:
return ''
for k in list(self.bib_database.strings.keys()):
if val.lower() == k:
val = self.bib_database.strings[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _string_subst_partial(self, val):
""" Substitute string definitions inside larger expressions
:param val: a value
:type val: string
:returns: string -- value
"""
def repl(m):
k = m.group('id')
replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k
pre = '"' if m.group('pre') != '"' else ''
post = '"' if m.group('post') != '"' else ''
return pre + replacement + post
logger.debug('Substitute string definitions inside larger expressions')
if '#' not in val:
return val
# TODO?: Does not match two subsequent variables or strings, such as "start" # foo # bar # "end" or "start" # "end".
# TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}
# TODO: Does not support strings like: "te#s#t"
return self.replace_all_re.sub(repl, val)
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogenise alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if self.homogenise_fields:
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
return key
| if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break | conditional_block |
bparser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Original source: github.com/okfn/bibserver
# Authors:
# markmacgillivray
# Etienne Posthumus (epoz)
# Francois Boulogne <fboulogne at april dot org>
import sys
import logging
import io
import re
from bibtexparser.bibdatabase import BibDatabase
logger = logging.getLogger(__name__)
__all__ = ['BibTexParser']
if sys.version_info >= (3, 0):
from io import StringIO
ustr = str
else:
from StringIO import StringIO
ustr = unicode
class BibTexParser(object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for reading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search('\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
                    inval = val
                else:
                    logger.debug('The line is the end of the record.')
                    d[key] = self._add_val(val)
            elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
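                # the ',\n' separator removed by the split above is restored as ', '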
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
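        # True when the opening '{' is only balanced at the very last character,
        # i.e. the outermost braces enclose the whole string and can be stripped safely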
cnt = 0
for i in range(0, len(val)):
if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break
if i == len(val) - 1:
return True
else:
return False
def _string_subst(self, val):
""" Substitute string definitions
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Substitute string definitions')
if not val:
return ''
for k in list(self.bib_database.strings.keys()):
if val.lower() == k:
val = self.bib_database.strings[k]
if not isinstance(val, ustr):
val = ustr(val, self.encoding, 'ignore')
return val
def _string_subst_partial(self, val):
""" Substitute string definitions inside larger expressions
:param val: a value
:type val: string
:returns: string -- value
"""
def repl(m):
k = m.group('id')
replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k
pre = '"' if m.group('pre') != '"' else ''
post = '"' if m.group('post') != '"' else ''
return pre + replacement + post
logger.debug('Substitute string definitions inside larger expressions')
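        # roughly: with @string{jan = "January"} defined, a value written as a
        # concatenation such as  jan # " 2014"  gets the named string expanded in place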
if '#' not in val:
return val
# TODO?: Does not match two subsequent variables or strings, such as "start" # foo # bar # "end" or "start" # "end".
# TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}
# TODO: Does not support strings like: "te#s#t"
return self.replace_all_re.sub(repl, val)
def _add_val(self, val):
""" Clean instring before adding to dictionary
:param val: a value
:type val: string
:returns: string -- value
"""
if not val or val == "{}":
return ''
val = self._strip_braces(val)
val = self._strip_quotes(val)
val = self._strip_braces(val)
val = self._string_subst(val)
return val
def _add_key(self, key):
""" Add a key and homogeneize alternative forms.
:param key: a key
:type key: string
:returns: string -- value
"""
key = key.strip().strip('@').lower()
if self.homogenise_fields:
if key in list(self.alt_dict.keys()):
key = self.alt_dict[key]
if not isinstance(key, ustr):
return ustr(key, 'utf-8')
else:
            return key
# fill_data.py
# -*- coding: utf-8 -*-
'''
All the information about a file
'''
import urllib, re
from flask import g, Markup
from flask.ext.babelex import gettext as _
from urlparse import urlparse
from itertools import izip_longest, chain
from foofind.services import *
from foofind.blueprints.files.helpers import *
from foofind.utils import mid2url, mid2hex, hex2mid, to_seconds, u, logging
from foofind.utils.content_types import *
from foofind.utils.filepredictor import guess_doc_content_type
from foofind.datafixes import content_fixes
from foofind.utils.splitter import slugify
from foofind.utils.seo import seoize_text
from foofind.utils.html import clean_html
def init_data(file_data, ntts=[]):
'''
    Initializes the file's data dictionary
'''
content_fixes(file_data)
file_data["id"]=mid2url(file_data['_id'])
file_data['name']=file_data['src'].itervalues().next()['url']
file_se = file_data["se"] if "se" in file_data else None
ntt = ntts[int(float(file_se["_id"]))] if file_se and "_id" in file_se and file_se["_id"] in ntts else None
if ntt:
file_se["info"] = ntt
file_se["rel"] = [ntts[relid] for relids in ntt["r"].itervalues() for relid in relids if relid in ntts] if "r" in ntt else []
return {"file":file_data,"view":{}}
def choose_filename(f,text_cache=None):
'''
    Picks the correct filename
'''
srcs = f['file']['src']
fns = f['file']['fn']
chosen = None
max_count = -1
current_weight = -1
    if text_cache and text_cache[0] in fns: # if text is actually an fn ID
chosen = text_cache[0]
else:
for hexuri,src in srcs.items():
if 'bl' in src and src['bl']!=0:
continue
for crc,srcfn in src['fn'].items():
                if crc not in fns: # for sources that have a name that is not in the file
continue
                # if it has no name it is not taken into account
m = srcfn['m'] if len(fns[crc]['n'])>0 else 0
if 'c' in fns[crc]:
fns[crc]['c']+=m
else:
fns[crc]['c']=m
text_weight = 0
if text_cache:
fn_parts = slugify(fns[crc]['n']).strip().split(" ")
if len(fn_parts)>0:
text_words = slugify(text_cache[0]).split(" ")
                        # score the number and order of matches
last_pos = -1
max_length = length = 0
occurrences = [0]*len(text_words)
for part in fn_parts:
pos = text_words.index(part) if part in text_words else -1
if pos != -1 and (last_pos==-1 or pos==last_pos+1):
length += 1
else:
if length > max_length: max_length = length
length = 0
if pos != -1:
occurrences[pos]=1
last_pos = pos
if length > max_length: max_length = length
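                        # score: each distinct query word found is worth 100, the longest run of
                        # filename words appearing in query order is the tie-breaker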
text_weight = sum(occurrences)*100 + max_length
f['file']['fn'][crc]['tht'] = text_weight
better = fns[crc]['c']>max_count
if text_weight > current_weight or (better and text_weight==current_weight):
current_weight = text_weight
chosen = crc
max_count = fns[crc]['c']
f['view']['url'] = mid2url(hex2mid(f['file']['_id']))
f['view']['fnid'] = chosen
if chosen:
filename = fns[chosen]['n']
ext = fns[chosen]['x']
else: #uses filename from src
filename = ""
for hexuri,src in srcs.items():
if src['url'].find("/")!=-1:
filename = src['url']
if filename=="":
return
filename = filename[filename.rfind("/")+1:]
ext = filename[filename.rfind(".")+1:]
filename = filename[0:filename.rfind(".")]
    #TODO if no file name is given, look in the metadata to build one (e.g. series - title episode)
filename = extension_filename(filename,ext)
f['view']['fn'] = filename.replace("?", "")
    f['view']['qfn'] = qfn = u(filename).encode("UTF-8") # file name escaped to build the download urls
f['view']['pfn'] = urllib.quote(qfn).replace(" ", "%20") # P2P filename
nfilename = seoize_text(filename, " ",True, 0)
f['view']['nfn'] = nfilename
    # add the file name as a keyword
g.keywords.update(set(keyword for keyword in nfilename.split(" ") if len(keyword)>1))
    # file name with the words matching the search highlighted
if text_cache:
f['view']['fnh'], f['view']['fnhs'] = highlight(text_cache[2],filename,True)
else:
        f['view']['fnh'] = filename # this is only for download, which never has text
    return current_weight>0 # indicates whether the searched text was found
def build_source_links(f):
'''
    Builds the links properly
'''
def get_domain(src):
'''
        Returns the domain of a URL
'''
url_parts=urlparse(src).netloc.split('.')
i=len(url_parts)-1
if len(url_parts[i])<=2 and len(url_parts[i-1])<=3:
return url_parts[i-2]+'.'+url_parts[i-1]+'.'+url_parts[i]
else:
return url_parts[i-1]+'.'+url_parts[i];
f['view']['action']='download'
f['view']['sources']={}
max_weight=0
icon=""
any_downloader=False
    # grouping of sources
source_groups = {}
file_sources = f['file']['src'].items()
file_sources.sort(key=lambda x:x[1]["t"])
for hexuri,src in file_sources:
if not src.get('bl',None) in (0, None):
continue
url_pattern=downloader=join=False
count=0
part=url=""
source_data=g.sources[src["t"]] if "t" in src and src["t"] in g.sources else None
        if source_data is None: # if the file's source does not exist
logging.error("El fichero contiene un origen inexistente en la tabla \"sources\": %s" % src["t"], extra={"file":f})
if feedbackdb.initialized:
feedbackdb.notify_source_error(f['file']["_id"], f['file']["s"])
continue
elif "crbl" in source_data and int(source_data["crbl"])==1: #si el origen esta bloqueado
continue
elif "w" in source_data["g"] or "f" in source_data["g"] or "s" in source_data["g"]: #si es descarga directa o streaming
link_weight=1
tip=source_data["d"]
icon="web"
source_groups[icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
            # when in doubt, streaming is preferred
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
        # torrenthash before torrent because it is a more specific case
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
                source_groups[icon] = tip # the magnet link has lower priority for the text
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnutella":
link_weight=0.2
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:sha1:"+src['url']
join=True
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="eD2k":
downloader=True
link_weight=0.1
tip="eD2k"
source=icon="ed2k"
url="ed2k://|file|"+f['view']['pfn']+"|"+str(f['file']['z'] if "z" in f["file"] else 1)+"|"+src['url']+"|/"
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="Tiger":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:tiger:"+src['url']
join=True
elif source_data["d"]=="MD5":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:md5:"+src['url']
source_groups[icon] = tip
join=True
else:
continue
if source in f['view']['sources']:
view_source = f['view']['sources'][source]
else:
view_source = f['view']['sources'][source] = {}
view_source.update(source_data)
if downloader:
any_downloader = True
view_source['downloader']=1
elif not 'downloader' in view_source:
view_source['downloader']=0
view_source['tip']=tip
view_source['icon']=icon
view_source['icons']=source_data.get("icons",False)
view_source['join']=join
view_source['source']="streaming" if "s" in source_data["g"] else "direct_download" if "w" in source_data["g"] else "P2P" if "p" in source_data["g"] else ""
        # so the count is not clobbered when there are several files from the same source
if not 'count' in view_source or count>0:
view_source['count']=count
if not "parts" in view_source:
view_source['parts']=[]
if not 'urls' in view_source:
view_source['urls']=[]
if part:
view_source['parts'].append(part)
if url:
if url_pattern:
view_source['urls']=[source_data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
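                # assemble a single magnet URI from the collected urn parts, adding the display
                # name (dn) and, when the size is known, the exact length (xl)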
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
def choose_file_type(f):
'''
    Picks the file type
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
    Gets the images for the files that have them
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
f["view"]["images_id"] = images_id
def get_int(adict, key):
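    # best-effort int coercion: ints pass through, floats are truncated, strings are parsed
    # up to the first non-digit character; returns None when nothing usable is found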
if not key in adict:
return None
value = adict[key]
if isinstance(value, (int,long)):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, basestring):
result = None
for c in value:
digit = ord(c)-48
if 0<=digit<=9:
if result:
result *= 10
else:
result = 0
result += digit
else:
break
return result
return None
def get_float(adict, key):
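    # best-effort float coercion: accepts a leading run of digits with a single '.' or ','
    # as decimal separator; returns None when the value cannot be parsed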
if not key in adict:
return None
value = adict[key]
if isinstance(value, float):
return value
elif isinstance(value, (int,long)):
return float(value)
elif isinstance(value, basestring):
result = ""
decimal = False
for c in value:
if c in "0123456789":
result += c
elif c in ".," and not decimal:
result += "."
decimal = True
else:
break
if result:
try:
return float(result)
except:
pass
return None
def format_metadata(f,text_cache, search_text_shown=False):
'''
    Formats the files' metadata
'''
text = text_cache[2] if text_cache else None
view_md = f['view']['md'] = {}
view_searches = f["view"]["searches"]={}
file_type = f['view']['file_type'] if 'file_type' in f['view'] else None
if 'md' in f['file']:
        # if it comes in type:metadata form, strip the type
file_md = {(meta.split(":")[-1] if ":" in meta else meta): value for meta, value in f['file']['md'].iteritems()}
        # Duration for video and images
seconds = get_float(file_md, "seconds")
minutes = get_float(file_md, "minutes")
hours = get_float(file_md, "hours")
        # If no duration was received any other way, try length and duration
if seconds==minutes==hours==None:
seconds = get_float(file_md, "length") or get_float(file_md, "duration")
duration = [hours or 0, minutes or 0, seconds or 0] # h, m, s
if any(duration):
carry = 0
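            # carry overflow from seconds up to hours, e.g. [0, 0, 90] -> [0, 1, 30]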
for i in xrange(len(duration)-1,-1,-1):
unit = long(duration[i]) + carry
duration[i] = unit%60
carry = unit/60
view_md["length"] = "%d:%02d:%02d" % tuple(duration) if duration[0] > 0 else "%02d:%02d" % tuple(duration[1:])
        # Size for videos and images
width = get_int(file_md, "width")
height = get_int(file_md, "height")
if width and height:
view_md["size"] = "%dx%dpx" % (width, height)
        # Metadata that does not change
try:
view_md.update(
(meta, file_md[meta]) for meta in
(
"folders","description","fileversion","os","files","pages","format",
"seeds","leechs","composer","publisher","encoding","director","writer","starring","producer","released"
) if meta in file_md
)
view_searches.update(
(meta, seoize_text(file_md[meta],"_",False)) for meta in
(
"folders","os","composer","publisher","director","writer","starring","producer"
) if meta in file_md
)
except BaseException as e:
logging.warn(e)
# thumbnail
if "thumbnail" in file_md:
f["view"]["thumbnail"] = file_md["thumbnail"]
        # metadata that goes by other names
try:
view_md.update(("tags", file_md[meta]) for meta in ("keywords", "tags", "tag") if meta in file_md)
if "tags" in view_md and isinstance(view_md["tags"], basestring):
view_searches["tags"] = []
view_md.update(("comments", file_md[meta]) for meta in ("comments", "comment") if meta in file_md)
view_md.update(("track", file_md[meta]) for meta in ("track", "track_number") if meta in file_md)
view_md.update(("created_by", file_md[meta]) for meta in ("created_by", "encodedby","encoder") if meta in file_md)
view_md.update(("language", file_md[meta]) for meta in ("language", "lang") if meta in file_md)
view_md.update(("date", file_md[meta]) for meta in ("published", "creationdate") if meta in file_md)
view_md.update(("trackers", "\n".join(file_md[meta].split(" "))) for meta in ("trackers", "tracker") if meta in file_md and isinstance(file_md[meta], basestring))
view_md.update(("hash", file_md[meta]) for meta in ("hash", "infohash") if meta in file_md)
view_md.update(("visualizations", file_md[meta]) for meta in ("count", "viewCount") if meta in file_md)
if "unpackedsize" in file_md:
view_md["unpacked_size"]=file_md["unpackedsize"]
if "privateflag" in file_md:
view_md["private_file"]=file_md["privateflag"]
except BaseException as e:
logging.warn(e)
#torrents -> filedir filesizes filepaths
if "filepaths" in file_md:
filepaths = {}
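            # build a nested tree: directory keys are prefixed with '/', leaves map file name -> size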
for path, size in izip_longest(u(file_md["filepaths"]).split("///"), u(file_md.get("filesizes","")).split(" "), fillvalue=None):
                # do not allow sizes without a file
if not path: break
parts = path.strip("/").split("/")
                # create subdirectories
relative_path = filepaths
for part in parts[:-1]:
if "/"+part not in relative_path:
relative_path["/"+part] = {}
relative_path = relative_path["/"+part]
                # if the directory already exists, do nothing
if "/"+parts[-1] in relative_path:
pass
                # if the last level repeats, it is a directory (content error)
elif parts[-1] in relative_path:
relative_path["/"+parts[-1]] = {}
del relative_path[parts[-1]]
else:
relative_path[parts[-1]] = size
if "filedir" in file_md:
filepaths = {"/"+u(file_md["filedir"]).strip("/"):filepaths}
if filepaths:
view_md["filepaths"] = filepaths
view_searches["filepaths"] = {}
        # Multimedia metadata
try:
            # extract the video and/or audio codec
if "video_codec" in file_md: #si hay video_codec se concatena el audio_codec detras si es necesario
view_md["codec"]=file_md["video_codec"]+" "+file_md["audio_codec"] if "audio_codec" in file_md else file_md["video_codec"]
            else: # otherwise they are added directly
view_md.update(("codec", file_md[meta]) for meta in ("audio_codec", "codec") if meta in file_md)
if file_type in ("audio", "video", "image"):
view_md.update((meta, file_md[meta]) for meta in ("genre", "track", "artist", "author", "colors") if meta in file_md)
view_searches.update((meta, seoize_text(file_md[meta], "_", False)) for meta in ("artist", "author") if meta in file_md)
except BaseException as e:
logging.warn(e)
        # Do not show the title if it equals the file name
if "name" in file_md:
title = u(file_md["name"])
elif "title" in file_md:
title = u(file_md["title"])
else:
title = f['view']['nfn']
if title:
show_title = True
text_longer = title
text_shorter = f["view"]["fn"]
if len(text_shorter)>len(text_longer):
text_longer, text_shorter = text_shorter, text_longer
if text_longer.startswith(text_shorter):
text_longer = text_longer[len(text_shorter):]
if len(text_longer)==0 or (len(text_longer)>0 and text_longer.startswith(".") and text_longer[1:] in EXTENSIONS):
show_title = False
if show_title:
view_md["title"] = title
view_searches["title"] = seoize_text(title, "_", False)
        # Metadata that changes or is specific to a type
try:
if "date" in view_md: #intentar obtener una fecha válida
try:
view_md["date"]=format_datetime(datetime.fromtimestamp(strtotime(view_md["date"])))
except:
del view_md["date"]
if file_type == 'audio': #album, year, bitrate, seconds, track, genre, length
if 'album' in file_md:
album = u(file_md["album"])
year = get_int(file_md, "year")
if album:
view_md["album"] = album + (" (%d)"%year if year and 1900<year<2100 else "")
view_searches["album"] = seoize_text(album, "_", False)
                if 'bitrate' in file_md: # bitrate, or bitrate - soundtype, or bitrate - soundtype - channels
bitrate = get_int(file_md, "bitrate")
if bitrate:
soundtype=" - %s" % file_md["soundtype"] if "soundtype" in file_md else ""
channels = get_float(file_md, "channels")
channels=" (%g %s)" % (round(channels,1),_("channels")) if channels else ""
view_md["quality"] = "%g kbps %s%s" % (bitrate,soundtype,channels)
elif file_type == 'document': #title, author, pages, format, version
if "format" in file_md:
view_md["format"] = "%s%s" % (file_md["format"]," %s" % file_md["formatversion"] if "formatversion" in file_md else "")
version = []
if "formatVersion" in file_md:
version.append(u(file_md["formatVersion"]))
elif "version" in file_md:
version.append(u(file_md["version"]))
if "revision" in file_md:
version.append(u(file_md["revision"]))
if version:
view_md["version"] = " ".join(version)
elif file_type == 'image': #title, artist, description, width, height, colors
pass
elif file_type == 'software': #title, version, fileversion, os
if "title" in view_md and "version" in file_md:
view_md["title"] += " %s" % file_md["version"]
view_searches["title"] += " %s" % seoize_text(file_md["version"], "_", False)
elif file_type == 'video':
quality = []
framerate = get_int(file_md, "framerate")
if framerate:
quality.append("%d fps" % framerate)
                if 'codec' in view_md: # if a codec was already set, it is now shown only under quality
quality.append(u(view_md["codec"]))
del view_md["codec"]
if quality:
view_md["quality"] = " - ".join(quality)
if "series" in file_md:
series = u(file_md["series"])
if series:
safe_series = seoize_text(series, "_", False)
view_md["series"] = series
view_searches["series"]="%s_%s"%(safe_series,"(series)")
season = get_int(file_md, "season")
if season:
view_md["season"] = season
view_searches["season"]="%s_(s%d)"%(safe_series,season)
episode = get_int(file_md, "episode")
if episode:
view_md["episode"] = episode
view_searches["episode"]="%s_(s%de%d)"%(safe_series,season,episode)
except BaseException as e:
logging.exception("Error obteniendo metadatos especificos del tipo de contenido.")
view_mdh=f['view']['mdh']={}
for metadata,value in view_md.items():
if isinstance(value, basestring):
value = clean_html(value)
if not value:
del view_md[metadata]
continue
view_md[metadata]=value
            # highlight content matching the search, for not-too-long texts
if len(value)<500:
view_mdh[metadata]=highlight(text,value) if text and len(text)<100 else value
        elif isinstance(value, float): # there is no float-type metadata
view_md[metadata]=str(int(value))
else:
view_md[metadata]=value
    # TODO: show metadata containing searched words when they do not appear in what is shown
def embed_info(f):
'''
    Adds the embed information
'''
embed_width = 560
embed_height = 315
embed_code = None
for src_id, src_data in f["file"]["src"].iteritems():
source_id = src_data["t"]
source_data = g.sources.get(source_id, None)
if not (source_data and source_data.get("embed_active", False) and "embed" in source_data):
continue
try:
embed_code = source_data["embed"]
            # check whether the content type can be embedded
embed_cts = source_data["embed_cts"] if "embed_cts" in source_data else DEFAULT_EMBED_CTS
if not f["view"]["ct"] in embed_cts: continue
embed_groups = ()
            # url taken directly from the sources
if "source_id" in f["view"] and f["view"]["source_id"]:
embed_groups = {"id": f["view"]["source_id"]}
elif "url_embed_regexp" in source_data and source_data["url_embed_regexp"]:
                # check whether the url can be used for embedding
embed_url = src_data["url"]
regexp = source_data["url_embed_regexp"]
embed_match = cache.regexp(regexp).match(embed_url)
if embed_match is None:
continue
embed_groups = embed_match.groupdict()
if "%s" in embed_code and "id" in embed_groups: # Modo simple, %s intercambiado por el id
embed_code = embed_code % (
                    # Workaround for embeds with several %s
                    # no replace() is done, so that escapes ('\%s') keep working
(embed_groups["id"],) * embed_code.count("%s")
)
else:
                # full mode: %(variable)s swapped for named groups
replace_dict = dict(f["file"]["md"])
replace_dict["width"] = embed_width
replace_dict["height"] = embed_height
replace_dict.update(embed_groups)
try:
embed_code = embed_code % replace_dict
except KeyError as e:
                    # do not log errors caused by missing 'special' metadata
if all(i.startswith("special:") for i in e.args):
continue
raise e
except BaseException as e:
logging.exception(e)
continue
f["view"]["embed"] = embed_code
f["view"]["play"] = (source_data.get("embed_disabled", ""), source_data.get("embed_enabled", ""))
break
def fill_data(file_data, text=None, ntts={}):
'''
    Adds the data needed to display the files
'''
if text:
slug_text = slugify(text)
text = (text, slug_text, frozenset(slug_text.split(" ")))
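        # text_cache triple used below: (raw text, slugified text, frozenset of slugified words)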
    # make sure sources and image-server data are loaded before starting
fetch_global_data()
f=init_data(file_data, ntts)
choose_file_type(f)
    # while picking the file name, find out whether the searched text shows up
search_text_shown = choose_filename(f,text)
build_source_links(f)
embed_info(f)
get_images(f)
    # if needed, show extra metadata containing the searched text
format_metadata(f,text, search_text_shown)
return f
def secure_fill_data(file_data,text=None, ntts={}):
'''
    Handles errors in fill_data
'''
try:
return fill_data(file_data,text,ntts)
except BaseException as e:
logging.exception("Fill_data error on file %s: %s"%(str(file_data["_id"]),repr(e)))
return None
def get_file_metadata(file_id, file_name=None):
'''
    Fetches the file from the database and fills in its metadata.
    @type file_id: mongoid
    @param file_id: mongo id of the file
    @type file_name: basestring
    @param file_name: name of the file
    @rtype dict
    @return Dictionary of file data with metadata
    @raise DatabaseError: if the database connection fails
    @raise FileNotExist: if the file does not exist or has been blocked
    @raise FileRemoved: if the file has been removed from its source
    @raise FileFoofindRemoved: if the file has been blocked by foofind
    @raise FileUnknownBlock: if the file is blocked but the reason is unknown
    @raise FileNoSources: if the file has no sources
'''
try:
data = filesdb.get_file(file_id, bl = None)
except BaseException as e:
logging.exception(e)
raise DatabaseError
    # try to get the server id from sphinx,
    # resolving data inconsistencies
if not data:
sid = searchd.get_id_server_from_search(file_id, file_name)
if sid:
try:
data = filesdb.get_file(file_id, sid = sid, bl = None)
if feedbackdb.initialized:
feedbackdb.notify_indir(file_id, sid)
except BaseException as e:
logging.exception(e)
raise DatabaseError
if data:
bl = data.get("bl",None)
if bl and isinstance(bl, (str, unicode)) and bl.isdigit():
bl = int(bl)
if bl:
if bl == 1: raise FileFoofindRemoved
elif bl == 3: raise FileRemoved
logging.warn(
"File with an unknown 'bl' value found: %s" % repr(bl),
extra=data)
raise FileUnknownBlock
file_se = data["se"] if "se" in data else None
file_ntt = entitiesdb.get_entity(file_se["_id"]) if file_se and "_id" in file_se else None
ntts = {file_se["_id"]:file_ntt} if file_ntt else {}
'''
        # fetch related entities
if file_ntt and "r" in file_ntt:
rel_ids = list(set(eid for eids in file_ntt["r"].itervalues() for eid in eids))
ntts.update({int(ntt["_id"]):ntt for ntt in entitiesdb.get_entities(rel_ids, None, (False, [u"episode"]))})
'''
else:
raise FileNotExist
    # get the data
return fill_data(data, file_name, ntts)
# fill_data.py
# -*- coding: utf-8 -*-
'''
All the information about a file
'''
import urllib, re
from flask import g, Markup
from flask.ext.babelex import gettext as _
from urlparse import urlparse
from itertools import izip_longest, chain
from foofind.services import *
from foofind.blueprints.files.helpers import *
from foofind.utils import mid2url, mid2hex, hex2mid, to_seconds, u, logging
from foofind.utils.content_types import *
from foofind.utils.filepredictor import guess_doc_content_type
from foofind.datafixes import content_fixes
from foofind.utils.splitter import slugify
from foofind.utils.seo import seoize_text
from foofind.utils.html import clean_html
def init_data(file_data, ntts=[]):
'''
    Initializes the file's data dictionary
'''
content_fixes(file_data)
file_data["id"]=mid2url(file_data['_id'])
file_data['name']=file_data['src'].itervalues().next()['url']
file_se = file_data["se"] if "se" in file_data else None
ntt = ntts[int(float(file_se["_id"]))] if file_se and "_id" in file_se and file_se["_id"] in ntts else None
if ntt:
file_se["info"] = ntt
file_se["rel"] = [ntts[relid] for relids in ntt["r"].itervalues() for relid in relids if relid in ntts] if "r" in ntt else []
return {"file":file_data,"view":{}}
def choose_filename(f,text_cache=None):
'''
    Picks the correct filename
'''
srcs = f['file']['src']
fns = f['file']['fn']
chosen = None
max_count = -1
current_weight = -1
if text_cache and text_cache[0] in fns: # Si text es en realidad un ID de fn
chosen = text_cache[0]
else:
for hexuri,src in srcs.items():
if 'bl' in src and src['bl']!=0:
continue
for crc,srcfn in src['fn'].items():
if crc not in fns: #para los sources que tienen nombre pero no estan en el archivo
continue
#si no tiene nombre no se tiene en cuenta
m = srcfn['m'] if len(fns[crc]['n'])>0 else 0
if 'c' in fns[crc]:
fns[crc]['c']+=m
else:
fns[crc]['c']=m
text_weight = 0
if text_cache:
fn_parts = slugify(fns[crc]['n']).strip().split(" ")
if len(fn_parts)>0:
text_words = slugify(text_cache[0]).split(" ")
# valora numero y orden coincidencias
last_pos = -1
max_length = length = 0
occurrences = [0]*len(text_words)
for part in fn_parts:
pos = text_words.index(part) if part in text_words else -1
if pos != -1 and (last_pos==-1 or pos==last_pos+1):
length += 1
else:
if length > max_length: max_length = length
length = 0
if pos != -1:
occurrences[pos]=1
last_pos = pos
if length > max_length: max_length = length
text_weight = sum(occurrences)*100 + max_length
f['file']['fn'][crc]['tht'] = text_weight
better = fns[crc]['c']>max_count
if text_weight > current_weight or (better and text_weight==current_weight):
current_weight = text_weight
chosen = crc
max_count = fns[crc]['c']
f['view']['url'] = mid2url(hex2mid(f['file']['_id']))
f['view']['fnid'] = chosen
if chosen:
filename = fns[chosen]['n']
ext = fns[chosen]['x']
else: #uses filename from src
filename = ""
for hexuri,src in srcs.items():
if src['url'].find("/")!=-1:
filename = src['url']
if filename=="":
return
filename = filename[filename.rfind("/")+1:]
ext = filename[filename.rfind(".")+1:]
filename = filename[0:filename.rfind(".")]
    #TODO if no filename is available, build one from the metadata (e.g. series - title episode)
filename = extension_filename(filename,ext)
f['view']['fn'] = filename.replace("?", "")
f['view']['qfn'] = qfn = u(filename).encode("UTF-8") #nombre del archivo escapado para generar las url de descarga
f['view']['pfn'] = urllib.quote(qfn).replace(" ", "%20") # P2P filename
nfilename = seoize_text(filename, " ",True, 0)
f['view']['nfn'] = nfilename
    # add the filename as a keyword
    g.keywords.update(set(keyword for keyword in nfilename.split(" ") if len(keyword)>1))
    #filename with the words matching the search highlighted
if text_cache:
f['view']['fnh'], f['view']['fnhs'] = highlight(text_cache[2],filename,True)
else:
        f['view']['fnh'] = filename #this is only used by download, which never has text
    return current_weight>0 # tells whether the searched text was found
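# Added sketch (not part of the original source): the filename weight computed above
# is sum(matched words) * 100 plus the longest run of words found consecutively in
# the search text. A standalone re-implementation, under the hypothetical name
# _score_name, could look like this:
def _score_name(name_words, search_words):
    last_pos = -1
    max_run = run = 0
    hits = [0] * len(search_words)
    for word in name_words:
        pos = search_words.index(word) if word in search_words else -1
        if pos != -1 and (last_pos == -1 or pos == last_pos + 1):
            run += 1
        else:
            max_run = max(max_run, run)
            run = 0
        if pos != -1:
            hits[pos] = 1
        last_pos = pos
    max_run = max(max_run, run)
    return sum(hits) * 100 + max_run  # e.g. two matched words in a row -> 202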
def build_source_links(f):
'''
    Builds the source links correctly
'''
def get_domain(src):
'''
        Returns the domain of a URL
'''
url_parts=urlparse(src).netloc.split('.')
i=len(url_parts)-1
if len(url_parts[i])<=2 and len(url_parts[i-1])<=3:
return url_parts[i-2]+'.'+url_parts[i-1]+'.'+url_parts[i]
else:
            return url_parts[i-1]+'.'+url_parts[i]
f['view']['action']='download'
f['view']['sources']={}
max_weight=0
icon=""
any_downloader=False
    # grouping of sources
source_groups = {}
file_sources = f['file']['src'].items()
file_sources.sort(key=lambda x:x[1]["t"])
for hexuri,src in file_sources:
if not src.get('bl',None) in (0, None):
continue
url_pattern=downloader=join=False
count=0
part=url=""
source_data=g.sources[src["t"]] if "t" in src and src["t"] in g.sources else None
if source_data is None: #si no existe el origen del archivo
logging.error("El fichero contiene un origen inexistente en la tabla \"sources\": %s" % src["t"], extra={"file":f})
if feedbackdb.initialized:
feedbackdb.notify_source_error(f['file']["_id"], f['file']["s"])
continue
elif "crbl" in source_data and int(source_data["crbl"])==1: #si el origen esta bloqueado
continue
elif "w" in source_data["g"] or "f" in source_data["g"] or "s" in source_data["g"]: #si es descarga directa o streaming
link_weight=1
tip=source_data["d"]
icon="web"
source_groups[icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
            #when in doubt, streaming is preferred
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
#torrenthash antes de torrent porque es un caso especifico
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip # magnet link tiene menos prioridad para el texto
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnutella":
link_weight=0.2
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:sha1:"+src['url']
join=True
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="eD2k":
downloader=True
link_weight=0.1
tip="eD2k"
source=icon="ed2k"
url="ed2k://|file|"+f['view']['pfn']+"|"+str(f['file']['z'] if "z" in f["file"] else 1)+"|"+src['url']+"|/"
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="Tiger":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:tiger:"+src['url']
join=True
elif source_data["d"]=="MD5":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:md5:"+src['url']
source_groups[icon] = tip
join=True
else:
continue
if source in f['view']['sources']:
view_source = f['view']['sources'][source]
else:
view_source = f['view']['sources'][source] = {}
view_source.update(source_data)
if downloader:
any_downloader = True
view_source['downloader']=1
elif not 'downloader' in view_source:
view_source['downloader']=0
view_source['tip']=tip
view_source['icon']=icon
view_source['icons']=source_data.get("icons",False)
view_source['join']=join
view_source['source']="streaming" if "s" in source_data["g"] else "direct_download" if "w" in source_data["g"] else "P2P" if "p" in source_data["g"] else ""
        #avoid clobbering the counter when several files come from the same source
if not 'count' in view_source or count>0:
view_source['count']=count
if not "parts" in view_source:
view_source['parts']=[]
if not 'urls' in view_source:
view_source['urls']=[]
if part:
view_source['parts'].append(part)
if url:
if url_pattern:
view_source['urls']=[source_data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
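# Added sketch (invented values, not in the original file): P2P sources flagged with
# join=True are merged at the end of build_source_links() into a single magnet URI
# made of the collected "xt=urn:..." parts, the display name and, when known, the size.
def _example_magnet(parts, display_name, size=None):
    uri = "magnet:?" + "&".join(parts) + "&dn=" + display_name
    if size is not None:
        uri += "&xl=" + str(size)
    return uri
# _example_magnet(["xt=urn:btih:abc123"], "Some.File.mkv", 700000000)
# -> 'magnet:?xt=urn:btih:abc123&dn=Some.File.mkv&xl=700000000'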
def choose_file_type(f):
'''
    Chooses the file type
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
    Gets the images for files that have them
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
f["view"]["images_id"] = images_id
def get_int(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, (int,long)):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, basestring):
result = None
for c in value:
digit = ord(c)-48
if 0<=digit<=9:
if result:
result *= 10
else:
result = 0
result += digit
else:
break
return result
return None
def get_float(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, float):
return value
elif isinstance(value, (int,long)):
return float(value)
elif isinstance(value, basestring):
result = ""
decimal = False
for c in value:
if c in "0123456789":
result += c
elif c in ".," and not decimal:
result += "."
decimal = True
else:
break
if result:
try:
return float(result)
except:
pass
return None
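# Added usage notes (values invented): both parsers above are intentionally lenient
# with string input and stop at the first character they cannot use:
#   get_int({"bitrate": "192 kbps"}, "bitrate")  -> 192
#   get_int({"width": 1280.0}, "width")          -> 1280
#   get_float({"seconds": "12,5s"}, "seconds")   -> 12.5
#   get_float({}, "seconds")                     -> None  (missing key)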
def format_metadata(f,text_cache, search_text_shown=False):
'''
    Formats the files' metadata
'''
text = text_cache[2] if text_cache else None
view_md = f['view']['md'] = {}
view_searches = f["view"]["searches"]={}
file_type = f['view']['file_type'] if 'file_type' in f['view'] else None
if 'md' in f['file']:
#si viene con el formato tipo:metadato se le quita el tipo
file_md = {(meta.split(":")[-1] if ":" in meta else meta): value for meta, value in f['file']['md'].iteritems()}
# Duración para vídeo e imágenes
seconds = get_float(file_md, "seconds")
minutes = get_float(file_md, "minutes")
hours = get_float(file_md, "hours")
# Si no he recibido duracion de otra forma, pruebo con length y duration
if seconds==minutes==hours==None:
seconds = get_float(file_md, "length") or get_float(file_md, "duration")
duration = [hours or 0, minutes or 0, seconds or 0] # h, m, s
if any(duration):
carry = 0
for i in xrange(len(duration)-1,-1,-1):
unit = long(duration[i]) + carry
duration[i] = unit%60
carry = unit/60
view_md["length"] = "%d:%02d:%02d" % tuple(duration) if duration[0] > 0 else "%02d:%02d" % tuple(duration[1:])
# Tamaño para vídeos e imágenes
width = get_int(file_md, "width")
height = get_int(file_md, "height")
if width and height:
view_md["size"] = "%dx%dpx" % (width, height)
# Metadatos que no cambian
try:
view_md.update(
(meta, file_md[meta]) for meta in
(
"folders","description","fileversion","os","files","pages","format",
"seeds","leechs","composer","publisher","encoding","director","writer","starring","producer","released"
) if meta in file_md
)
view_searches.update(
(meta, seoize_text(file_md[meta],"_",False)) for meta in
(
"folders","os","composer","publisher","director","writer","starring","producer"
) if meta in file_md
)
except BaseException as e:
logging.warn(e)
# thumbnail
if "thumbnail" in file_md:
f["view"]["thumbnail"] = file_md["thumbnail"]
#metadatos que tienen otros nombres
try:
view_md.update(("tags", file_md[meta]) for meta in ("keywords", "tags", "tag") if meta in file_md)
if "tags" in view_md and isinstance(view_md["tags"], basestring):
view_searches["tags"] = []
view_md.update(("comments", file_md[meta]) for meta in ("comments", "comment") if meta in file_md)
view_md.update(("track", file_md[meta]) for meta in ("track", "track_number") if meta in file_md)
view_md.update(("created_by", file_md[meta]) for meta in ("created_by", "encodedby","encoder") if meta in file_md)
view_md.update(("language", file_md[meta]) for meta in ("language", "lang") if meta in file_md)
view_md.update(("date", file_md[meta]) for meta in ("published", "creationdate") if meta in file_md)
view_md.update(("trackers", "\n".join(file_md[meta].split(" "))) for meta in ("trackers", "tracker") if meta in file_md and isinstance(file_md[meta], basestring))
view_md.update(("hash", file_md[meta]) for meta in ("hash", "infohash") if meta in file_md)
view_md.update(("visualizations", file_md[meta]) for meta in ("count", "viewCount") if meta in file_md)
if "unpackedsize" in file_md:
view_md["unpacked_size"]=file_md["unpackedsize"]
if "privateflag" in file_md:
view_md["private_file"]=file_md["privateflag"]
except BaseException as e:
logging.warn(e)
#torrents -> filedir filesizes filepaths
if "filepaths" in file_md:
filepath | # Metadatos multimedia
try:
#extraccion del codec de video y/o audio
if "video_codec" in file_md: #si hay video_codec se concatena el audio_codec detras si es necesario
view_md["codec"]=file_md["video_codec"]+" "+file_md["audio_codec"] if "audio_codec" in file_md else file_md["video_codec"]
else: #sino se meten directamente
view_md.update(("codec", file_md[meta]) for meta in ("audio_codec", "codec") if meta in file_md)
if file_type in ("audio", "video", "image"):
view_md.update((meta, file_md[meta]) for meta in ("genre", "track", "artist", "author", "colors") if meta in file_md)
view_searches.update((meta, seoize_text(file_md[meta], "_", False)) for meta in ("artist", "author") if meta in file_md)
except BaseException as e:
logging.warn(e)
# No muestra titulo si es igual al nombre del fichero
if "name" in file_md:
title = u(file_md["name"])
elif "title" in file_md:
title = u(file_md["title"])
else:
title = f['view']['nfn']
if title:
show_title = True
text_longer = title
text_shorter = f["view"]["fn"]
if len(text_shorter)>len(text_longer):
text_longer, text_shorter = text_shorter, text_longer
if text_longer.startswith(text_shorter):
text_longer = text_longer[len(text_shorter):]
if len(text_longer)==0 or (len(text_longer)>0 and text_longer.startswith(".") and text_longer[1:] in EXTENSIONS):
show_title = False
if show_title:
view_md["title"] = title
view_searches["title"] = seoize_text(title, "_", False)
# Los que cambian o son especificos de un tipo
try:
if "date" in view_md: #intentar obtener una fecha válida
try:
view_md["date"]=format_datetime(datetime.fromtimestamp(strtotime(view_md["date"])))
except:
del view_md["date"]
if file_type == 'audio': #album, year, bitrate, seconds, track, genre, length
if 'album' in file_md:
album = u(file_md["album"])
year = get_int(file_md, "year")
if album:
view_md["album"] = album + (" (%d)"%year if year and 1900<year<2100 else "")
view_searches["album"] = seoize_text(album, "_", False)
if 'bitrate' in file_md: # bitrate o bitrate - soundtype o bitrate - soundtype - channels
bitrate = get_int(file_md, "bitrate")
if bitrate:
soundtype=" - %s" % file_md["soundtype"] if "soundtype" in file_md else ""
channels = get_float(file_md, "channels")
channels=" (%g %s)" % (round(channels,1),_("channels")) if channels else ""
view_md["quality"] = "%g kbps %s%s" % (bitrate,soundtype,channels)
elif file_type == 'document': #title, author, pages, format, version
if "format" in file_md:
view_md["format"] = "%s%s" % (file_md["format"]," %s" % file_md["formatversion"] if "formatversion" in file_md else "")
version = []
if "formatVersion" in file_md:
version.append(u(file_md["formatVersion"]))
elif "version" in file_md:
version.append(u(file_md["version"]))
if "revision" in file_md:
version.append(u(file_md["revision"]))
if version:
view_md["version"] = " ".join(version)
elif file_type == 'image': #title, artist, description, width, height, colors
pass
elif file_type == 'software': #title, version, fileversion, os
if "title" in view_md and "version" in file_md:
view_md["title"] += " %s" % file_md["version"]
view_searches["title"] += " %s" % seoize_text(file_md["version"], "_", False)
elif file_type == 'video':
quality = []
framerate = get_int(file_md, "framerate")
if framerate:
quality.append("%d fps" % framerate)
if 'codec' in view_md: #si ya venia codec se muestra ahora en quality solamente
quality.append(u(view_md["codec"]))
del view_md["codec"]
if quality:
view_md["quality"] = " - ".join(quality)
if "series" in file_md:
series = u(file_md["series"])
if series:
safe_series = seoize_text(series, "_", False)
view_md["series"] = series
view_searches["series"]="%s_%s"%(safe_series,"(series)")
season = get_int(file_md, "season")
if season:
view_md["season"] = season
view_searches["season"]="%s_(s%d)"%(safe_series,season)
episode = get_int(file_md, "episode")
if episode:
view_md["episode"] = episode
view_searches["episode"]="%s_(s%de%d)"%(safe_series,season,episode)
except BaseException as e:
logging.exception("Error obteniendo metadatos especificos del tipo de contenido.")
view_mdh=f['view']['mdh']={}
for metadata,value in view_md.items():
if isinstance(value, basestring):
value = clean_html(value)
if not value:
del view_md[metadata]
continue
view_md[metadata]=value
            # highlight content matching the search, only for not-too-long texts
if len(value)<500:
view_mdh[metadata]=highlight(text,value) if text and len(text)<100 else value
elif isinstance(value, float): #no hay ningun metadato tipo float
view_md[metadata]=str(int(value))
else:
view_md[metadata]=value
    # TODO: show metadata containing the searched words if they do not appear in what is shown
def embed_info(f):
'''
    Adds the embed information
'''
embed_width = 560
embed_height = 315
embed_code = None
for src_id, src_data in f["file"]["src"].iteritems():
source_id = src_data["t"]
source_data = g.sources.get(source_id, None)
if not (source_data and source_data.get("embed_active", False) and "embed" in source_data):
continue
try:
embed_code = source_data["embed"]
# comprueba si el content type se puede embeber
embed_cts = source_data["embed_cts"] if "embed_cts" in source_data else DEFAULT_EMBED_CTS
if not f["view"]["ct"] in embed_cts: continue
embed_groups = ()
# url directamente desde los sources
if "source_id" in f["view"] and f["view"]["source_id"]:
embed_groups = {"id": f["view"]["source_id"]}
elif "url_embed_regexp" in source_data and source_data["url_embed_regexp"]:
# comprueba si la url puede ser utilizada para embeber
embed_url = src_data["url"]
regexp = source_data["url_embed_regexp"]
embed_match = cache.regexp(regexp).match(embed_url)
if embed_match is None:
continue
embed_groups = embed_match.groupdict()
if "%s" in embed_code and "id" in embed_groups: # Modo simple, %s intercambiado por el id
embed_code = embed_code % (
# Workaround para embeds con varios %s
# no se hace replace para permitir escapes ('\%s')
(embed_groups["id"],) * embed_code.count("%s")
)
else:
# Modo completo, %(variable)s intercambiado por grupos con nombre
replace_dict = dict(f["file"]["md"])
replace_dict["width"] = embed_width
replace_dict["height"] = embed_height
replace_dict.update(embed_groups)
try:
embed_code = embed_code % replace_dict
except KeyError as e:
# No logeamos los errores por falta de metadatos 'special'
if all(i.startswith("special:") for i in e.args):
continue
raise e
except BaseException as e:
logging.exception(e)
continue
f["view"]["embed"] = embed_code
f["view"]["play"] = (source_data.get("embed_disabled", ""), source_data.get("embed_enabled", ""))
break
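# Added sketch mirroring the two template modes handled above (the template string and
# helper name are invented; this is not the original implementation):
def _example_embed(template, groups, width=560, height=315):
    if "%s" in template and "id" in groups:
        # simple mode: the matched id is repeated for every "%s" in the template
        return template % ((groups["id"],) * template.count("%s"))
    # full mode: named placeholders filled from the groups plus the frame size
    values = {"width": width, "height": height}
    values.update(groups)
    return template % values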
def fill_data(file_data, text=None, ntts={}):
'''
    Adds the data needed to display the files
'''
if text:
slug_text = slugify(text)
text = (text, slug_text, frozenset(slug_text.split(" ")))
# se asegura que esten cargados los datos de origenes y servidor de imagen antes de empezar
fetch_global_data()
f=init_data(file_data, ntts)
choose_file_type(f)
# al elegir nombre de fichero, averigua si aparece el texto buscado
search_text_shown = choose_filename(f,text)
build_source_links(f)
embed_info(f)
get_images(f)
# si hace falta, muestra metadatos extras con el texto buscado
format_metadata(f,text, search_text_shown)
return f
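# Added usage sketch (document and search text invented): a typical call site passes
# the raw database document plus the optional search text and related entities:
#   enriched = fill_data(file_doc, text="pink elephants", ntts={})
#   enriched["view"]["fn"], enriched["view"]["sources"], enriched["view"]["md"]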
def secure_fill_data(file_data,text=None, ntts={}):
'''
    Handles errors in fill_data
'''
try:
return fill_data(file_data,text,ntts)
except BaseException as e:
logging.exception("Fill_data error on file %s: %s"%(str(file_data["_id"]),repr(e)))
return None
def get_file_metadata(file_id, file_name=None):
'''
    Fetches the file from the database and fills in its metadata.
    @type file_id: mongoid
    @param file_id: MongoDB id of the file
    @type file_name: basestring
    @param file_name: name of the file
    @rtype dict
    @return Dictionary of file data with its metadata
    @raise DatabaseError: if the database connection fails
    @raise FileNotExist: if the file does not exist or has been blocked
    @raise FileRemoved: if the file has been removed from its origin
    @raise FileFoofindRemoved: if the file has been blocked by foofind
    @raise FileUnknownBlock: if the file is blocked but the reason is unknown
    @raise FileNoSources: if the file has no sources
'''
try:
data = filesdb.get_file(file_id, bl = None)
except BaseException as e:
logging.exception(e)
raise DatabaseError
# intenta sacar el id del servidor de sphinx,
# resuelve inconsistencias de los datos
if not data:
sid = searchd.get_id_server_from_search(file_id, file_name)
if sid:
try:
data = filesdb.get_file(file_id, sid = sid, bl = None)
if feedbackdb.initialized:
feedbackdb.notify_indir(file_id, sid)
except BaseException as e:
logging.exception(e)
raise DatabaseError
if data:
bl = data.get("bl",None)
if bl and isinstance(bl, (str, unicode)) and bl.isdigit():
bl = int(bl)
if bl:
if bl == 1: raise FileFoofindRemoved
elif bl == 3: raise FileRemoved
logging.warn(
"File with an unknown 'bl' value found: %s" % repr(bl),
extra=data)
raise FileUnknownBlock
file_se = data["se"] if "se" in data else None
file_ntt = entitiesdb.get_entity(file_se["_id"]) if file_se and "_id" in file_se else None
ntts = {file_se["_id"]:file_ntt} if file_ntt else {}
'''
# trae entidades relacionadas
if file_ntt and "r" in file_ntt:
rel_ids = list(set(eid for eids in file_ntt["r"].itervalues() for eid in eids))
ntts.update({int(ntt["_id"]):ntt for ntt in entitiesdb.get_entities(rel_ids, None, (False, [u"episode"]))})
'''
else:
raise FileNotExist
#obtener los datos
return fill_data(data, file_name, ntts)
| s = {}
for path, size in izip_longest(u(file_md["filepaths"]).split("///"), u(file_md.get("filesizes","")).split(" "), fillvalue=None):
# no permite tamaños sin fichero
if not path: break
parts = path.strip("/").split("/")
# crea subdirectorios
relative_path = filepaths
for part in parts[:-1]:
if "/"+part not in relative_path:
relative_path["/"+part] = {}
relative_path = relative_path["/"+part]
# si ya existe el directorio no hace nada
if "/"+parts[-1] in relative_path:
pass
# si el ultimo nivel se repite es un directorio (fallo de contenido)
elif parts[-1] in relative_path:
relative_path["/"+parts[-1]] = {}
del relative_path[parts[-1]]
else:
relative_path[parts[-1]] = size
if "filedir" in file_md:
filepaths = {"/"+u(file_md["filedir"]).strip("/"):filepaths}
if filepaths:
view_md["filepaths"] = filepaths
view_searches["filepaths"] = {}
| conditional_block |
fill_data.py | # -*- coding: utf-8 -*-
'''
All the information about a file
'''
import urllib, re
from flask import g, Markup
from flask.ext.babelex import gettext as _
from urlparse import urlparse
from itertools import izip_longest, chain
from foofind.services import *
from foofind.blueprints.files.helpers import *
from foofind.utils import mid2url, mid2hex, hex2mid, to_seconds, u, logging
from foofind.utils.content_types import *
from foofind.utils.filepredictor import guess_doc_content_type
from foofind.datafixes import content_fixes
from foofind.utils.splitter import slugify
from foofind.utils.seo import seoize_text
from foofind.utils.html import clean_html
def init_data(file_data, ntts=[]):
'''
    Initializes the file's data dictionary
'''
content_fixes(file_data)
file_data["id"]=mid2url(file_data['_id'])
file_data['name']=file_data['src'].itervalues().next()['url']
file_se = file_data["se"] if "se" in file_data else None
ntt = ntts[int(float(file_se["_id"]))] if file_se and "_id" in file_se and file_se["_id"] in ntts else None
if ntt:
file_se["info"] = ntt
file_se["rel"] = [ntts[relid] for relids in ntt["r"].itervalues() for relid in relids if relid in ntts] if "r" in ntt else []
return {"file":file_data,"view":{}}
def choose_filename(f,text_cache=None):
'''
    Chooses the correct filename
'''
srcs = f['file']['src']
fns = f['file']['fn']
chosen = None
max_count = -1
current_weight = -1
if text_cache and text_cache[0] in fns: # Si text es en realidad un ID de fn
chosen = text_cache[0]
else:
for hexuri,src in srcs.items():
if 'bl' in src and src['bl']!=0:
continue
for crc,srcfn in src['fn'].items():
if crc not in fns: #para los sources que tienen nombre pero no estan en el archivo
continue
#si no tiene nombre no se tiene en cuenta
m = srcfn['m'] if len(fns[crc]['n'])>0 else 0
if 'c' in fns[crc]:
fns[crc]['c']+=m
else:
fns[crc]['c']=m
text_weight = 0
if text_cache:
fn_parts = slugify(fns[crc]['n']).strip().split(" ")
if len(fn_parts)>0:
text_words = slugify(text_cache[0]).split(" ")
# valora numero y orden coincidencias
last_pos = -1
max_length = length = 0
occurrences = [0]*len(text_words)
for part in fn_parts:
pos = text_words.index(part) if part in text_words else -1
if pos != -1 and (last_pos==-1 or pos==last_pos+1):
length += 1
else:
if length > max_length: max_length = length
length = 0
if pos != -1:
occurrences[pos]=1
last_pos = pos
if length > max_length: max_length = length
text_weight = sum(occurrences)*100 + max_length
f['file']['fn'][crc]['tht'] = text_weight
better = fns[crc]['c']>max_count
if text_weight > current_weight or (better and text_weight==current_weight):
current_weight = text_weight
chosen = crc
max_count = fns[crc]['c']
f['view']['url'] = mid2url(hex2mid(f['file']['_id']))
f['view']['fnid'] = chosen
if chosen:
filename = fns[chosen]['n']
ext = fns[chosen]['x']
else: #uses filename from src
filename = ""
for hexuri,src in srcs.items():
if src['url'].find("/")!=-1:
filename = src['url']
if filename=="":
return
filename = filename[filename.rfind("/")+1:]
ext = filename[filename.rfind(".")+1:]
filename = filename[0:filename.rfind(".")]
#TODO si no viene nombre de archivo buscar en los metadatos para formar uno (por ejemplo serie - titulo capitulo)
filename = extension_filename(filename,ext)
f['view']['fn'] = filename.replace("?", "")
f['view']['qfn'] = qfn = u(filename).encode("UTF-8") #nombre del archivo escapado para generar las url de descarga
f['view']['pfn'] = urllib.quote(qfn).replace(" ", "%20") # P2P filename
nfilename = seoize_text(filename, " ",True, 0)
f['view']['nfn'] = nfilename
# añade el nombre del fichero como palabra clave
g.keywords.update(set(keyword for keyword in nfilename.split(" ") if len(keyword)>1))
#nombre del archivo con las palabras que coinciden con la busqueda resaltadas
if text_cache:
f['view']['fnh'], f['view']['fnhs'] = highlight(text_cache[2],filename,True)
else:
f['view']['fnh'] = filename #esto es solo para download que nunca tiene text
return current_weight>0 # indica si ha encontrado el texto buscado
def build_source_links(f):
'''
    Builds the source links correctly
'''
def get_domain(src):
'''
        Returns the domain of a URL
'''
url_parts=urlparse(src).netloc.split('.')
i=len(url_parts)-1
if len(url_parts[i])<=2 and len(url_parts[i-1])<=3:
return url_parts[i-2]+'.'+url_parts[i-1]+'.'+url_parts[i]
else:
            return url_parts[i-1]+'.'+url_parts[i]
f['view']['action']='download'
f['view']['sources']={}
max_weight=0
icon=""
any_downloader=False
# agrupación de origenes
source_groups = {}
file_sources = f['file']['src'].items()
file_sources.sort(key=lambda x:x[1]["t"])
for hexuri,src in file_sources:
if not src.get('bl',None) in (0, None):
continue
url_pattern=downloader=join=False
count=0
part=url=""
source_data=g.sources[src["t"]] if "t" in src and src["t"] in g.sources else None
if source_data is None: #si no existe el origen del archivo
logging.error("El fichero contiene un origen inexistente en la tabla \"sources\": %s" % src["t"], extra={"file":f})
if feedbackdb.initialized:
feedbackdb.notify_source_error(f['file']["_id"], f['file']["s"])
continue
elif "crbl" in source_data and int(source_data["crbl"])==1: #si el origen esta bloqueado
continue
elif "w" in source_data["g"] or "f" in source_data["g"] or "s" in source_data["g"]: #si es descarga directa o streaming
link_weight=1
tip=source_data["d"]
icon="web"
source_groups[icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
#en caso de duda se prefiere streaming
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
#torrenthash antes de torrent porque es un caso especifico
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip # magnet link tiene menos prioridad para el texto
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnutella":
link_weight=0.2
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:sha1:"+src['url']
join=True
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="eD2k":
downloader=True
link_weight=0.1
tip="eD2k"
source=icon="ed2k"
url="ed2k://|file|"+f['view']['pfn']+"|"+str(f['file']['z'] if "z" in f["file"] else 1)+"|"+src['url']+"|/"
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="Tiger":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:tiger:"+src['url']
join=True
elif source_data["d"]=="MD5":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:md5:"+src['url']
source_groups[icon] = tip
join=True
else:
continue
if source in f['view']['sources']:
view_source = f['view']['sources'][source]
else:
view_source = f['view']['sources'][source] = {}
view_source.update(source_data)
if downloader:
any_downloader = True
view_source['downloader']=1
elif not 'downloader' in view_source:
view_source['downloader']=0
view_source['tip']=tip
view_source['icon']=icon
view_source['icons']=source_data.get("icons",False)
view_source['join']=join
view_source['source']="streaming" if "s" in source_data["g"] else "direct_download" if "w" in source_data["g"] else "P2P" if "p" in source_data["g"] else ""
#para no machacar el numero si hay varios archivos del mismo source
if not 'count' in view_source or count>0:
view_source['count']=count
if not "parts" in view_source:
view_source['parts']=[]
if not 'urls' in view_source:
view_source['urls']=[]
if part:
view_source['parts'].append(part)
if url:
if url_pattern:
view_source['urls']=[source_data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
def choose_file_type(f):
'''
    Chooses the file type
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
    Gets the images for files that have them
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
f["view"]["images_id"] = images_id
def get_int(adict, key):
if not key in adict:
return None | value = adict[key]
if isinstance(value, (int,long)):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, basestring):
result = None
for c in value:
digit = ord(c)-48
if 0<=digit<=9:
if result:
result *= 10
else:
result = 0
result += digit
else:
break
return result
return None
def get_float(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, float):
return value
elif isinstance(value, (int,long)):
return float(value)
elif isinstance(value, basestring):
result = ""
decimal = False
for c in value:
if c in "0123456789":
result += c
elif c in ".," and not decimal:
result += "."
decimal = True
else:
break
if result:
try:
return float(result)
except:
pass
return None
def format_metadata(f,text_cache, search_text_shown=False):
'''
    Formats the files' metadata
'''
text = text_cache[2] if text_cache else None
view_md = f['view']['md'] = {}
view_searches = f["view"]["searches"]={}
file_type = f['view']['file_type'] if 'file_type' in f['view'] else None
if 'md' in f['file']:
#si viene con el formato tipo:metadato se le quita el tipo
file_md = {(meta.split(":")[-1] if ":" in meta else meta): value for meta, value in f['file']['md'].iteritems()}
# Duración para vídeo e imágenes
seconds = get_float(file_md, "seconds")
minutes = get_float(file_md, "minutes")
hours = get_float(file_md, "hours")
# Si no he recibido duracion de otra forma, pruebo con length y duration
if seconds==minutes==hours==None:
seconds = get_float(file_md, "length") or get_float(file_md, "duration")
duration = [hours or 0, minutes or 0, seconds or 0] # h, m, s
if any(duration):
carry = 0
for i in xrange(len(duration)-1,-1,-1):
unit = long(duration[i]) + carry
duration[i] = unit%60
carry = unit/60
view_md["length"] = "%d:%02d:%02d" % tuple(duration) if duration[0] > 0 else "%02d:%02d" % tuple(duration[1:])
# Tamaño para vídeos e imágenes
width = get_int(file_md, "width")
height = get_int(file_md, "height")
if width and height:
view_md["size"] = "%dx%dpx" % (width, height)
# Metadatos que no cambian
try:
view_md.update(
(meta, file_md[meta]) for meta in
(
"folders","description","fileversion","os","files","pages","format",
"seeds","leechs","composer","publisher","encoding","director","writer","starring","producer","released"
) if meta in file_md
)
view_searches.update(
(meta, seoize_text(file_md[meta],"_",False)) for meta in
(
"folders","os","composer","publisher","director","writer","starring","producer"
) if meta in file_md
)
except BaseException as e:
logging.warn(e)
# thumbnail
if "thumbnail" in file_md:
f["view"]["thumbnail"] = file_md["thumbnail"]
#metadatos que tienen otros nombres
try:
view_md.update(("tags", file_md[meta]) for meta in ("keywords", "tags", "tag") if meta in file_md)
if "tags" in view_md and isinstance(view_md["tags"], basestring):
view_searches["tags"] = []
view_md.update(("comments", file_md[meta]) for meta in ("comments", "comment") if meta in file_md)
view_md.update(("track", file_md[meta]) for meta in ("track", "track_number") if meta in file_md)
view_md.update(("created_by", file_md[meta]) for meta in ("created_by", "encodedby","encoder") if meta in file_md)
view_md.update(("language", file_md[meta]) for meta in ("language", "lang") if meta in file_md)
view_md.update(("date", file_md[meta]) for meta in ("published", "creationdate") if meta in file_md)
view_md.update(("trackers", "\n".join(file_md[meta].split(" "))) for meta in ("trackers", "tracker") if meta in file_md and isinstance(file_md[meta], basestring))
view_md.update(("hash", file_md[meta]) for meta in ("hash", "infohash") if meta in file_md)
view_md.update(("visualizations", file_md[meta]) for meta in ("count", "viewCount") if meta in file_md)
if "unpackedsize" in file_md:
view_md["unpacked_size"]=file_md["unpackedsize"]
if "privateflag" in file_md:
view_md["private_file"]=file_md["privateflag"]
except BaseException as e:
logging.warn(e)
#torrents -> filedir filesizes filepaths
if "filepaths" in file_md:
filepaths = {}
for path, size in izip_longest(u(file_md["filepaths"]).split("///"), u(file_md.get("filesizes","")).split(" "), fillvalue=None):
# no permite tamaños sin fichero
if not path: break
parts = path.strip("/").split("/")
# crea subdirectorios
relative_path = filepaths
for part in parts[:-1]:
if "/"+part not in relative_path:
relative_path["/"+part] = {}
relative_path = relative_path["/"+part]
# si ya existe el directorio no hace nada
if "/"+parts[-1] in relative_path:
pass
# si el ultimo nivel se repite es un directorio (fallo de contenido)
elif parts[-1] in relative_path:
relative_path["/"+parts[-1]] = {}
del relative_path[parts[-1]]
else:
relative_path[parts[-1]] = size
if "filedir" in file_md:
filepaths = {"/"+u(file_md["filedir"]).strip("/"):filepaths}
if filepaths:
view_md["filepaths"] = filepaths
view_searches["filepaths"] = {}
# Metadatos multimedia
try:
#extraccion del codec de video y/o audio
if "video_codec" in file_md: #si hay video_codec se concatena el audio_codec detras si es necesario
view_md["codec"]=file_md["video_codec"]+" "+file_md["audio_codec"] if "audio_codec" in file_md else file_md["video_codec"]
else: #sino se meten directamente
view_md.update(("codec", file_md[meta]) for meta in ("audio_codec", "codec") if meta in file_md)
if file_type in ("audio", "video", "image"):
view_md.update((meta, file_md[meta]) for meta in ("genre", "track", "artist", "author", "colors") if meta in file_md)
view_searches.update((meta, seoize_text(file_md[meta], "_", False)) for meta in ("artist", "author") if meta in file_md)
except BaseException as e:
logging.warn(e)
# No muestra titulo si es igual al nombre del fichero
if "name" in file_md:
title = u(file_md["name"])
elif "title" in file_md:
title = u(file_md["title"])
else:
title = f['view']['nfn']
if title:
show_title = True
text_longer = title
text_shorter = f["view"]["fn"]
if len(text_shorter)>len(text_longer):
text_longer, text_shorter = text_shorter, text_longer
if text_longer.startswith(text_shorter):
text_longer = text_longer[len(text_shorter):]
if len(text_longer)==0 or (len(text_longer)>0 and text_longer.startswith(".") and text_longer[1:] in EXTENSIONS):
show_title = False
if show_title:
view_md["title"] = title
view_searches["title"] = seoize_text(title, "_", False)
# Los que cambian o son especificos de un tipo
try:
if "date" in view_md: #intentar obtener una fecha válida
try:
view_md["date"]=format_datetime(datetime.fromtimestamp(strtotime(view_md["date"])))
except:
del view_md["date"]
if file_type == 'audio': #album, year, bitrate, seconds, track, genre, length
if 'album' in file_md:
album = u(file_md["album"])
year = get_int(file_md, "year")
if album:
view_md["album"] = album + (" (%d)"%year if year and 1900<year<2100 else "")
view_searches["album"] = seoize_text(album, "_", False)
if 'bitrate' in file_md: # bitrate o bitrate - soundtype o bitrate - soundtype - channels
bitrate = get_int(file_md, "bitrate")
if bitrate:
soundtype=" - %s" % file_md["soundtype"] if "soundtype" in file_md else ""
channels = get_float(file_md, "channels")
channels=" (%g %s)" % (round(channels,1),_("channels")) if channels else ""
view_md["quality"] = "%g kbps %s%s" % (bitrate,soundtype,channels)
elif file_type == 'document': #title, author, pages, format, version
if "format" in file_md:
view_md["format"] = "%s%s" % (file_md["format"]," %s" % file_md["formatversion"] if "formatversion" in file_md else "")
version = []
if "formatVersion" in file_md:
version.append(u(file_md["formatVersion"]))
elif "version" in file_md:
version.append(u(file_md["version"]))
if "revision" in file_md:
version.append(u(file_md["revision"]))
if version:
view_md["version"] = " ".join(version)
elif file_type == 'image': #title, artist, description, width, height, colors
pass
elif file_type == 'software': #title, version, fileversion, os
if "title" in view_md and "version" in file_md:
view_md["title"] += " %s" % file_md["version"]
view_searches["title"] += " %s" % seoize_text(file_md["version"], "_", False)
elif file_type == 'video':
quality = []
framerate = get_int(file_md, "framerate")
if framerate:
quality.append("%d fps" % framerate)
if 'codec' in view_md: #si ya venia codec se muestra ahora en quality solamente
quality.append(u(view_md["codec"]))
del view_md["codec"]
if quality:
view_md["quality"] = " - ".join(quality)
if "series" in file_md:
series = u(file_md["series"])
if series:
safe_series = seoize_text(series, "_", False)
view_md["series"] = series
view_searches["series"]="%s_%s"%(safe_series,"(series)")
season = get_int(file_md, "season")
if season:
view_md["season"] = season
view_searches["season"]="%s_(s%d)"%(safe_series,season)
episode = get_int(file_md, "episode")
if episode:
view_md["episode"] = episode
view_searches["episode"]="%s_(s%de%d)"%(safe_series,season,episode)
except BaseException as e:
logging.exception("Error obteniendo metadatos especificos del tipo de contenido.")
view_mdh=f['view']['mdh']={}
for metadata,value in view_md.items():
if isinstance(value, basestring):
value = clean_html(value)
if not value:
del view_md[metadata]
continue
view_md[metadata]=value
# resaltar contenidos que coinciden con la busqueda, para textos no muy largos
if len(value)<500:
view_mdh[metadata]=highlight(text,value) if text and len(text)<100 else value
elif isinstance(value, float): #no hay ningun metadato tipo float
view_md[metadata]=str(int(value))
else:
view_md[metadata]=value
# TODO: mostrar metadatos con palabras buscadas si no aparecen en lo mostrado
def embed_info(f):
'''
    Adds the embed information
'''
embed_width = 560
embed_height = 315
embed_code = None
for src_id, src_data in f["file"]["src"].iteritems():
source_id = src_data["t"]
source_data = g.sources.get(source_id, None)
if not (source_data and source_data.get("embed_active", False) and "embed" in source_data):
continue
try:
embed_code = source_data["embed"]
# comprueba si el content type se puede embeber
embed_cts = source_data["embed_cts"] if "embed_cts" in source_data else DEFAULT_EMBED_CTS
if not f["view"]["ct"] in embed_cts: continue
embed_groups = ()
# url directamente desde los sources
if "source_id" in f["view"] and f["view"]["source_id"]:
embed_groups = {"id": f["view"]["source_id"]}
elif "url_embed_regexp" in source_data and source_data["url_embed_regexp"]:
# comprueba si la url puede ser utilizada para embeber
embed_url = src_data["url"]
regexp = source_data["url_embed_regexp"]
embed_match = cache.regexp(regexp).match(embed_url)
if embed_match is None:
continue
embed_groups = embed_match.groupdict()
if "%s" in embed_code and "id" in embed_groups: # Modo simple, %s intercambiado por el id
embed_code = embed_code % (
# Workaround para embeds con varios %s
# no se hace replace para permitir escapes ('\%s')
(embed_groups["id"],) * embed_code.count("%s")
)
else:
# Modo completo, %(variable)s intercambiado por grupos con nombre
replace_dict = dict(f["file"]["md"])
replace_dict["width"] = embed_width
replace_dict["height"] = embed_height
replace_dict.update(embed_groups)
try:
embed_code = embed_code % replace_dict
except KeyError as e:
# No logeamos los errores por falta de metadatos 'special'
if all(i.startswith("special:") for i in e.args):
continue
raise e
except BaseException as e:
logging.exception(e)
continue
f["view"]["embed"] = embed_code
f["view"]["play"] = (source_data.get("embed_disabled", ""), source_data.get("embed_enabled", ""))
break
def fill_data(file_data, text=None, ntts={}):
'''
    Adds the data needed to display the files
'''
if text:
slug_text = slugify(text)
text = (text, slug_text, frozenset(slug_text.split(" ")))
# se asegura que esten cargados los datos de origenes y servidor de imagen antes de empezar
fetch_global_data()
f=init_data(file_data, ntts)
choose_file_type(f)
# al elegir nombre de fichero, averigua si aparece el texto buscado
search_text_shown = choose_filename(f,text)
build_source_links(f)
embed_info(f)
get_images(f)
# si hace falta, muestra metadatos extras con el texto buscado
format_metadata(f,text, search_text_shown)
return f
def secure_fill_data(file_data,text=None, ntts={}):
'''
    Handles errors in fill_data
'''
try:
return fill_data(file_data,text,ntts)
except BaseException as e:
logging.exception("Fill_data error on file %s: %s"%(str(file_data["_id"]),repr(e)))
return None
def get_file_metadata(file_id, file_name=None):
'''
    Fetches the file from the database and fills in its metadata.
    @type file_id: mongoid
    @param file_id: MongoDB id of the file
    @type file_name: basestring
    @param file_name: name of the file
    @rtype dict
    @return Dictionary of file data with its metadata
    @raise DatabaseError: if the database connection fails
    @raise FileNotExist: if the file does not exist or has been blocked
    @raise FileRemoved: if the file has been removed from its origin
    @raise FileFoofindRemoved: if the file has been blocked by foofind
    @raise FileUnknownBlock: if the file is blocked but the reason is unknown
    @raise FileNoSources: if the file has no sources
'''
try:
data = filesdb.get_file(file_id, bl = None)
except BaseException as e:
logging.exception(e)
raise DatabaseError
# intenta sacar el id del servidor de sphinx,
# resuelve inconsistencias de los datos
if not data:
sid = searchd.get_id_server_from_search(file_id, file_name)
if sid:
try:
data = filesdb.get_file(file_id, sid = sid, bl = None)
if feedbackdb.initialized:
feedbackdb.notify_indir(file_id, sid)
except BaseException as e:
logging.exception(e)
raise DatabaseError
if data:
bl = data.get("bl",None)
if bl and isinstance(bl, (str, unicode)) and bl.isdigit():
bl = int(bl)
if bl:
if bl == 1: raise FileFoofindRemoved
elif bl == 3: raise FileRemoved
logging.warn(
"File with an unknown 'bl' value found: %s" % repr(bl),
extra=data)
raise FileUnknownBlock
file_se = data["se"] if "se" in data else None
file_ntt = entitiesdb.get_entity(file_se["_id"]) if file_se and "_id" in file_se else None
ntts = {file_se["_id"]:file_ntt} if file_ntt else {}
'''
# trae entidades relacionadas
if file_ntt and "r" in file_ntt:
rel_ids = list(set(eid for eids in file_ntt["r"].itervalues() for eid in eids))
ntts.update({int(ntt["_id"]):ntt for ntt in entitiesdb.get_entities(rel_ids, None, (False, [u"episode"]))})
'''
else:
raise FileNotExist
#obtener los datos
return fill_data(data, file_name, ntts) | random_line_split |
|
main.rs | use crate::avro_encode::encode;
use avro_rs::schema::{RecordField, Schema, SchemaFingerprint, UnionSchema};
use avro_rs::types::Value;
use avro_rs::{from_value, types::Record, Codec, Reader, Writer};
use clap::{App, Arg};
use failure::bail;
use failure::Error;
use futures::StreamExt;
use futures_util::future::FutureExt;
use log::{info, warn};
use rdkafka::client::ClientContext;
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
use rdkafka::consumer::stream_consumer::{MessageStream, StreamConsumer};
use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, Rebalance};
use rdkafka::error::KafkaResult;
use rdkafka::message::OwnedHeaders;
use rdkafka::message::{Headers, Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn main() {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
    let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?;
let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
fn serialize2() -> Result<Vec<u8>, Error> |
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n {:?}",
bytes.len(),
bytes,
std::string::String::from_utf8_lossy(&bytes.as_slice())
);
//This is failing by design right now
let out = deserialize(&bytes);
println!("Out: {:?}", out);
panic!("forced panic");
}
#[derive(Debug)]
pub struct DiffPair {
pub before: Option<avro_rs::types::Value>,
pub after: Option<avro_rs::types::Value>,
}
async fn process_message_stream(consumer: &LoggingConsumer) {
// let mut buffer = Vec::new();
    loop {
if let Some(result) = consumer
.get_base_consumer()
.poll(std::time::Duration::from_millis(500))
{
match result {
Ok(message) => {
println!("Message: {:?}", message);
// buffer.clear();
// buffer.extend_from_slice(message.payload().unwrap());
}
Err(err) => {
println!("Message error: {:?}", err);
}
}
} else {
println!("No message found");
}
}
}
fn decode(schema: &Schema, bytes: &mut &[u8]) -> Result<DiffPair, failure::Error> {
let mut before = None;
let mut after = None;
let val = avro_rs::from_avro_datum(&schema, bytes, Some(&schema))?;
match val {
Value::Record(fields) => {
for (name, val) in fields {
if name == "before" {
before = Some(val); // extract_row(val, iter::once(Datum::Int64(-1)))?;
} else if name == "after" {
after = Some(val); // extract_row(val, iter::once(Datum::Int64(1)))?;
} else {
// Intentionally ignore other fields.
}
}
}
_ => bail!("avro envelope had unexpected type: {:?}", val),
}
Ok(DiffPair { before, after })
}
| {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())),
]))),
),
]);
    //add header info: 'O1111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
} | identifier_body |
main.rs | use crate::avro_encode::encode;
use avro_rs::schema::{RecordField, Schema, SchemaFingerprint, UnionSchema};
use avro_rs::types::Value;
use avro_rs::{from_value, types::Record, Codec, Reader, Writer};
use clap::{App, Arg};
use failure::bail;
use failure::Error;
use futures::StreamExt;
use futures_util::future::FutureExt;
use log::{info, warn};
use rdkafka::client::ClientContext;
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
use rdkafka::consumer::stream_consumer::{MessageStream, StreamConsumer};
use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, Rebalance};
use rdkafka::error::KafkaResult;
use rdkafka::message::OwnedHeaders;
use rdkafka::message::{Headers, Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn | () {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
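// publish sends a single record with a fixed key and one header to `topic_name`, awaiting the delivery result.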
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
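// get_consumer creates a stream consumer with the custom logging context and subscribes it to the given topics.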
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
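// get_schema parses the envelope schema: nullable `before` and `after` records, each holding FirstName/LastName strings.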
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
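// serialize writes the record through an avro-rs Writer (full object-container output) and prepends the ad-hoc header bytes.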
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
let mut record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?;
let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
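// serialize2 encodes the record as a bare Avro datum via `encode`, appended after the same ad-hoc header bytes; the Writer constructed here is left unused.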
fn serialize2() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())),
]))),
),
]);
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
}
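// deserialize attempts to read the bytes back with a schema-aware Reader and returns the decoded values as a debug string.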
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
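// Round-trips serialize2 output through deserialize, then panics so the printed output is visible in the test run.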
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n {:?}",
bytes.len(),
bytes,
std::string::String::from_utf8_lossy(&bytes.as_slice())
);
//This is failing by design right now
let out = deserialize(&bytes);
println!("Out: {:?}", out);
panic!("forced panic");
}
#[derive(Debug)]
pub struct DiffPair {
pub before: Option<avro_rs::types::Value>,
pub after: Option<avro_rs::types::Value>,
}
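// process_message_stream polls the base consumer every 500ms and logs each message, poll error, or empty poll.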
async fn process_message_stream(consumer: &LoggingConsumer) {
// let mut buffer = Vec::new();
    loop {
if let Some(result) = consumer
.get_base_consumer()
.poll(std::time::Duration::from_millis(500))
{
match result {
Ok(message) => {
println!("Message: {:?}", message);
// buffer.clear();
// buffer.extend_from_slice(message.payload().unwrap());
}
Err(err) => {
println!("Message error: {:?}", err);
}
}
} else {
println!("No message found");
}
}
}
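// decode reads one Avro datum from `bytes` against the envelope schema and splits it into the optional `before` and `after` values.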
fn decode(schema: &Schema, bytes: &mut &[u8]) -> Result<DiffPair, failure::Error> {
let mut before = None;
let mut after = None;
let val = avro_rs::from_avro_datum(&schema, bytes, Some(&schema))?;
match val {
Value::Record(fields) => {
for (name, val) in fields {
if name == "before" {
before = Some(val); // extract_row(val, iter::once(Datum::Int64(-1)))?;
} else if name == "after" {
after = Some(val); // extract_row(val, iter::once(Datum::Int64(1)))?;
} else {
// Intentionally ignore other fields.
}
}
}
_ => bail!("avro envelope had unexpected type: {:?}", val),
}
Ok(DiffPair { before, after })
}
| main | identifier_name |
main.rs | use crate::avro_encode::encode;
use avro_rs::schema::{RecordField, Schema, SchemaFingerprint, UnionSchema};
use avro_rs::types::Value;
use avro_rs::{from_value, types::Record, Codec, Reader, Writer};
use clap::{App, Arg};
use failure::bail;
use failure::Error;
use futures::StreamExt;
use futures_util::future::FutureExt;
use log::{info, warn};
use rdkafka::client::ClientContext;
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
use rdkafka::consumer::stream_consumer::{MessageStream, StreamConsumer};
use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, Rebalance};
use rdkafka::error::KafkaResult;
use rdkafka::message::OwnedHeaders;
use rdkafka::message::{Headers, Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn main() {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
let mut record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?; | let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
fn serialize2() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())),
]))),
),
]);
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
}
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n {:?}",
bytes.len(),
bytes,
std::string::String::from_utf8_lossy(&bytes.as_slice())
);
//This is failing by design right now
let out = deserialize(&bytes);
println!("Out: {:?}", out);
panic!("forced panic");
}
#[derive(Debug)]
pub struct DiffPair {
pub before: Option<avro_rs::types::Value>,
pub after: Option<avro_rs::types::Value>,
}
async fn process_message_stream(consumer: &LoggingConsumer) {
// let mut buffer = Vec::new();
    loop {
if let Some(result) = consumer
.get_base_consumer()
.poll(std::time::Duration::from_millis(500))
{
match result {
Ok(message) => {
println!("Message: {:?}", message);
// buffer.clear();
// buffer.extend_from_slice(message.payload().unwrap());
}
Err(err) => {
println!("Message error: {:?}", err);
}
}
} else {
println!("No message found");
}
}
}
fn decode(schema: &Schema, bytes: &mut &[u8]) -> Result<DiffPair, failure::Error> {
let mut before = None;
let mut after = None;
let val = avro_rs::from_avro_datum(&schema, bytes, Some(&schema))?;
match val {
Value::Record(fields) => {
for (name, val) in fields {
if name == "before" {
before = Some(val); // extract_row(val, iter::once(Datum::Int64(-1)))?;
} else if name == "after" {
after = Some(val); // extract_row(val, iter::once(Datum::Int64(1)))?;
} else {
// Intentionally ignore other fields.
}
}
}
_ => bail!("avro envelope had unexpected type: {:?}", val),
}
Ok(DiffPair { before, after })
} | random_line_split |
|
logg.go | package log
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/gookit/color"
uberatomic "go.uber.org/atomic"
)
const (
_Off = iota
_Fatal
_Error
_Chek
_Warn
_Info
_Debug
_Trace
)
type (
// LevelPrinter defines a set of terminal printing primitives that output with
	// extra data, time, log level, and code location
LevelPrinter struct {
// Ln prints lists of interfaces with spaces in between
Ln func(a ...interface{})
		// F prints like fmt.Printf surrounded by log details
F func(format string, a ...interface{})
// S prints a spew.Sdump for an interface slice
S func(a ...interface{})
// C accepts a function so that the extra computation can be avoided if it is
// not being viewed
C func(closure func() string)
// Chk is a shortcut for printing if there is an error, or returning true
Chk func(e error) bool
}
logLevelList struct {
Off, Fatal, Error, Check, Warn, Info, Debug, Trace int32
}
LevelSpec struct {
ID int32
Name string
Colorizer func(format string, a ...interface{}) string
}
// Entry is a log entry to be printed as json to the log file
Entry struct {
Time time.Time
Level string
Package string
CodeLocation string
Text string
}
)
var (
logger_started = time.Now()
App = " pod"
AppColorizer = color.White.Sprint
// sep is just a convenient shortcut for this very longwinded expression
sep = string(os.PathSeparator)
currentLevel = uberatomic.NewInt32(logLevels.Info)
// writer can be swapped out for any io.*writer* that you want to use instead of
// stdout.
writer io.Writer = os.Stderr
// allSubsystems stores all of the package subsystem names found in the current
// application
allSubsystems []string
	// highlighted is the set of subsystem names whose log entries are visually emphasised by category
highlighted = make(map[string]struct{})
	// logFilter specifies a set of packages that will not print logs
logFilter = make(map[string]struct{})
// mutexes to prevent concurrent map accesses
highlightMx, _logFilterMx sync.Mutex
// logLevels is a shorthand access that minimises possible Name collisions in the
// dot import
logLevels = logLevelList{
Off: _Off,
Fatal: _Fatal,
Error: _Error,
Check: _Chek,
Warn: _Warn,
Info: _Info,
Debug: _Debug,
Trace: _Trace,
}
// LevelSpecs specifies the id, string name and color-printing function
LevelSpecs = []LevelSpec{
{logLevels.Off, "off ", color.Bit24(0, 0, 0, false).Sprintf},
{logLevels.Fatal, "fatal", color.Bit24(128, 0, 0, false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character, similar to nmcli's argument processor, as the first letter is
// unique. This could be used with a linter to make larger command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
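// SetLogWriteToFile backs up any existing log file with a timestamp suffix, truncates it, and tees subsequent log output to both stderr and the file.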
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
}
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
// SortSubsystemsList sorts the list of subsystems. To keep the data read-only,
// call this function right at the top of main, which runs after declarations
// and package init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
	o = make([]string, len(highlighted))
	var counter int
	for i := range highlighted {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not print logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the filtered list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
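// getTimeText returns the current time formatted as a dim grey StampMilli string.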
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
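// _ln builds the Println-style printer for one level/subsystem pair; it prints nothing when the level or subsystem is filtered out.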
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
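// _f builds the Printf-style printer for one level/subsystem pair.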
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
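// _s builds the spew-dump printer, which appends a spew.Sdump of its arguments to the log line.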
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
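// _c builds the closure printer; the closure is only evaluated when the entry will actually be printed.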
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
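// _chk builds the error-check printer: it logs a non-nil error and reports whether one was printed.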
func _chk(level int32, subsystem string) func(e error) bool {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
LevelSpecs[level].Colorizer(joinStrings(" ", e.Error())),
),
)
return true
}
}
return false
}
}
// joinStrings constructs a string from a slice of interfaces, the same as Println
// but without the terminal newline
func joinStrings(sep string, a ...interface{}) (o string) {
for i := range a {
o += fmt.Sprint(a[i])
if i < len(a)-1 {
o += sep
}
}
return
}
// getLoc calls runtime.Caller and formats as expected by source code editors
// for terminal hyperlinks
//
// Regular expressions and the substitution texts to make these clickable in
// Tilix and other RE hyperlink configurable terminal emulators:
//
// This matches the shortened paths generated in this command and printed at
// the very beginning of the line as this logger prints:
//
// ^((([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 $GOPATH/src/github.com/p9c/matrjoska/$2
//
// I have used a shell variable there but tilix doesn't expand them,
// so put your GOPATH in manually, and obviously change the repo subpath.
//
//
// Change the path to use with another repository's logging output (
// someone with more time on their hands could probably come up with
// something, but frankly the custom links feature of Tilix has the absolute
// worst UX I have encountered since the 90s...
// Maybe in the future this library will be expanded with a tool that more
// intelligently sets the path, ie from CWD or other cleverness.
//
// This matches full paths anywhere on the commandline delimited by spaces:
//
// ([/](([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 /$2
//
// Adapt the invocation to open your preferred editor if it has the capability,
// the above is for Jetbrains Goland
//
func getLoc(skip int, level int32, subsystem string) (output string) {
_, file, line, _ := runtime.Caller(skip)
defer func() {
if r := recover(); r != nil {
fmt.Fprintln(os.Stderr, "getloc panic on subsystem", subsystem, file)
}
}()
split := strings.Split(file, subsystem)
if len(split) < 2 {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
file, ":", line,
),
)
} else {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
split[1], ":", line,
),
)
}
return
}
// DirectionString is a helper function that returns a string that represents the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
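// PickNoun returns the singular or plural form depending on the count.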
func PickNoun(n int, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
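// FileExists reports whether a file exists at the given path.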
func FileExists(filePath string) bool {
_, e := os.Stat(filePath) |
func Caller(comment string, skip int) string {
_, file, line, _ := runtime.Caller(skip + 1)
o := fmt.Sprintf("%s: %s:%d", comment, file, line)
// L.Debug(o)
return o
} | return e == nil
} | random_line_split |
logg.go | package log
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/gookit/color"
uberatomic "go.uber.org/atomic"
)
const (
_Off = iota
_Fatal
_Error
_Chek
_Warn
_Info
_Debug
_Trace
)
type (
// LevelPrinter defines a set of terminal printing primitives that output with
	// extra data, time, log level, and code location
LevelPrinter struct {
// Ln prints lists of interfaces with spaces in between
Ln func(a ...interface{})
		// F prints like fmt.Printf surrounded by log details
F func(format string, a ...interface{})
// S prints a spew.Sdump for an interface slice
S func(a ...interface{})
// C accepts a function so that the extra computation can be avoided if it is
// not being viewed
C func(closure func() string)
// Chk is a shortcut for printing if there is an error, or returning true
Chk func(e error) bool
}
logLevelList struct {
Off, Fatal, Error, Check, Warn, Info, Debug, Trace int32
}
LevelSpec struct {
ID int32
Name string
Colorizer func(format string, a ...interface{}) string
}
// Entry is a log entry to be printed as json to the log file
Entry struct {
Time time.Time
Level string
Package string
CodeLocation string
Text string
}
)
var (
logger_started = time.Now()
App = " pod"
AppColorizer = color.White.Sprint
// sep is just a convenient shortcut for this very longwinded expression
sep = string(os.PathSeparator)
currentLevel = uberatomic.NewInt32(logLevels.Info)
// writer can be swapped out for any io.*writer* that you want to use instead of
// stdout.
writer io.Writer = os.Stderr
// allSubsystems stores all of the package subsystem names found in the current
// application
allSubsystems []string
	// highlighted is the set of subsystem names whose log entries are visually emphasised by category
highlighted = make(map[string]struct{})
	// logFilter specifies a set of packages that will not print logs
logFilter = make(map[string]struct{})
// mutexes to prevent concurrent map accesses
highlightMx, _logFilterMx sync.Mutex
// logLevels is a shorthand access that minimises possible Name collisions in the
// dot import
logLevels = logLevelList{
Off: _Off,
Fatal: _Fatal,
Error: _Error,
Check: _Chek,
Warn: _Warn,
Info: _Info,
Debug: _Debug,
Trace: _Trace,
}
// LevelSpecs specifies the id, string name and color-printing function
LevelSpecs = []LevelSpec{
{logLevels.Off, "off ", color.Bit24(0, 0, 0, false).Sprintf},
{logLevels.Fatal, "fatal", color.Bit24(128, 0, 0, false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character, similar to nmcli's argument processor, as the first letter is
// unique. This could be used with a linter to make larger command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
}
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
// SortSubsystemsList sorts the list of subsystems. To keep the data read-only,
// call this function right at the top of main, which runs after declarations
// and package init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
	o = make([]string, len(highlighted))
	var counter int
	for i := range highlighted {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not print logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the filtered list
func | (hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
func _chk(level int32, subsystem string) func(e error) bool {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
LevelSpecs[level].Colorizer(joinStrings(" ", e.Error())),
),
)
return true
}
}
return false
}
}
// joinStrings constructs a string from a slice of interfaces, the same as Println
// but without the terminal newline
func joinStrings(sep string, a ...interface{}) (o string) {
for i := range a {
o += fmt.Sprint(a[i])
if i < len(a)-1 {
o += sep
}
}
return
}
// getLoc calls runtime.Caller and formats as expected by source code editors
// for terminal hyperlinks
//
// Regular expressions and the substitution texts to make these clickable in
// Tilix and other RE hyperlink configurable terminal emulators:
//
// This matches the shortened paths generated in this command and printed at
// the very beginning of the line as this logger prints:
//
// ^((([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 $GOPATH/src/github.com/p9c/matrjoska/$2
//
// I have used a shell variable there but tilix doesn't expand them,
// so put your GOPATH in manually, and obviously change the repo subpath.
//
//
// Change the path to use with another repository's logging output (
// someone with more time on their hands could probably come up with
// something, but frankly the custom links feature of Tilix has the absolute
// worst UX I have encountered since the 90s...
// Maybe in the future this library will be expanded with a tool that more
// intelligently sets the path, ie from CWD or other cleverness.
//
// This matches full paths anywhere on the commandline delimited by spaces:
//
// ([/](([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 /$2
//
// Adapt the invocation to open your preferred editor if it has the capability,
// the above is for Jetbrains Goland
//
func getLoc(skip int, level int32, subsystem string) (output string) {
_, file, line, _ := runtime.Caller(skip)
defer func() {
if r := recover(); r != nil {
fmt.Fprintln(os.Stderr, "getloc panic on subsystem", subsystem, file)
}
}()
split := strings.Split(file, subsystem)
if len(split) < 2 {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
file, ":", line,
),
)
} else {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
split[1], ":", line,
),
)
}
return
}
// DirectionString is a helper function that returns a string that represents the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
func PickNoun(n int, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
func FileExists(filePath string) bool {
_, e := os.Stat(filePath)
return e == nil
}
func Caller(comment string, skip int) string {
_, file, line, _ := runtime.Caller(skip + 1)
o := fmt.Sprintf("%s: %s:%d", comment, file, line)
// L.Debug(o)
return o
}
| AddFilteredSubsystem | identifier_name |
logg.go | package log
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/gookit/color"
uberatomic "go.uber.org/atomic"
)
const (
_Off = iota
_Fatal
_Error
_Chek
_Warn
_Info
_Debug
_Trace
)
type (
// LevelPrinter defines a set of terminal printing primitives that output with
	// extra data, time, log level, and code location
LevelPrinter struct {
// Ln prints lists of interfaces with spaces in between
Ln func(a ...interface{})
		// F prints like fmt.Printf surrounded by log details
F func(format string, a ...interface{})
// S prints a spew.Sdump for an interface slice
S func(a ...interface{})
// C accepts a function so that the extra computation can be avoided if it is
// not being viewed
C func(closure func() string)
// Chk is a shortcut for printing if there is an error, or returning true
Chk func(e error) bool
}
logLevelList struct {
Off, Fatal, Error, Check, Warn, Info, Debug, Trace int32
}
LevelSpec struct {
ID int32
Name string
Colorizer func(format string, a ...interface{}) string
}
// Entry is a log entry to be printed as json to the log file
Entry struct {
Time time.Time
Level string
Package string
CodeLocation string
Text string
}
)
var (
logger_started = time.Now()
App = " pod"
AppColorizer = color.White.Sprint
// sep is just a convenient shortcut for this very longwinded expression
sep = string(os.PathSeparator)
currentLevel = uberatomic.NewInt32(logLevels.Info)
// writer can be swapped out for any io.*writer* that you want to use instead of
// stdout.
writer io.Writer = os.Stderr
// allSubsystems stores all of the package subsystem names found in the current
// application
allSubsystems []string
	// highlighted is the set of subsystem names whose log entries are visually emphasised by category
highlighted = make(map[string]struct{})
	// logFilter specifies a set of packages that will not print logs
logFilter = make(map[string]struct{})
// mutexes to prevent concurrent map accesses
highlightMx, _logFilterMx sync.Mutex
// logLevels is a shorthand access that minimises possible Name collisions in the
// dot import
logLevels = logLevelList{
Off: _Off,
Fatal: _Fatal,
Error: _Error,
Check: _Chek,
Warn: _Warn,
Info: _Info,
Debug: _Debug,
Trace: _Trace,
}
// LevelSpecs specifies the id, string name and color-printing function
LevelSpecs = []LevelSpec{
{logLevels.Off, "off ", color.Bit24(0, 0, 0, false).Sprintf},
{logLevels.Fatal, "fatal", color.Bit24(128, 0, 0, false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character, similar to nmcli's argument processor, as the first letter is
// unique. This could be used with a linter to make larger command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
}
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
// SortSubsystemsList sorts the list of subsystems. To keep the data read-only,
// call this function right at the top of main, which runs after declarations
// and package init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
	o = make([]string, len(highlighted))
	var counter int
	for i := range highlighted {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not print logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the filtered list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
func _chk(level int32, subsystem string) func(e error) bool |
// joinStrings constructs a string from a slice of interfaces, the same as Println
// but without the terminal newline
func joinStrings(sep string, a ...interface{}) (o string) {
for i := range a {
o += fmt.Sprint(a[i])
if i < len(a)-1 {
o += sep
}
}
return
}
// getLoc calls runtime.Caller and formats as expected by source code editors
// for terminal hyperlinks
//
// Regular expressions and the substitution texts to make these clickable in
// Tilix and other RE hyperlink configurable terminal emulators:
//
// This matches the shortened paths generated in this command and printed at
// the very beginning of the line as this logger prints:
//
// ^((([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 $GOPATH/src/github.com/p9c/matrjoska/$2
//
// I have used a shell variable there but tilix doesn't expand them,
// so put your GOPATH in manually, and obviously change the repo subpath.
//
//
// Change the path to use with another repository's logging output (someone
// with more time on their hands could probably come up with something, but
// frankly the custom links feature of Tilix has the absolute worst UX I have
// encountered since the 90s...).
// Maybe in the future this library will be expanded with a tool that more
// intelligently sets the path, ie from CWD or other cleverness.
//
// This matches full paths anywhere on the commandline delimited by spaces:
//
// ([/](([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 /$2
//
// Adapt the invocation to open your preferred editor if it has the capability,
// the above is for Jetbrains Goland
//
func getLoc(skip int, level int32, subsystem string) (output string) {
_, file, line, _ := runtime.Caller(skip)
defer func() {
if r := recover(); r != nil {
fmt.Fprintln(os.Stderr, "getloc panic on subsystem", subsystem, file)
}
}()
split := strings.Split(file, subsystem)
if len(split) < 2 {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
file, ":", line,
),
)
} else {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
split[1], ":", line,
),
)
}
return
}
// DirectionString is a helper function that returns a string that represents the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
func PickNoun(n int, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
func FileExists(filePath string) bool {
_, e := os.Stat(filePath)
return e == nil
}
func Caller(comment string, skip int) string {
_, file, line, _ := runtime.Caller(skip + 1)
o := fmt.Sprintf("%s: %s:%d", comment, file, line)
// L.Debug(o)
return o
}
| {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
LevelSpecs[level].Colorizer(joinStrings(" ", e.Error())),
),
)
return true
}
}
return false
}
} | identifier_body |
logg.go | package log
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/gookit/color"
uberatomic "go.uber.org/atomic"
)
const (
_Off = iota
_Fatal
_Error
_Chek
_Warn
_Info
_Debug
_Trace
)
type (
// LevelPrinter defines a set of terminal printing primitives that output with
// extra data: time, log level, and code location
LevelPrinter struct {
// Ln prints lists of interfaces with spaces in between
Ln func(a ...interface{})
// F prints like fmt.Printf surrounded by log details
F func(format string, a ...interface{})
// S prints a spew.Sdump for an interface slice
S func(a ...interface{})
// C accepts a function so that the extra computation can be avoided if it is
// not being viewed
C func(closure func() string)
// Chk is a shortcut for printing if there is an error, or returning true
Chk func(e error) bool
}
logLevelList struct {
Off, Fatal, Error, Check, Warn, Info, Debug, Trace int32
}
LevelSpec struct {
ID int32
Name string
Colorizer func(format string, a ...interface{}) string
}
// Entry is a log entry to be printed as json to the log file
Entry struct {
Time time.Time
Level string
Package string
CodeLocation string
Text string
}
)
var (
logger_started = time.Now()
App = " pod"
AppColorizer = color.White.Sprint
// sep is just a convenient shortcut for this very longwinded expression
sep = string(os.PathSeparator)
currentLevel = uberatomic.NewInt32(logLevels.Info)
// writer can be swapped out for any io.*writer* that you want to use instead of
// stdout.
writer io.Writer = os.Stderr
// allSubsystems stores all of the package subsystem names found in the current
// application
allSubsystems []string
// highlighted is the set of subsystems whose log entries are visually highlighted to distinguish them by category
highlighted = make(map[string]struct{})
// logFilter specifies a set of packages that will not print logs
logFilter = make(map[string]struct{})
// mutexes to prevent concurrent map accesses
highlightMx, _logFilterMx sync.Mutex
// logLevels is a shorthand access that minimises possible Name collisions in the
// dot import
logLevels = logLevelList{
Off: _Off,
Fatal: _Fatal,
Error: _Error,
Check: _Chek,
Warn: _Warn,
Info: _Info,
Debug: _Debug,
Trace: _Trace,
}
// LevelSpecs specifies the id, string name and color-printing function
LevelSpecs = []LevelSpec{
{logLevels.Off, "off ", color.Bit24(0, 0, 0, false).Sprintf},
{logLevels.Fatal, "fatal", color.Bit24(128, 0, 0, false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
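// Example use of AddLogChan (illustrative sketch, not part of the original
// file): a consumer would typically drain the channel from its own goroutine:
//
//	ch := AddLogChan()
//	go func() {
//		for entry := range ch {
//			// ship entry to a file, database or aggregator here
//		}
//	}()
//
// As far as this file shows, nothing writes to LogChan yet; wiring the level
// printers to the channel is left to the caller.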
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
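// Example use of GetLogPrinterSet (sketch; the path base and variable names are
// placeholders): a package would normally create its printers once in a
// file-level declaration:
//
//	var subsystem = AddLoggerSubsystem("github.com/example/myrepo/")
//	var ftl, err, wrn, inf, dbg, trc = GetLogPrinterSet(subsystem)
//
// Call sites then use inf.Ln("starting"), dbg.F("got %d items", n),
// trc.C(func() string { return expensiveDump() }) or err.Chk(e) to log at the
// matching level.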
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character (similar to nmcli's argument processor), as the first letter of
// each level name is unique. This could be used with a linter to make larger
// command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
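// Example (sketch): because only the first letter is compared, and the
// comparison is case-sensitive, SetLogLevel("debug") and SetLogLevel("d") both
// select the debug level, SetLogLevel("") falls back to info, and an
// unrecognised string leaves the level at the info default.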
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil |
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
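// Example use of SetLogWriteToFile (sketch; the paths are assumptions): to
// mirror log output to a per-session file while keeping stderr output:
//
//	if e := SetLogWriteToFile("/tmp", "-example"); e != nil {
//		fmt.Fprintln(os.Stderr, "file logging disabled:", e)
//	}
//
// This writes to /tmp/log-example, after first copying any previous session's
// file to a timestamped name.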
// SortSubsystemsList sorts the list of subsystems. To keep the data effectively
// read-only, call this function right at the top of main, which runs after
// package-level declarations and init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
o = make([]string, len(highlighted))
var counter int
for i := range highlighted {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not print logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the filtered list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
func _chk(level int32, subsystem string) func(e error) bool {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
LevelSpecs[level].Colorizer(joinStrings(" ", e.Error())),
),
)
return true
}
}
return false
}
}
// joinStrings constructs a string from a slice of interfaces, the same as Println
// but without the terminal newline
func joinStrings(sep string, a ...interface{}) (o string) {
for i := range a {
o += fmt.Sprint(a[i])
if i < len(a)-1 {
o += sep
}
}
return
}
// getLoc calls runtime.Caller and formats as expected by source code editors
// for terminal hyperlinks
//
// Regular expressions and the substitution texts to make these clickable in
// Tilix and other RE hyperlink configurable terminal emulators:
//
// This matches the shortened paths generated in this command and printed at
// the very beginning of the line as this logger prints:
//
// ^((([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 $GOPATH/src/github.com/p9c/matrjoska/$2
//
// I have used a shell variable there but tilix doesn't expand them,
// so put your GOPATH in manually, and obviously change the repo subpath.
//
//
// Change the path to use with another repository's logging output (someone
// with more time on their hands could probably come up with something, but
// frankly the custom links feature of Tilix has the absolute worst UX I have
// encountered since the 90s...).
// Maybe in the future this library will be expanded with a tool that more
// intelligently sets the path, ie from CWD or other cleverness.
//
// This matches full paths anywhere on the commandline delimited by spaces:
//
// ([/](([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 /$2
//
// Adapt the invocation to open your preferred editor if it has the capability,
// the above is for Jetbrains Goland
//
func getLoc(skip int, level int32, subsystem string) (output string) {
_, file, line, _ := runtime.Caller(skip)
defer func() {
if r := recover(); r != nil {
fmt.Fprintln(os.Stderr, "getloc panic on subsystem", subsystem, file)
}
}()
split := strings.Split(file, subsystem)
if len(split) < 2 {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
file, ":", line,
),
)
} else {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
split[1], ":", line,
),
)
}
return
}
// DirectionString is a helper function that returns a string that represents the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
func PickNoun(n int, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
func FileExists(filePath string) bool {
_, e := os.Stat(filePath)
return e == nil
}
func Caller(comment string, skip int) string {
_, file, line, _ := runtime.Caller(skip + 1)
o := fmt.Sprintf("%s: %s:%d", comment, file, line)
// L.Debug(o)
return o
}
| {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
} | conditional_block |
cogroup.rs | //! Group records by a key, and apply a reduction function.
//!
//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records
//! with the same key, and apply user supplied functions to the key and a list of values, which are
//! expected to populate a list of output values.
//!
//! Several variants of `group` exist which allow more precise control over how grouping is done.
//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector
//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and
//! will use a dense array rather than a `HashMap` to store their keys.
//!
//! The list of values is presented as an iterator which internally merges sorted lists of values.
//! This ordering can be exploited in several cases to avoid computation when only the first few
//! elements are required.
//!
//! #Examples
//!
//! This example groups a stream of `(key,val)` pairs by `key`, and yields only the most frequently
//! occurring value for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
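// Example use of cogroup_by_inner (illustrative sketch, not from the original
// source): with string keys one would pass `|_| HashMap::new()` as the lookup
// constructor, and with unsigned integer keys `|x| (Vec::new(), x)`. A call
// might look roughly like:
//
//     let out = left.cogroup_by_inner(&right,
//         |k| my_hash(k),                    // key hash
//         |k, v| (k.clone(), v.clone()),     // reduction to output records
//         |_| HashMap::new(),                // lookup constructor
//         |_key, vals1, vals2, output| {     // per-key logic
//             // merge the two value iterators and push (value, weight) pairs
//         });
//
// The closure bodies and the `my_hash` helper are placeholders.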
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<V1>, &mut CollectionIterator<V2>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact |
}
// we may need to produce output at index
let mut session = output.session(&index);
// 2b. We must now determine for each interesting key at this time, how does the
// currently reported output match up with what we need as output. Should we send
// more output differences, and what are they?
// Much of this logic used to hide in `OperatorTrace` and `CollectionTrace`.
// They are now gone and simpler, respectively.
if let Some(mut keys) = to_do.remove_key(&index) {
// we would like these keys in a particular order.
// TODO : use a radix sort since we have `key_h`.
keys.sort_by(|x,y| (key_h(&x), x).cmp(&(key_h(&y), y)));
keys.dedup();
// accumulations for installation into result
let mut accumulation = Compact::new(0,0);
for key in keys {
// acquire an iterator over the collection at `time`.
let mut input1 = source1.get_collection(&key, &index);
let mut input2 = source2.get_collection(&key, &index);
// if we have some data, invoke logic to populate self.dst
if input1.peek().is_some() || input2.peek().is_some() { logic(&key, &mut input1, &mut input2, &mut buffer); }
buffer.sort_by(|x,y| x.0.cmp(&y.0));
// push differences in to Compact.
let mut compact = accumulation.session();
for (val, wgt) in Coalesce::coalesce(result.get_collection(&key, &index)
.map(|(v, w)| (v,-w))
.merge_by(buffer.iter().map(|&(ref v, w)| (v, w)), |x,y| {
x.0 <= y.0
}))
{
session.give((reduc(&key, val), wgt));
compact.push(val.clone(), wgt);
}
compact.done(key);
buffer.clear();
}
if accumulation.vals.len() > 0 {
// println!("group2");
result.set_difference(index.time(), accumulation);
}
}
}
}))
}
}
| {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source2.set_difference(index.time(), compact);
} | conditional_block |
cogroup.rs | //! Group records by a key, and apply a reduction function.
//!
//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records
//! with the same key, and apply user supplied functions to the key and a list of values, which are
//! expected to populate a list of output values.
//!
//! Several variants of `group` exist which allow more precise control over how grouping is done.
//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector
//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and
//! will use a dense array rather than a `HashMap` to store their keys.
//!
//! The list of values is presented as an iterator which internally merges sorted lists of values.
//! This ordering can be exploited in several cases to avoid computation when only the first few
//! elements are required.
//!
//! #Examples
//!
//! This example groups a stream of `(key,val)` pairs by `key`, and yields only the most frequently
//! occurring value for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<V1>, &mut CollectionIterator<V2>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>(); | if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source2.set_difference(index.time(), compact);
}
}
// we may need to produce output at index
let mut session = output.session(&index);
// 2b. We must now determine for each interesting key at this time, how does the
// currently reported output match up with what we need as output. Should we send
// more output differences, and what are they?
// Much of this logic used to hide in `OperatorTrace` and `CollectionTrace`.
// They are now gone and simpler, respectively.
if let Some(mut keys) = to_do.remove_key(&index) {
// we would like these keys in a particular order.
// TODO : use a radix sort since we have `key_h`.
keys.sort_by(|x,y| (key_h(&x), x).cmp(&(key_h(&y), y)));
keys.dedup();
// accumulations for installation into result
let mut accumulation = Compact::new(0,0);
for key in keys {
// acquire an iterator over the collection at `time`.
let mut input1 = source1.get_collection(&key, &index);
let mut input2 = source2.get_collection(&key, &index);
// if we have some data, invoke logic to populate self.dst
if input1.peek().is_some() || input2.peek().is_some() { logic(&key, &mut input1, &mut input2, &mut buffer); }
buffer.sort_by(|x,y| x.0.cmp(&y.0));
// push differences in to Compact.
let mut compact = accumulation.session();
for (val, wgt) in Coalesce::coalesce(result.get_collection(&key, &index)
.map(|(v, w)| (v,-w))
.merge_by(buffer.iter().map(|&(ref v, w)| (v, w)), |x,y| {
x.0 <= y.0
}))
{
session.give((reduc(&key, val), wgt));
compact.push(val.clone(), wgt);
}
compact.done(key);
buffer.clear();
}
if accumulation.vals.len() > 0 {
// println!("group2");
result.set_difference(index.time(), accumulation);
}
}
}
}))
}
} | vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
| random_line_split |
cogroup.rs | //! Group records by a key, and apply a reduction function.
//!
//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records
//! with the same key, and apply user supplied functions to the key and a list of values, which are
//! expected to populate a list of output values.
//!
//! Several variants of `group` exist which allow more precise control over how grouping is done.
//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector
//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and
//! will use a dense array rather than a `HashMap` to store their keys.
//!
//! The list of values is presented as an iterator which internally merges sorted lists of values.
//! This ordering can be exploited in several cases to avoid computation when only the first few
//! elements are required.
//!
//! #Examples
//!
//! This example groups a stream of `(key,val)` pairs by `key`, and yields only the most frequently
//! occurring value for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn | <
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<V1>, &mut CollectionIterator<V2>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source2.set_difference(index.time(), compact);
}
}
// we may need to produce output at index
let mut session = output.session(&index);
// 2b. We must now determine for each interesting key at this time, how does the
// currently reported output match up with what we need as output. Should we send
// more output differences, and what are they?
// Much of this logic used to hide in `OperatorTrace` and `CollectionTrace`.
// They are now gone and simpler, respectively.
if let Some(mut keys) = to_do.remove_key(&index) {
// we would like these keys in a particular order.
// TODO : use a radix sort since we have `key_h`.
keys.sort_by(|x,y| (key_h(&x), x).cmp(&(key_h(&y), y)));
keys.dedup();
// accumulations for installation into result
let mut accumulation = Compact::new(0,0);
for key in keys {
// acquire an iterator over the collection at `time`.
let mut input1 = source1.get_collection(&key, &index);
let mut input2 = source2.get_collection(&key, &index);
// if we have some data, invoke logic to populate self.dst
if input1.peek().is_some() || input2.peek().is_some() { logic(&key, &mut input1, &mut input2, &mut buffer); }
buffer.sort_by(|x,y| x.0.cmp(&y.0));
// push differences in to Compact.
let mut compact = accumulation.session();
for (val, wgt) in Coalesce::coalesce(result.get_collection(&key, &index)
.map(|(v, w)| (v,-w))
.merge_by(buffer.iter().map(|&(ref v, w)| (v, w)), |x,y| {
x.0 <= y.0
}))
{
session.give((reduc(&key, val), wgt));
compact.push(val.clone(), wgt);
}
compact.done(key);
buffer.clear();
}
if accumulation.vals.len() > 0 {
// println!("group2");
result.set_difference(index.time(), accumulation);
}
}
}
}))
}
}
| cogroup_by_inner | identifier_name |
cogroup.rs | //! Group records by a key, and apply a reduction function.
//!
//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records
//! with the same key, and apply user supplied functions to the key and a list of values, which are
//! expected to populate a list of output values.
//!
//! Several variants of `group` exist which allow more precise control over how grouping is done.
//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector
//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and
//! will use a dense array rather than a `HashMap` to store their keys.
//!
//! The list of values is presented as an iterator which internally merges sorted lists of values.
//! This ordering can be exploited in several cases to avoid computation when only the first few
//! elements are required.
//!
//! #Examples
//!
//! This example groups a stream of `(key,val)` pairs by `key`, and yields only the most frequently
//! occurring value for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<V1>, &mut CollectionIterator<V2>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> |
}
| {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source2.set_difference(index.time(), compact);
}
}
// we may need to produce output at index
let mut session = output.session(&index);
// 2b. We must now determine for each interesting key at this time, how does the
// currently reported output match up with what we need as output. Should we send
// more output differences, and what are they?
// Much of this logic used to hide in `OperatorTrace` and `CollectionTrace`.
// They are now gone and simpler, respectively.
if let Some(mut keys) = to_do.remove_key(&index) {
// we would like these keys in a particular order.
// TODO : use a radix sort since we have `key_h`.
keys.sort_by(|x,y| (key_h(&x), x).cmp(&(key_h(&y), y)));
keys.dedup();
// accumulations for installation into result
let mut accumulation = Compact::new(0,0);
for key in keys {
// acquire an iterator over the collection at `time`.
let mut input1 = source1.get_collection(&key, &index);
let mut input2 = source2.get_collection(&key, &index);
// if we have some data, invoke logic to populate self.dst
if input1.peek().is_some() || input2.peek().is_some() { logic(&key, &mut input1, &mut input2, &mut buffer); }
buffer.sort_by(|x,y| x.0.cmp(&y.0));
// push differences in to Compact.
let mut compact = accumulation.session();
for (val, wgt) in Coalesce::coalesce(result.get_collection(&key, &index)
.map(|(v, w)| (v,-w))
.merge_by(buffer.iter().map(|&(ref v, w)| (v, w)), |x,y| {
x.0 <= y.0
}))
{
session.give((reduc(&key, val), wgt));
compact.push(val.clone(), wgt);
}
compact.done(key);
buffer.clear();
}
if accumulation.vals.len() > 0 {
// println!("group2");
result.set_difference(index.time(), accumulation);
}
}
}
}))
} | identifier_body |
create-cp-input.py | #!/usr/bin/env python3
###################
#
# Creates input to cp-ansible (ksql, connect) & julieops (topic provision)
#
# Author: Venky Narayanan ([email protected])
# Date: May 26, 2021
#
###################
from __future__ import print_function
from datetime import datetime
import argparse
from jinja2 import Template
import yaml
import json
import logging
import requests
import os
CONST_TIMESTAMP = 'timestamp'
CONST_NAME = 'name'
CONST_PARTITIONS = 'partitions'
CONST_REPLICATION = 'replication'
CONST_OVERRIDE = 'override'
CONST_DEPENDENCIES = 'dependencies'
CONST_KSQL = 'ksql'
CONST_CONNECT = 'connect'
CONST_TOPIC = 'topic'
CONST_TOPICS = 'topics'
CONST_BROKER = 'broker'
CONST_PROVISION = 'provision'
CONST_CONNECTORS = 'connectors'
CONST_DESCRIPTION = 'description'
CONST_QUERIES = 'queries'
CONST_HOSTS = 'hosts'
CONST_PLUGINS = 'plugins'
CONST_PLUGINS_HUB = 'hub'
CONST_PLUGINS_LOCAL = 'local'
CONST_PLUGINS_REMOTE = 'remote'
CONST_CLUSTERDATA = 'cluster_data'
CONST_SSH_USER = 'ssh_username'
CONST_SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {CONST_TIMESTAMP: '', CONST_CONTEXT: 'test_context', CONST_COMPANY: 'test_company', CONST_ENV: 'test_env', CONST_SOURCE: 'test_source', CONST_PROJECTS: [], CONST_BOOTSTRAP_SERVERS: '', CONST_CONNECT: [], CONST_CONSUMER: [], CONST_PRODUCER: [], CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'}, CONST_KSQL + '_' + CONST_QUERIES: [], CONST_KSQL + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_CONNECTORS: [], CONST_CONNECT + '_' + CONST_PLUGINS: []}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
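# Illustrative only (not part of the original script): how the defaulting above resolves.
# The feature id, topic name and override values below are made-up examples.
def _example_topic_defaults():
    # No per-topic settings: the non-zero overrides win.
    topic_a = process_topic_item('fe1', 'fe1', {CONST_NAME: 'orders'}, 6, 3)
    # topic_a == {'name': 'orders', 'replication': 3, 'partitions': 6}
    # An explicit partition count on the topic beats the override; replication falls
    # back to 1 because both the topic setting and the override are absent/zero.
    topic_b = process_topic_item('fe1', 'fe1', {CONST_NAME: 'orders', CONST_PARTITIONS: 12}, 0, 0)
    # topic_b == {'name': 'orders', 'replication': 1, 'partitions': 12}
    return topic_a, topic_b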
# Create Julieops descriptor file
def process_broker (feid, doc):
logging.debug ('-------')
if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
override = doc[CONST_TOPIC][CONST_OVERRIDE]
if CONST_PARTITIONS in override:
override_part = override[CONST_PARTITIONS]
if CONST_REPLICATION in override:
override_repl = override[CONST_REPLICATION]
logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
logging.info ('No dependency topics')
for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[CONST_CONNECT] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONNECT, override_apikey, override_apisecret)
inputs_map[CONST_CONSUMER] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONSUMER, override_apikey, override_apisecret)
inputs_map[CONST_PRODUCER] = get_api_config (docs[CONST_CREDENTIALS], CONST_PRODUCER, override_apikey, override_apisecret)
inputs_map[CONST_KSQL] = get_api_config (docs[CONST_CREDENTIALS], CONST_KSQL, override_apikey, override_apisecret)
inputs_map[CONST_SR] = get_api_config (docs[CONST_CREDENTIALS], CONST_SR, override_apikey, override_apisecret)
inputs_map[CONST_SR][CONST_URL] = docs[CONST_SR][CONST_URL]
def do_process(args):
ccloud_config_file = args.commandconfig
with open(ccloud_config_file) as f:
ccloud_config_docs = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(ccloud_config_docs)
logging.debug ('-------')
process_ccloud_config (ccloud_config_docs)
feconfig_file = args.feconfig
with open(feconfig_file) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(doc)
logging.debug ('-------')
process(doc, args)
logging.debug ('-------')
logging.debug (inputs_map)
logging.debug ('-------')
f.close()
def parse_arguments():
parser = argparse.ArgumentParser(
prog='create-cp-input.py', usage='%(prog)s [options]',
description="Reads the feature environment YAML config file; converts it into julieops and cp-ansible inventory"
)
# parser.add_argument("-h", "--help", help="Prints help")
parser.add_argument("-f", "--feconfig", help="Feature environment config YAML input file (default = input.yaml)", default="./input.yaml")
parser.add_argument("-a", "--ansibletemplate", help="Inventory template (default = cpansible.j2)", default="./cpansible.j2")
parser.add_argument("-j", "--julietemplate", help="Inventory template (default = julie.j2)", default="./julie.j2")
parser.add_argument("-b", "--brokertemplate", help="Broker Config template (default = julie-cluster.j2)", default="./julie-cluster.j2")
parser.add_argument("-c", "--commandconfig", help="Command Config (default = ccloud.yaml)", default="./ccloud.yaml")
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(funcName)s | %(lineno)d | %(message)s', level=logging.INFO)
logging.info("Started ...")
args = parse_arguments()
do_process (args)
logging.info("Completed ...") | get_api_config | identifier_name |
create-cp-input.py | #!/usr/bin/env python3
###################
#
# Creates input to cp-ansible (ksql, connect) & julieops (topic provision)
#
# Author: Venky Narayanan ([email protected])
# Date: May 26, 2021
#
###################
from __future__ import print_function
from datetime import datetime
import argparse
from jinja2 import Template
import yaml
import json
import logging
import requests
import os
CONST_TIMESTAMP = 'timestamp'
CONST_NAME = 'name'
CONST_PARTITIONS = 'partitions'
CONST_REPLICATION = 'replication'
CONST_OVERRIDE = 'override'
CONST_DEPENDENCIES = 'dependencies'
CONST_KSQL = 'ksql'
CONST_CONNECT = 'connect'
CONST_TOPIC = 'topic'
CONST_TOPICS = 'topics'
CONST_BROKER = 'broker'
CONST_PROVISION = 'provision'
CONST_CONNECTORS = 'connectors'
CONST_DESCRIPTION = 'description'
CONST_QUERIES = 'queries'
CONST_HOSTS = 'hosts'
CONST_PLUGINS = 'plugins'
CONST_PLUGINS_HUB = 'hub'
CONST_PLUGINS_LOCAL = 'local'
CONST_PLUGINS_REMOTE = 'remote'
CONST_CLUSTERDATA = 'cluster_data'
CONST_SSH_USER = 'ssh_username'
CONST_SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {CONST_TIMESTAMP: '', CONST_CONTEXT: 'test_context', CONST_COMPANY: 'test_company', CONST_ENV: 'test_env', CONST_SOURCE: 'test_source', CONST_PROJECTS: [], CONST_BOOTSTRAP_SERVERS: '', CONST_CONNECT: [], CONST_CONSUMER: [], CONST_PRODUCER: [], CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'}, CONST_KSQL + '_' + CONST_QUERIES: [], CONST_KSQL + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_CONNECTORS: [], CONST_CONNECT + '_' + CONST_PLUGINS: []}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
logging.debug ('-------')
if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
override = doc[CONST_TOPIC][CONST_OVERRIDE]
if CONST_PARTITIONS in override:
override_part = override[CONST_PARTITIONS]
if CONST_REPLICATION in override:
override_repl = override[CONST_REPLICATION]
logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
logging.info ('No dependency topics')
for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
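# Illustrative only: given credentials = {'admin': {'api_key': 'AK', 'api_secret': 'AS'}}
# (placeholder values), get_api_config(credentials, 'admin', 'OK', 'OS') returns the
# per-service pair {'api_key': 'AK', 'api_secret': 'AS'}, while a service without its own
# entry, e.g. get_api_config(credentials, 'producer', 'OK', 'OS'), falls back to the
# override pair {'api_key': 'OK', 'api_secret': 'OS'}.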
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[CONST_CONNECT] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONNECT, override_apikey, override_apisecret)
inputs_map[CONST_CONSUMER] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONSUMER, override_apikey, override_apisecret)
inputs_map[CONST_PRODUCER] = get_api_config (docs[CONST_CREDENTIALS], CONST_PRODUCER, override_apikey, override_apisecret)
inputs_map[CONST_KSQL] = get_api_config (docs[CONST_CREDENTIALS], CONST_KSQL, override_apikey, override_apisecret)
inputs_map[CONST_SR] = get_api_config (docs[CONST_CREDENTIALS], CONST_SR, override_apikey, override_apisecret)
inputs_map[CONST_SR][CONST_URL] = docs[CONST_SR][CONST_URL]
def do_process(args):
ccloud_config_file = args.commandconfig
with open(ccloud_config_file) as f:
ccloud_config_docs = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(ccloud_config_docs)
logging.debug ('-------')
process_ccloud_config (ccloud_config_docs)
feconfig_file = args.feconfig
with open(feconfig_file) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(doc)
logging.debug ('-------')
process(doc, args)
logging.debug ('-------')
logging.debug (inputs_map)
logging.debug ('-------')
f.close()
def parse_arguments():
parser = argparse.ArgumentParser(
prog='create-cp-input.py', usage='%(prog)s [options]',
description="Reads the feature environment YAML config file; converts it into julieops and cp-ansible inventory"
)
# parser.add_argument("-h", "--help", help="Prints help")
parser.add_argument("-f", "--feconfig", help="Feature environment config YAML input file (default = input.yaml)", default="./input.yaml")
parser.add_argument("-a", "--ansibletemplate", help="Inventory template (default = cpansible.j2)", default="./cpansible.j2")
parser.add_argument("-j", "--julietemplate", help="Inventory template (default = julie.j2)", default="./julie.j2")
parser.add_argument("-b", "--brokertemplate", help="Broker Config template (default = julie-cluster.j2)", default="./julie-cluster.j2")
parser.add_argument("-c", "--commandconfig", help="Command Config (default = ccloud.yaml)", default="./ccloud.yaml")
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(funcName)s | %(lineno)d | %(message)s', level=logging.INFO)
logging.info("Started ...")
args = parse_arguments()
do_process (args)
logging.info("Completed ...") | from jinja2 import Template
import yaml
import json
import logging
import requests | random_line_split |
create-cp-input.py | #!/usr/bin/env python3
###################
#
# Creates input to cp-ansible (ksql, connect) & julieops (topic provision)
#
# Author: Venky Narayanan ([email protected])
# Date: May 26, 2021
#
###################
from __future__ import print_function
from datetime import datetime
import argparse
from jinja2 import Template
import yaml
import json
import logging
import requests
import os
CONST_TIMESTAMP = 'timestamp'
CONST_NAME = 'name'
CONST_PARTITIONS = 'partitions'
CONST_REPLICATION = 'replication'
CONST_OVERRIDE = 'override'
CONST_DEPENDENCIES = 'dependencies'
CONST_KSQL = 'ksql'
CONST_CONNECT = 'connect'
CONST_TOPIC = 'topic'
CONST_TOPICS = 'topics'
CONST_BROKER = 'broker'
CONST_PROVISION = 'provision'
CONST_CONNECTORS = 'connectors'
CONST_DESCRIPTION = 'description'
CONST_QUERIES = 'queries'
CONST_HOSTS = 'hosts'
CONST_PLUGINS = 'plugins'
CONST_PLUGINS_HUB = 'hub'
CONST_PLUGINS_LOCAL = 'local'
CONST_PLUGINS_REMOTE = 'remote'
CONST_CLUSTERDATA = 'cluster_data'
CONST_SSH_USER = 'ssh_username'
CONST_SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {CONST_TIMESTAMP: '', CONST_CONTEXT: 'test_context', CONST_COMPANY: 'test_company', CONST_ENV: 'test_env', CONST_SOURCE: 'test_source', CONST_PROJECTS: [], CONST_BOOTSTRAP_SERVERS: '', CONST_CONNECT: [], CONST_CONSUMER: [], CONST_PRODUCER: [], CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'}, CONST_KSQL + '_' + CONST_QUERIES: [], CONST_KSQL + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_CONNECTORS: [], CONST_CONNECT + '_' + CONST_PLUGINS: []}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
logging.debug ('-------')
if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
override = doc[CONST_TOPIC][CONST_OVERRIDE]
if CONST_PARTITIONS in override:
override_part = override[CONST_PARTITIONS]
if CONST_REPLICATION in override:
override_repl = override[CONST_REPLICATION]
logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
logging.info ('No dependency topics')
for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[CONST_CONNECT] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONNECT, override_apikey, override_apisecret)
inputs_map[CONST_CONSUMER] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONSUMER, override_apikey, override_apisecret)
inputs_map[CONST_PRODUCER] = get_api_config (docs[CONST_CREDENTIALS], CONST_PRODUCER, override_apikey, override_apisecret)
inputs_map[CONST_KSQL] = get_api_config (docs[CONST_CREDENTIALS], CONST_KSQL, override_apikey, override_apisecret)
inputs_map[CONST_SR] = get_api_config (docs[CONST_CREDENTIALS], CONST_SR, override_apikey, override_apisecret)
inputs_map[CONST_SR][CONST_URL] = docs[CONST_SR][CONST_URL]
def do_process(args):
ccloud_config_file = args.commandconfig
with open(ccloud_config_file) as f:
ccloud_config_docs = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(ccloud_config_docs)
logging.debug ('-------')
process_ccloud_config (ccloud_config_docs)
feconfig_file = args.feconfig
with open(feconfig_file) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(doc)
logging.debug ('-------')
process(doc, args)
logging.debug ('-------')
logging.debug (inputs_map)
logging.debug ('-------')
f.close()
def parse_arguments():
parser = argparse.ArgumentParser(
prog='create-cp-input.py', usage='%(prog)s [options]',
description="Reads the feature environment YAML config file; converts it into julieops and cp-ansible inventory"
)
# parser.add_argument("-h", "--help", help="Prints help")
parser.add_argument("-f", "--feconfig", help="Feature environment config YAML input file (default = input.yaml)", default="./input.yaml")
parser.add_argument("-a", "--ansibletemplate", help="Inventory template (default = cpansible.j2)", default="./cpansible.j2")
parser.add_argument("-j", "--julietemplate", help="Inventory template (default = julie.j2)", default="./julie.j2")
parser.add_argument("-b", "--brokertemplate", help="Broker Config template (default = julie-cluster.j2)", default="./julie-cluster.j2")
parser.add_argument("-c", "--commandconfig", help="Command Config (default = ccloud.yaml)", default="./ccloud.yaml")
return parser.parse_args()
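# Illustrative only (not part of the original script): typical invocations, assuming the
# default templates and ccloud.yaml sit next to the script. The feature file names are
# placeholders.
#
#   ./create-cp-input.py
#   ./create-cp-input.py -f feature1.yaml -c ccloud-prod.yaml
#   ./create-cp-input.py --feconfig feature1.yaml --ansibletemplate my-cpansible.j2
#
# Each run writes <feature-name>.ansible.yaml, <feature-name>.julieops.yaml and
# <feature-name>.cluster.properties into the current working directory.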
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(funcName)s | %(lineno)d | %(message)s', level=logging.INFO)
logging.info("Started ...")
args = parse_arguments()
do_process (args)
logging.info("Completed ...") | hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts | identifier_body |
create-cp-input.py | #!/usr/bin/env python3
###################
#
# Creates input to cp-ansible (ksql, connect) & julieops (topic provision)
#
# Author: Venky Narayanan ([email protected])
# Date: May 26, 2021
#
###################
from __future__ import print_function
from datetime import datetime
import argparse
from jinja2 import Template
import yaml
import json
import logging
import requests
import os
CONST_TIMESTAMP = 'timestamp'
CONST_NAME = 'name'
CONST_PARTITIONS = 'partitions'
CONST_REPLICATION = 'replication'
CONST_OVERRIDE = 'override'
CONST_DEPENDENCIES = 'dependencies'
CONST_KSQL = 'ksql'
CONST_CONNECT = 'connect'
CONST_TOPIC = 'topic'
CONST_TOPICS = 'topics'
CONST_BROKER = 'broker'
CONST_PROVISION = 'provision'
CONST_CONNECTORS = 'connectors'
CONST_DESCRIPTION = 'description'
CONST_QUERIES = 'queries'
CONST_HOSTS = 'hosts'
CONST_PLUGINS = 'plugins'
CONST_PLUGINS_HUB = 'hub'
CONST_PLUGINS_LOCAL = 'local'
CONST_PLUGINS_REMOTE = 'remote'
CONST_CLUSTERDATA = 'cluster_data'
CONST_SSH_USER = 'ssh_username'
CONST_SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {CONST_TIMESTAMP: '', CONST_CONTEXT: 'test_context', CONST_COMPANY: 'test_company', CONST_ENV: 'test_env', CONST_SOURCE: 'test_source', CONST_PROJECTS: [], CONST_BOOTSTRAP_SERVERS: '', CONST_CONNECT: [], CONST_CONSUMER: [], CONST_PRODUCER: [], CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'}, CONST_KSQL + '_' + CONST_QUERIES: [], CONST_KSQL + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_HOSTS: [], CONST_CONNECT + '_' + CONST_CONNECTORS: [], CONST_CONNECT + '_' + CONST_PLUGINS: []}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
logging.debug ('-------')
if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
override = doc[CONST_TOPIC][CONST_OVERRIDE]
if CONST_PARTITIONS in override:
override_part = override[CONST_PARTITIONS]
if CONST_REPLICATION in override:
override_repl = override[CONST_REPLICATION]
logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
logging.info ('No dependency topics')
for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
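# Illustrative only (not part of the original script): the flattening performed above,
# shown on an in-memory connector definition instead of a JSON file on disk. The
# connector name and config keys are made up for the example.
def _example_flatten_connector():
    data = {'name': 'jdbc-orders', 'config': {'tasks.max': 1, 'topics': 'orders'}}
    flat = [item + " : " + str(data['config'][item]) for item in data['config']]
    # flat == ['tasks.max : 1', 'topics : orders']
    return {'name': data['name'], 'config': flat}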
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[CONST_CONNECT] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONNECT, override_apikey, override_apisecret)
inputs_map[CONST_CONSUMER] = get_api_config (docs[CONST_CREDENTIALS], CONST_CONSUMER, override_apikey, override_apisecret)
inputs_map[CONST_PRODUCER] = get_api_config (docs[CONST_CREDENTIALS], CONST_PRODUCER, override_apikey, override_apisecret)
inputs_map[CONST_KSQL] = get_api_config (docs[CONST_CREDENTIALS], CONST_KSQL, override_apikey, override_apisecret)
inputs_map[CONST_SR] = get_api_config (docs[CONST_CREDENTIALS], CONST_SR, override_apikey, override_apisecret)
inputs_map[CONST_SR][CONST_URL] = docs[CONST_SR][CONST_URL]
def do_process(args):
ccloud_config_file = args.commandconfig
with open(ccloud_config_file) as f:
ccloud_config_docs = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(ccloud_config_docs)
logging.debug ('-------')
process_ccloud_config (ccloud_config_docs)
feconfig_file = args.feconfig
with open(feconfig_file) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
logging.debug ('-------')
logging.debug(doc)
logging.debug ('-------')
process(doc, args)
logging.debug ('-------')
logging.debug (inputs_map)
logging.debug ('-------')
f.close()
def parse_arguments():
parser = argparse.ArgumentParser(
prog='create-cp-input.py', usage='%(prog)s [options]',
description="Reads the feature environment YAML config file; converts it into julieops and cp-ansible inventory"
)
# parser.add_argument("-h", "--help", help="Prints help")
parser.add_argument("-f", "--feconfig", help="Feature environment config YAML input file (default = input.yaml)", default="./input.yaml")
parser.add_argument("-a", "--ansibletemplate", help="Inventory template (default = cpansible.j2)", default="./cpansible.j2")
parser.add_argument("-j", "--julietemplate", help="Inventory template (default = julie.j2)", default="./julie.j2")
parser.add_argument("-b", "--brokertemplate", help="Broker Config template (default = julie-cluster.j2)", default="./julie-cluster.j2")
parser.add_argument("-c", "--commandconfig", help="Command Config (default = ccloud.yaml)", default="./ccloud.yaml")
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(filename)s | %(funcName)s | %(lineno)d | %(message)s', level=logging.INFO)
logging.info("Started ...")
args = parse_arguments()
do_process (args)
logging.info("Completed ...") | newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret | conditional_block |
mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::result;
use kvm_bindings::kvm_run;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2};
/// Wrappers over KVM device ioctls.
pub mod device;
/// Wrappers over KVM system ioctls.
pub mod system;
/// Wrappers over KVM VCPU ioctls.
pub mod vcpu;
/// Wrappers over KVM Virtual Machine ioctls.
pub mod vm;
/// A specialized `Result` type for KVM ioctls.
///
/// This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
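// Illustrative only (not part of the original file): a small check of the sizing rule
// described in the comment above, using the real `kvm_cpuid2` header struct and its
// trailing `kvm_cpuid_entry2` entries. The allocation must cover the header plus
// `count` entries.
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod vec_with_array_field_example {
use super::*;
#[test]
fn allocation_covers_header_and_entries() {
let count = 4;
let v = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(count);
assert!(v.len() * size_of::<kvm_cpuid2>() >= size_of::<kvm_cpuid2>() + count * size_of::<kvm_cpuid_entry2>());
}
}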
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero length array at the end, hidden behind bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result<KvmRunWrapper> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
0,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(KvmRunWrapper {
kvm_run_ptr: addr as *mut u8,
mmap_size: size,
})
}
/// Returns a mutable reference to `kvm_run`.
///
#[allow(clippy::mut_from_ref)]
pub fn as_mut_ref(&self) -> &mut kvm_run {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was.
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *(self.kvm_run_ptr as *mut kvm_run)
}
}
}
impl Drop for KvmRunWrapper {
fn drop(&mut self) {
// This is safe because we mmap the area at kvm_run_ptr ourselves,
// and nobody else is holding a reference to it.
unsafe {
libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size);
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod tests {
use super::*;
#[test]
fn test_cpuid_from_entries() {
let num_entries = 4;
let mut cpuid = CpuId::new(num_entries);
// add entry
let mut entries = cpuid.mut_entries_slice().to_vec();
let new_entry = kvm_cpuid_entry2 {
function: 0x4,
index: 0,
flags: 1,
eax: 0b1100000,
ebx: 0,
ecx: 0,
edx: 0,
padding: [0, 0, 0],
};
entries.insert(0, new_entry);
cpuid = CpuId::from_entries(&entries);
// check that the cpuid contains the new entry
assert_eq!(cpuid.allocated_len, num_entries + 1);
assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32);
assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1);
assert_eq!(cpuid.mut_entries_slice()[0], new_entry);
}
}
mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::result;
use kvm_bindings::kvm_run;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2};
/// Wrappers over KVM device ioctls.
pub mod device;
/// Wrappers over KVM system ioctls.
pub mod system;
/// Wrappers over KVM VCPU ioctls.
pub mod vcpu;
/// Wrappers over KVM Virtual Machine ioctls.
pub mod vm;
/// A specialized `Result` type for KVM ioctls.
///
/// This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
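// Illustrative only (not part of the original file): the division above rounds up, so a
// request that is not a multiple of `size_of::<T>()` still gets enough elements. For
// example, 10 bytes of `u32` values yields 3 elements (12 bytes).
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod vec_with_size_in_bytes_example {
use super::*;
#[test]
fn rounds_up_to_whole_elements() {
let v: Vec<u32> = vec_with_size_in_bytes(10);
assert_eq!(v.len(), 3);
}
}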
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero length array at the end, hidden behind bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
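// A minimal sharing sketch (illustrative only, not code from this crate): the manual
// impls above are what allow a `KvmRunWrapper` to be handed to another thread,
// for example behind an `Arc`. Here `vcpu_fd` and `run_size` are assumed to exist
// in the caller's scope:
//
//     let wrapper = Arc::new(KvmRunWrapper::mmap_from_fd(&vcpu_fd, run_size)?);
//     let worker = std::thread::spawn({
//         let wrapper = Arc::clone(&wrapper);
//         move || { let _run: &mut kvm_run = wrapper.as_mut_ref(); }
//     });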
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result<KvmRunWrapper> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
0,
)
};
if addr == libc::MAP_FAILED |
Ok(KvmRunWrapper {
kvm_run_ptr: addr as *mut u8,
mmap_size: size,
})
}
/// Returns a mutable reference to `kvm_run`.
///
#[allow(clippy::mut_from_ref)]
pub fn as_mut_ref(&self) -> &mut kvm_run {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was.
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *(self.kvm_run_ptr as *mut kvm_run)
}
}
}
impl Drop for KvmRunWrapper {
fn drop(&mut self) {
// This is safe because we mmap the area at kvm_run_ptr ourselves,
// and nobody else is holding a reference to it.
unsafe {
libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size);
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod tests {
use super::*;
#[test]
fn test_cpuid_from_entries() {
let num_entries = 4;
let mut cpuid = CpuId::new(num_entries);
// add entry
let mut entries = cpuid.mut_entries_slice().to_vec();
let new_entry = kvm_cpuid_entry2 {
function: 0x4,
index: 0,
flags: 1,
eax: 0b1100000,
ebx: 0,
ecx: 0,
edx: 0,
padding: [0, 0, 0],
};
entries.insert(0, new_entry);
cpuid = CpuId::from_entries(&entries);
// check that the cpuid contains the new entry
assert_eq!(cpuid.allocated_len, num_entries + 1);
assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32);
assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1);
assert_eq!(cpuid.mut_entries_slice()[0], new_entry);
}
}
| {
return Err(io::Error::last_os_error());
} | conditional_block |
mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::result;
use kvm_bindings::kvm_run;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2};
/// Wrappers over KVM device ioctls.
pub mod device;
/// Wrappers over KVM system ioctls.
pub mod system;
/// Wrappers over KVM VCPU ioctls.
pub mod vcpu;
/// Wrappers over KVM Virtual Machine ioctls.
pub mod vm;
/// A specialized `Result` type for KVM ioctls.
///
/// This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
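// A short usage sketch (illustrative; it mirrors `CpuId::new` below rather than
// adding new behavior): allocate backing storage for one `kvm_cpuid2` header
// followed by four trailing `kvm_cpuid_entry2` entries. Only element 0 of the
// returned Vec is used as the header; the rest of the allocation holds the
// flexible array:
//
//     let mut storage = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(4);
//     storage[0].nent = 4;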
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero-length array at the end, hidden behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] |
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result<KvmRunWrapper> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
0,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(KvmRunWrapper {
kvm_run_ptr: addr as *mut u8,
mmap_size: size,
})
}
/// Returns a mutable reference to `kvm_run`.
///
#[allow(clippy::mut_from_ref)]
pub fn as_mut_ref(&self) -> &mut kvm_run {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was.
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *(self.kvm_run_ptr as *mut kvm_run)
}
}
}
impl Drop for KvmRunWrapper {
fn drop(&mut self) {
// This is safe because we mmap the area at kvm_run_ptr ourselves,
// and nobody else is holding a reference to it.
unsafe {
libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size);
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod tests {
use super::*;
#[test]
fn test_cpuid_from_entries() {
let num_entries = 4;
let mut cpuid = CpuId::new(num_entries);
// add entry
let mut entries = cpuid.mut_entries_slice().to_vec();
let new_entry = kvm_cpuid_entry2 {
function: 0x4,
index: 0,
flags: 1,
eax: 0b1100000,
ebx: 0,
ecx: 0,
edx: 0,
padding: [0, 0, 0],
};
entries.insert(0, new_entry);
cpuid = CpuId::from_entries(&entries);
// check that the cpuid contains the new entry
assert_eq!(cpuid.allocated_len, num_entries + 1);
assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32);
assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1);
assert_eq!(cpuid.mut_entries_slice()[0], new_entry);
}
}
| {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
} | identifier_body |
mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::ptr::null_mut;
use std::result;
use kvm_bindings::kvm_run;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2};
/// Wrappers over KVM device ioctls.
pub mod device;
/// Wrappers over KVM system ioctls.
pub mod system;
/// Wrappers over KVM VCPU ioctls.
pub mod vcpu;
/// Wrappers over KVM Virtual Machine ioctls.
pub mod vm;
/// A specialized `Result` type for KVM ioctls.
///
/// This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero-length array at the end, hidden behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
} |
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result<KvmRunWrapper> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
0,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(KvmRunWrapper {
kvm_run_ptr: addr as *mut u8,
mmap_size: size,
})
}
/// Returns a mutable reference to `kvm_run`.
///
#[allow(clippy::mut_from_ref)]
pub fn as_mut_ref(&self) -> &mut kvm_run {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was.
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *(self.kvm_run_ptr as *mut kvm_run)
}
}
}
impl Drop for KvmRunWrapper {
fn drop(&mut self) {
// This is safe because we mmap the area at kvm_run_ptr ourselves,
// and nobody else is holding a reference to it.
unsafe {
libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size);
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod tests {
use super::*;
#[test]
fn test_cpuid_from_entries() {
let num_entries = 4;
let mut cpuid = CpuId::new(num_entries);
// add entry
let mut entries = cpuid.mut_entries_slice().to_vec();
let new_entry = kvm_cpuid_entry2 {
function: 0x4,
index: 0,
flags: 1,
eax: 0b1100000,
ebx: 0,
ecx: 0,
edx: 0,
padding: [0, 0, 0],
};
entries.insert(0, new_entry);
cpuid = CpuId::from_entries(&entries);
// check that the cpuid contains the new entry
assert_eq!(cpuid.allocated_len, num_entries + 1);
assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32);
assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1);
assert_eq!(cpuid.mut_entries_slice()[0], new_entry);
}
} | random_line_split |
|
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
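// A hypothetical construction sketch (all values below are illustrative assumptions,
// not defaults taken from this crate); `run` further down then takes a Sink of
// (Bytes, String) pairs plus a shutdown receiver:
//
//     let server = FileServer {
//         include: vec![PathBuf::from("/var/log/**/*.log")],
//         exclude: vec![PathBuf::from("/var/log/**/*.gz")],
//         max_read_bytes: 2048,
//         start_at_beginning: false,
//         ignore_before: None,
//         max_line_bytes: 100 * 1024,
//         fingerprint_bytes: 256,
//         ignored_header_bytes: 0,
//     };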
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handler open but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
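// Concretely: when a pass reads no bytes the cap doubles (1, 2, 4, ... ms) and
// saturates at 2_048 ms; any pass that reads data resets it to 1 ms. See the
// bookkeeping at the end of the loop body below.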
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we bump the backoff_cap up by a factor of two,
// limited by the hard-coded cap. Else, we set the backoff_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn | (
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
}
| get_fingerprint_of_file | identifier_name |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handler open but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) |
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
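// Note: the fingerprint is a CRC-64/ECMA checksum of the first `fingerprint_bytes`
// bytes after skipping `ignored_header_bytes`, so a renamed or rotated copy whose
// leading bytes are unchanged maps to the same `FileFingerprint`; the rename and
// duplicate handling in `run` relies on exactly that.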
}
| {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we bump the backoff_cap up by a factor of two,
// limited by the hard-coded cap. Else, we set the backoff_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
} | identifier_body |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handler open but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else |
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we bump the backoff_cap up by a factor of two,
// limited by the hard-coded cap. Else, we set the backoff_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
}
| {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
} | conditional_block |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handler open but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
| let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we bump the backoff_cap up by a factor of two,
// limited by the hard-coded cap. Else, we set the backoff_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
} | let mut backoff_cap: usize = 1;
let mut lines = Vec::new(); | random_line_split |
BAT.py | #!/usr/bin/env python
import numpy as np
from Scientific.Geometry.Objects3D import Sphere, Cone, Plane, Line, \
rotatePoint
from Scientific.Geometry import Vector
import MMTK
# Vector functions
def normalize(v1):
return v1 / np.sqrt(np.sum(v1 * v1))
def cross(v1, v2):
return np.array([v1[1]*v2[2]-v1[2]*v2[1], \
v1[2]*v2[0]-v1[0]*v2[2], \
v1[0]*v2[1]-v1[1]*v2[0]])
def distance(p1, p2):
v1 = p2 - p1
return np.sqrt(np.sum(v1 * v1))
def angle(p1, p2, p3):
v1 = p2 - p1
v2 = p2 - p3
return np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(np.sum(v1*v1)*np.sum(v2*v2)))))
def dihedral(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
s = np.sum(cross(b, a) * normalize(v2))
return np.arctan2(s, c)
def BAT4(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
norm_v1_2 = np.sum(v1 * v1)
norm_v2_2 = np.sum(v2 * v2)
s = np.sum(cross(b, a) * v2 / np.sqrt(norm_v2_2))
return (np.sqrt(norm_v1_2), np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(norm_v1_2*norm_v2_2)))), np.arctan2(s,c))
# Main converter class
class converter():
"""
Interconverts Cartesian and Bond-Angle-Torsion coordinates
"""
def __init__(self, universe, molecule, initial_atom=None):
self.universe = universe
self.molecule = molecule
self.natoms = universe.numberOfAtoms()
self._converter_setup(initial_atom)
def _converter_setup(self, initial_atom=None):
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Select the heaviest root atoms from the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple of four atoms: the new atom followed by its three previously selected neighbors that define the dihedral
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print 'Selected atoms:', selected
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def getFirstTorsionInds(self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
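# Layout note (derived from the index arithmetic above and in BAT/Cartesian below):
# an internal-coordinate array reads
#   [r01, r12, theta012, b, a, t, b, a, t, ...]
# i.e. three root values followed by one (bond, angle, torsion) triple per
# remaining atom; with extended coordinates, six external values
# (x, y, z, phi, theta, omega) are prepended, which is where the offset of 6 comes from.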
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param XYZ: Cartesian coordinates as an array of shape (natoms, 3)
:param extended: whether to include the six external (rigid-body) coordinates or not
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
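# A minimal round-trip sketch (illustrative; assumes `universe` and `molecule` are an
# MMTK universe and molecule prepared elsewhere, as in __init__ above):
#
#   conv = converter(universe, molecule)
#   bat = conv.BAT(universe.configuration().array, extended=True)
#   xyz = conv.Cartesian(bat)  # should reproduce the original Cartesian coordinates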
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Polar angle
theta = np.arccos(e[2]) # Azimuthal angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array
return XYZ
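# NOTE (added): the loop below is unreachable because of the `return XYZ` above; it appears to
# be an alternative closed-form implementation of the same reconstruction (explicit axis-angle
# rotation instead of Scientific.Geometry intersections) and is kept for reference only.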
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c
def showMolecule(self, colorBy=None, label=False, dcdFN=None):
"""
Opens the molecule in VMD | # Write PDB file
# To set Occupancy, change atom.occupancy
# To set Beta, change atom.temperature_factor
import os.path
pdbFN = os.path.join(MMTK.Database.molecule_types.directory,
'showMolecule.pdb')
outF = MMTK.PDB.PDBOutputFile(pdbFN)
outF.write(self.molecule)
outF.close()
# Write VMD script
script = 'set ligand [mol new ' + pdbFN + ']\n'
if colorBy is not None:
script += 'mol modcolor 0 $ligand ' + colorBy + '\n'
script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n'
if label:
script += """
proc label_atoms { molid seltext } {
set sel [atomselect $molid $seltext]
set atomlist [$sel list]
foreach {atom} $atomlist {
set atomlabel [format "%d/%d" $molid $atom]
label add Atoms $atomlabel
}
$sel delete
}
label_atoms 0 all
"""
if dcdFN is not None:
script += 'animate delete all $ligand\n'
script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n'
scriptF = open('showMolecule.vmd', 'w')
scriptF.write(script)
scriptF.close()
# Find and run vmd
import AlGDock
vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])
import subprocess
subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])
# Remove files
os.remove(pdbFN)
os.remove('showMolecule.vmd')
########
# MAIN #
########
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Bond-Angle-Torsion converter')
parser.add_argument(
'--database',
help='MMTK database that describes the molecule of interest',
default='ligand.db')
args = parser.parse_args()
import os.path
if args.database == 'all':
import glob
dbFNs = glob.glob('*/*.db')
elif args.database == 'random':
import glob
from random import randrange
dbFNs = glob.glob('*/*.db')
dbFNs = [dbFNs[randrange(len(dbFNs))]]
elif os.path.isfile(args.database):
dbFNs = [args.database]
else:
raise Exception('Database file %s not found' % args.database)
dirname = os.path.dirname(os.path.abspath(dbFNs[0]))
for FN in dbFNs:
print 'Loading', FN
dbFN = os.path.abspath(FN)
if os.path.dirname(dbFN) != dirname:
raise Exception('Cannot change ligand directory in MMTK')
MMTK.Database.molecule_types.directory = os.path.dirname(dbFN)
universe = MMTK.Universe.InfiniteUniverse()
molecule = MMTK.Molecule(os.path.basename(dbFN))
universe.addObject(molecule)
original_xyz = np.copy(universe.configuration().array)
self = converter(universe, molecule)
# This tests a conversion to BAT coordinates and back
BAT = self.BAT(original_xyz, extended=True)
new_xyz = self.Cartesian(BAT)
print sum(sum(new_xyz - original_xyz))
# This rotates a random primary torsion
from random import randrange
firstTorsionInds = self.getFirstTorsionInds(True)
BAT_ind = firstTorsionInds[randrange(len(firstTorsionInds))]
confs = []
for torsion_offset in np.linspace(0, 2 * np.pi):
BAT_n = np.array([BAT[ind] if ind!=BAT_ind else BAT[ind] + torsion_offset \
for ind in range(len(BAT))])
XYZ = self.Cartesian(BAT_n)
confs.append(XYZ)
import AlGDock.IO
IO_dcd = AlGDock.IO.dcd(molecule)
IO_dcd.write('rotation.dcd', confs)
self.showMolecule(dcdFN='rotation.dcd')
os.remove('rotation.dcd')
# [[51, 10, 5, 46],
# [2, 5, 10, 51],
# [4, 5, 10, 51], | :param colorBy: color atoms by 'Occupancy', or 'Beta'. None uses default colors.
""" | random_line_split |
BAT.py | #!/usr/bin/env python
import numpy as np
from Scientific.Geometry.Objects3D import Sphere, Cone, Plane, Line, \
rotatePoint
from Scientific.Geometry import Vector
import MMTK
# Vector functions
def normalize(v1):
return v1 / np.sqrt(np.sum(v1 * v1))
def cross(v1, v2):
return np.array([v1[1]*v2[2]-v1[2]*v2[1], \
v1[2]*v2[0]-v1[0]*v2[2], \
v1[0]*v2[1]-v1[1]*v2[0]])
def distance(p1, p2):
v1 = p2 - p1
return np.sqrt(np.sum(v1 * v1))
def angle(p1, p2, p3):
v1 = p2 - p1
v2 = p2 - p3
return np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(np.sum(v1*v1)*np.sum(v2*v2)))))
def dihedral(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
s = np.sum(cross(b, a) * normalize(v2))
return np.arctan2(s, c)
def BAT4(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
norm_v1_2 = np.sum(v1 * v1)
norm_v2_2 = np.sum(v2 * v2)
s = np.sum(cross(b, a) * v2 / np.sqrt(norm_v2_2))
return (np.sqrt(norm_v1_2), np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(norm_v1_2*norm_v2_2)))), np.arctan2(s,c))
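# Illustrative addition (not part of the original module): BAT4(p1, p2, p3, p4) returns the
# same three values as distance(p1, p2), angle(p1, p2, p3) and dihedral(p1, p2, p3, p4),
# just computed together so the cross products are shared. A minimal self-check using only
# the helpers defined above:
def _bat4_consistency_check(p1, p2, p3, p4):
    # True when the packed and the individual computations agree to numerical precision.
    bond, ang, tors = BAT4(p1, p2, p3, p4)
    return (abs(bond - distance(p1, p2)) < 1e-10 and
            abs(ang - angle(p1, p2, p3)) < 1e-10 and
            abs(tors - dihedral(p1, p2, p3, p4)) < 1e-10)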
# Main converter class
class converter():
"""
Interconverts Cartesian and Bond-Angle-Torsion coordinates
"""
def __init__(self, universe, molecule, initial_atom=None):
self.universe = universe
self.molecule = molecule
self.natoms = universe.numberOfAtoms()
self._converter_setup(initial_atom)
def _converter_setup(self, initial_atom=None):
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Start the root atom list with the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple (a1, a2, a3, a4) where a1 is the newly added atom and a2, a3, a4 are already-selected neighbours forming the dihedral
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print 'Selected atoms:', selected
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def getFirstTorsionInds(self, extended):
|
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates of the molecule as an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Azimuthal angle
theta = np.arccos(e[2]) # Polar (inclination) angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
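# Note added for clarity (not part of the original class): with the conventions above, a BAT
# vector for natoms atoms is laid out as
#   [x, y, z of the first root atom, phi, theta, omega,  <- 6 external DOF (extended only)
#    bond01, bond12, angle012,                           <- root internal coordinates
#    bond, angle, torsion, ...]                          <- one triple per remaining atom
# (torsions may be stored as phase angles relative to their primary torsion). Hence
# len(BAT) == 3*natoms when extended and 3*natoms - 6 otherwise, which is exactly the test
# Cartesian() below uses to detect external coordinates. Small helper added for illustration:
def BAT_length(self, extended=False):
    # Expected length of the BAT vector produced by BAT(..., extended=extended).
    return 3 * self.natoms if extended else 3 * self.natoms - 6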
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array
return XYZ
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c
def showMolecule(self, colorBy=None, label=False, dcdFN=None):
"""
Opens the molecule in VMD
:param colorBy: color atoms by 'Occupancy', or 'Beta'. None uses default colors.
"""
# Write PDB file
# To set Occupancy, change atom.occupancy
# To set Beta, change atom.temperature_factor
import os.path
pdbFN = os.path.join(MMTK.Database.molecule_types.directory,
'showMolecule.pdb')
outF = MMTK.PDB.PDBOutputFile(pdbFN)
outF.write(self.molecule)
outF.close()
# Write VMD script
script = 'set ligand [mol new ' + pdbFN + ']\n'
if colorBy is not None:
script += 'mol modcolor 0 $ligand ' + colorBy + '\n'
script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n'
if label:
script += """
proc label_atoms { molid seltext } {
set sel [atomselect $molid $seltext]
set atomlist [$sel list]
foreach {atom} $atomlist {
set atomlabel [format "%d/%d" $molid $atom]
label add Atoms $atomlabel
}
$sel delete
}
label_atoms 0 all
"""
if dcdFN is not None:
script += 'animate delete all $ligand\n'
script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n'
scriptF = open('showMolecule.vmd', 'w')
scriptF.write(script)
scriptF.close()
# Find and run vmd
import AlGDock
vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])
import subprocess
subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])
# Remove files
os.remove(pdbFN)
os.remove('showMolecule.vmd')
########
# MAIN #
########
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Bond-Angle-Torsion converter')
parser.add_argument(
'--database',
help='MMTK database that describes the molecule of interest',
default='ligand.db')
args = parser.parse_args()
import os.path
if args.database == 'all':
import glob
dbFNs = glob.glob('*/*.db')
elif args.database == 'random':
import glob
from random import randrange
dbFNs = glob.glob('*/*.db')
dbFNs = [dbFNs[randrange(len(dbFNs))]]
elif os.path.isfile(args.database):
dbFNs = [args.database]
else:
raise Exception('Database file %s not found' % args.database)
dirname = os.path.dirname(os.path.abspath(dbFNs[0]))
for FN in dbFNs:
print 'Loading', FN
dbFN = os.path.abspath(FN)
if os.path.dirname(dbFN) != dirname:
raise Exception('Cannot change ligand directory in MMTK')
MMTK.Database.molecule_types.directory = os.path.dirname(dbFN)
universe = MMTK.Universe.InfiniteUniverse()
molecule = MMTK.Molecule(os.path.basename(dbFN))
universe.addObject(molecule)
original_xyz = np.copy(universe.configuration().array)
self = converter(universe, molecule)
# This tests a conversion to BAT coordinates and back
BAT = self.BAT(original_xyz, extended=True)
new_xyz = self.Cartesian(BAT)
print sum(sum(new_xyz - original_xyz))
# This rotates a random primary torsion
from random import randrange
firstTorsionInds = self.getFirstTorsionInds(True)
BAT_ind = firstTorsionInds[randrange(len(firstTorsionInds))]
confs = []
for torsion_offset in np.linspace(0, 2 * np.pi):
BAT_n = np.array([BAT[ind] if ind!=BAT_ind else BAT[ind] + torsion_offset \
for ind in range(len(BAT))])
XYZ = self.Cartesian(BAT_n)
confs.append(XYZ)
import AlGDock.IO
IO_dcd = AlGDock.IO.dcd(molecule)
IO_dcd.write('rotation.dcd', confs)
self.showMolecule(dcdFN='rotation.dcd')
os.remove('rotation.dcd')
# [[51, 10, 5, 46],
# [2, 5, 10, 51],
# [4, 5, 10, 51],
| """
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions]) | identifier_body |
BAT.py | #!/usr/bin/env python
import numpy as np
from Scientific.Geometry.Objects3D import Sphere, Cone, Plane, Line, \
rotatePoint
from Scientific.Geometry import Vector
import MMTK
# Vector functions
def normalize(v1):
return v1 / np.sqrt(np.sum(v1 * v1))
def cross(v1, v2):
return np.array([v1[1]*v2[2]-v1[2]*v2[1], \
v1[2]*v2[0]-v1[0]*v2[2], \
v1[0]*v2[1]-v1[1]*v2[0]])
def distance(p1, p2):
v1 = p2 - p1
return np.sqrt(np.sum(v1 * v1))
def angle(p1, p2, p3):
v1 = p2 - p1
v2 = p2 - p3
return np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(np.sum(v1*v1)*np.sum(v2*v2)))))
def dihedral(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
s = np.sum(cross(b, a) * normalize(v2))
return np.arctan2(s, c)
def BAT4(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
norm_v1_2 = np.sum(v1 * v1)
norm_v2_2 = np.sum(v2 * v2)
s = np.sum(cross(b, a) * v2 / np.sqrt(norm_v2_2))
return (np.sqrt(norm_v1_2), np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(norm_v1_2*norm_v2_2)))), np.arctan2(s,c))
# Main converter class
class converter():
"""
Interconverts Cartesian and Bond-Angle-Torsion coordinates
"""
def __init__(self, universe, molecule, initial_atom=None):
self.universe = universe
self.molecule = molecule
self.natoms = universe.numberOfAtoms()
self._converter_setup(initial_atom)
def _converter_setup(self, initial_atom=None):
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Start the root atom list with the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple (a1, a2, a3, a4) where a1 is the newly added atom and a2, a3, a4 are already-selected neighbours forming the dihedral
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print 'Selected atoms:', selected
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def getFirstTorsionInds(self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates of the molecule as an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Azimuthal angle
theta = np.arccos(e[2]) # Polar (inclination) angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
|
return XYZ
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c
def showMolecule(self, colorBy=None, label=False, dcdFN=None):
"""
Opens the molecule in VMD
:param colorBy: color atoms by 'Occupancy', or 'Beta'. None uses default colors.
"""
# Write PDB file
# To set Occupancy, change atom.occupancy
# To set Beta, change atom.temperature_factor
import os.path
pdbFN = os.path.join(MMTK.Database.molecule_types.directory,
'showMolecule.pdb')
outF = MMTK.PDB.PDBOutputFile(pdbFN)
outF.write(self.molecule)
outF.close()
# Write VMD script
script = 'set ligand [mol new ' + pdbFN + ']\n'
if colorBy is not None:
script += 'mol modcolor 0 $ligand ' + colorBy + '\n'
script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n'
if label:
script += """
proc label_atoms { molid seltext } {
set sel [atomselect $molid $seltext]
set atomlist [$sel list]
foreach {atom} $atomlist {
set atomlabel [format "%d/%d" $molid $atom]
label add Atoms $atomlabel
}
$sel delete
}
label_atoms 0 all
"""
if dcdFN is not None:
script += 'animate delete all $ligand\n'
script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n'
scriptF = open('showMolecule.vmd', 'w')
scriptF.write(script)
scriptF.close()
# Find and run vmd
import AlGDock
vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])
import subprocess
subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])
# Remove files
os.remove(pdbFN)
os.remove('showMolecule.vmd')
########
# MAIN #
########
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Bond-Angle-Torsion converter')
parser.add_argument(
'--database',
help='MMTK database that describes the molecule of interest',
default='ligand.db')
args = parser.parse_args()
import os.path
if args.database == 'all':
import glob
dbFNs = glob.glob('*/*.db')
elif args.database == 'random':
import glob
from random import randrange
dbFNs = glob.glob('*/*.db')
dbFNs = [dbFNs[randrange(len(dbFNs))]]
elif os.path.isfile(args.database):
dbFNs = [args.database]
else:
raise Exception('Database file %s not found' % args.database)
dirname = os.path.dirname(os.path.abspath(dbFNs[0]))
for FN in dbFNs:
print 'Loading', FN
dbFN = os.path.abspath(FN)
if os.path.dirname(dbFN) != dirname:
raise Exception('Cannot change ligand directory in MMTK')
MMTK.Database.molecule_types.directory = os.path.dirname(dbFN)
universe = MMTK.Universe.InfiniteUniverse()
molecule = MMTK.Molecule(os.path.basename(dbFN))
universe.addObject(molecule)
original_xyz = np.copy(universe.configuration().array)
self = converter(universe, molecule)
# This tests a conversion to BAT coordinates and back
BAT = self.BAT(original_xyz, extended=True)
new_xyz = self.Cartesian(BAT)
print sum(sum(new_xyz - original_xyz))
# This rotates a random primary torsion
from random import randrange
firstTorsionInds = self.getFirstTorsionInds(True)
BAT_ind = firstTorsionInds[randrange(len(firstTorsionInds))]
confs = []
for torsion_offset in np.linspace(0, 2 * np.pi):
BAT_n = np.array([BAT[ind] if ind!=BAT_ind else BAT[ind] + torsion_offset \
for ind in range(len(BAT))])
XYZ = self.Cartesian(BAT_n)
confs.append(XYZ)
import AlGDock.IO
IO_dcd = AlGDock.IO.dcd(molecule)
IO_dcd.write('rotation.dcd', confs)
self.showMolecule(dcdFN='rotation.dcd')
os.remove('rotation.dcd')
# [[51, 10, 5, 46],
# [2, 5, 10, 51],
# [4, 5, 10, 51],
| sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array | conditional_block |
BAT.py | #!/usr/bin/env python
import numpy as np
from Scientific.Geometry.Objects3D import Sphere, Cone, Plane, Line, \
rotatePoint
from Scientific.Geometry import Vector
import MMTK
# Vector functions
def normalize(v1):
return v1 / np.sqrt(np.sum(v1 * v1))
def cross(v1, v2):
return np.array([v1[1]*v2[2]-v1[2]*v2[1], \
v1[2]*v2[0]-v1[0]*v2[2], \
v1[0]*v2[1]-v1[1]*v2[0]])
def distance(p1, p2):
v1 = p2 - p1
return np.sqrt(np.sum(v1 * v1))
def angle(p1, p2, p3):
v1 = p2 - p1
v2 = p2 - p3
return np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(np.sum(v1*v1)*np.sum(v2*v2)))))
def dihedral(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
s = np.sum(cross(b, a) * normalize(v2))
return np.arctan2(s, c)
def BAT4(p1, p2, p3, p4):
v1 = p2 - p1
v2 = p2 - p3
v3 = p3 - p4
a = normalize(cross(v1, v2))
b = normalize(cross(v3, v2))
c = np.sum(a * b)
norm_v1_2 = np.sum(v1 * v1)
norm_v2_2 = np.sum(v2 * v2)
s = np.sum(cross(b, a) * v2 / np.sqrt(norm_v2_2))
return (np.sqrt(norm_v1_2), np.arccos(max(-1.,min(1.,np.sum(v1*v2)/\
np.sqrt(norm_v1_2*norm_v2_2)))), np.arctan2(s,c))
# Main converter class
class converter():
"""
Interconverts Cartesian and Bond-Angle-Torsion coordinates
"""
def __init__(self, universe, molecule, initial_atom=None):
self.universe = universe
self.molecule = molecule
self.natoms = universe.numberOfAtoms()
self._converter_setup(initial_atom)
def _converter_setup(self, initial_atom=None):
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Start the root atom list with the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple (a1, a2, a3, a4) where a1 is the newly added atom and a2, a3, a4 are already-selected neighbours forming the dihedral
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print 'Selected atoms:', selected
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def | (self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates of the molecule as an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Azimuthal angle
theta = np.arccos(e[2]) # Polar (inclination) angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array
return XYZ
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c
def showMolecule(self, colorBy=None, label=False, dcdFN=None):
"""
Opens the molecule in VMD
:param colorBy: color atoms by 'Occupancy', or 'Beta'. None uses default colors.
"""
# Write PDB file
# To set Occupancy, change atom.occupancy
# To set Beta, change atom.temperature_factor
import os.path
pdbFN = os.path.join(MMTK.Database.molecule_types.directory,
'showMolecule.pdb')
outF = MMTK.PDB.PDBOutputFile(pdbFN)
outF.write(self.molecule)
outF.close()
# Write VMD script
script = 'set ligand [mol new ' + pdbFN + ']\n'
if colorBy is not None:
script += 'mol modcolor 0 $ligand ' + colorBy + '\n'
script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n'
if label:
script += """
proc label_atoms { molid seltext } {
set sel [atomselect $molid $seltext]
set atomlist [$sel list]
foreach {atom} $atomlist {
set atomlabel [format "%d/%d" $molid $atom]
label add Atoms $atomlabel
}
$sel delete
}
label_atoms 0 all
"""
if dcdFN is not None:
script += 'animate delete all $ligand\n'
script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n'
scriptF = open('showMolecule.vmd', 'w')
scriptF.write(script)
scriptF.close()
# Find and run vmd
import AlGDock
vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])
import subprocess
subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])
# Remove files
os.remove(pdbFN)
os.remove('showMolecule.vmd')
########
# MAIN #
########
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Bond-Angle-Torsion converter')
parser.add_argument(
'--database',
help='MMTK database that describes the molecule of interest',
default='ligand.db')
args = parser.parse_args()
import os.path
if args.database == 'all':
import glob
dbFNs = glob.glob('*/*.db')
elif args.database == 'random':
import glob
from random import randrange
dbFNs = glob.glob('*/*.db')
dbFNs = [dbFNs[randrange(len(dbFNs))]]
elif os.path.isfile(args.database):
dbFNs = [args.database]
else:
raise Exception('Database file %s not found' % args.database)
dirname = os.path.dirname(os.path.abspath(dbFNs[0]))
for FN in dbFNs:
print 'Loading', FN
dbFN = os.path.abspath(FN)
if os.path.dirname(dbFN) != dirname:
raise Exception('Cannot change ligand directory in MMTK')
MMTK.Database.molecule_types.directory = os.path.dirname(dbFN)
universe = MMTK.Universe.InfiniteUniverse()
molecule = MMTK.Molecule(os.path.basename(dbFN))
universe.addObject(molecule)
original_xyz = np.copy(universe.configuration().array)
self = converter(universe, molecule)
# This tests a conversion to BAT coordinates and back
BAT = self.BAT(original_xyz, extended=True)
new_xyz = self.Cartesian(BAT)
print sum(sum(new_xyz - original_xyz))
# This rotates a random primary torsion
from random import randrange
firstTorsionInds = self.getFirstTorsionInds(True)
BAT_ind = firstTorsionInds[randrange(len(firstTorsionInds))]
confs = []
for torsion_offset in np.linspace(0, 2 * np.pi):
BAT_n = np.array([BAT[ind] if ind!=BAT_ind else BAT[ind] + torsion_offset \
for ind in range(len(BAT))])
XYZ = self.Cartesian(BAT_n)
confs.append(XYZ)
import AlGDock.IO
IO_dcd = AlGDock.IO.dcd(molecule)
IO_dcd.write('rotation.dcd', confs)
self.showMolecule(dcdFN='rotation.dcd')
os.remove('rotation.dcd')
# [[51, 10, 5, 46],
# [2, 5, 10, 51],
# [4, 5, 10, 51],
| getFirstTorsionInds | identifier_name |
gen_mike_input_rf_linux.py | #!/home/uwcc-admin/curw_mike_data_handler/venv/bin/python3
"only she bang, root dir, output dir and filename are different from generic one"
import pymysql
from datetime import datetime, timedelta
import traceback
import json
import os
import sys
import getopt
import pandas as pd
import numpy as np
DATE_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
from db_adapter.constants import set_db_config_file_path
from db_adapter.constants import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def append_to_file(file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean flag: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
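# Illustrative helper (added, never called by the script): shows the intended effect of the
# two cleaning steps that prepare_mike_rf_input() applies to the station dataframe, where a
# negative value marks missing/invalid rainfall and is replaced by the row (timestamp) average.
def _demo_rainfall_cleaning():
    df = pd.DataFrame({'s1': [1.0, -9999.0], 's2': [3.0, 2.0]})
    df = replace_negative_numbers_with_nan(df)  # -9999.0 becomes NaN
    df = replace_nan_with_row_average(df)       # NaN becomes 2.0, the average of that row
    return df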
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for dict in results:
grid_id = dict.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = dict.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
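# For reference (example values are hypothetical): the mapping built above has the form
#   {'100066': '<curw_sim hash id>', '100055': '<curw_sim hash id>'}
# i.e. observation station id -> timeseries hash id, keyed by the second token of the grid_id;
# prepare_mike_rf_input() uses it to fetch each station's rainfall series by hash id.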
def prepare_mike_rf_input(start, end, coefficients):
try:
#### process station-based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)  # DataFrame.round() returns a new frame, so assign it back
return mike_input
except Exception:
traceback.print_exc()
finally:
destroy_Pool(pool)
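# Explanatory sketch (added, not executed): for every catchment name in the coefficient table,
# prepare_mike_rf_input() forms the catchment rainfall as a weighted sum of station series,
# catchment(t) = sum_i coefficient_i * station_i(t), evaluated on the 15-minute resampled and
# gap-filled dataframe. A standalone equivalent, assuming `stations` is that cleaned dataframe
# and `coeffs` has columns ['name', 'curw_obs_id', 'coefficient'] with at least one row per name:
def _weighted_catchment_rainfall(stations, coeffs, name):
    rows = coeffs[coeffs.name == name]
    weighted = [stations[row['curw_obs_id']] * row['coefficient'] for _, row in rows.iterrows()]
    return sum(weighted)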
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir = os.path.join(OUTPUT_DIRECTORY, (datetime.utcnow() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d_%H-00-00'))
if file_name is None:
file_name = 'mike_rf.txt'.format(start_time, end_time)
mike_rf_file_path = os.path.join(output_dir, file_name)
if not os.path.isfile(mike_rf_file_path):
makedir_if_not_exist_given_filepath(mike_rf_file_path)
print("{} start preparing mike rainfall input".format(datetime.now()))
coefficients = pd.read_csv(os.path.join('inputs', 'params', 'sb_rf_coefficients.csv'), delimiter=',')
mike_rainfall = prepare_mike_rf_input(start=start_time, end=end_time, coefficients=coefficients)
mike_rainfall.to_csv(mike_rf_file_path, header=True, index=True) | else:
print('Mike rainfall input file already in path : ', mike_rf_file_path)
except Exception:
traceback.print_exc() | print("{} completed preparing mike rainfall input".format(datetime.now()))
print("Mike input rainfall file is available at {}".format(mike_rf_file_path)) | random_line_split |
gen_mike_input_rf_linux.py | #!/home/uwcc-admin/curw_mike_data_handler/venv/bin/python3
"only she bang, root dir, output dir and filename are different from generic one"
import pymysql
from datetime import datetime, timedelta
import traceback
import json
import os
import sys
import getopt
import pandas as pd
import numpy as np
DATE_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
from db_adapter.constants import set_db_config_file_path
from db_adapter.constants import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def append_to_file(file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean flag: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for dict in results:
grid_id = dict.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = dict.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
def prepare_mike_rf_input(start, end, coefficients):
try:
#### process station-based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)  # DataFrame.round() returns a new frame, so assign it back
return mike_input
except Exception:
traceback.print_exc()
finally:
destroy_Pool(pool)
def usage():
|
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir = os.path.join(OUTPUT_DIRECTORY, (datetime.utcnow() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d_%H-00-00'))
if file_name is None:
file_name = 'mike_rf.txt'.format(start_time, end_time)
mike_rf_file_path = os.path.join(output_dir, file_name)
if not os.path.isfile(mike_rf_file_path):
makedir_if_not_exist_given_filepath(mike_rf_file_path)
print("{} start preparing mike rainfall input".format(datetime.now()))
coefficients = pd.read_csv(os.path.join('inputs', 'params', 'sb_rf_coefficients.csv'), delimiter=',')
mike_rainfall = prepare_mike_rf_input(start=start_time, end=end_time, coefficients=coefficients)
mike_rainfall.to_csv(mike_rf_file_path, header=True, index=True)
print("{} completed preparing mike rainfall input".format(datetime.now()))
print("Mike input rainfall file is available at {}".format(mike_rf_file_path))
else:
print('Mike rainfall input file already in path : ', mike_rf_file_path)
except Exception:
traceback.print_exc()
| usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
        -s --start_time Mike rainfall timeseries start time (e.g.: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
        -e --end_time   Mike rainfall timeseries end time (e.g.: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after today.
"""
print(usageText) | identifier_body |
gen_mike_input_rf_linux.py | #!/home/uwcc-admin/curw_mike_data_handler/venv/bin/python3
"only she bang, root dir, output dir and filename are different from generic one"
import pymysql
from datetime import datetime, timedelta
import traceback
import json
import os
import sys
import getopt
import pandas as pd
import numpy as np
DATE_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
from db_adapter.constants import set_db_config_file_path
from db_adapter.constants import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def append_to_file(file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
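# Illustrative usage sketch (not part of the original script), assuming a loaded config such as
# config = {"output_dir": "/tmp/mike", "output_file_name": ""}:
#   read_attribute_from_config_file('output_dir', config)                    # -> "/tmp/mike"
#   read_attribute_from_config_file('output_file_name', config)              # -> None (empty value treated as missing)
#   read_attribute_from_config_file('missing_key', config, compulsory=True)  # prints an error and exits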
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
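# Illustrative sketch (not part of the original script): only timestamps aligned to
# 15-minute boundaries with zero seconds pass validation, e.g.
#   check_time_format("2019-06-05 00:15:00")   # -> True
#   check_time_format("2019-06-05 00:10:00")   # prints the minutes error and exits
#   check_time_format("2019-06-05")            # prints the format error and exits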
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
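# Illustrative sketch (not part of the original script): the db adapter returns a timeseries as a
# list of [time, value] rows; after a header row is injected it converts roughly as follows:
#   data = [['time', 'rain'], ['2019-06-05 00:00:00', 0.0], ['2019-06-05 00:05:00', 1.5]]
#   list_of_lists_to_df_first_row_as_columns(data)
#   # -> DataFrame with columns 'time' and 'rain' holding the two data rows above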
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
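# Illustrative sketch (not part of the original script): negative readings are treated as
# invalid and masked, e.g. a numeric column [0.5, -1.0, 2.0] becomes [0.5, NaN, 2.0].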
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
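# Illustrative sketch (not part of the original script): each NaN is filled with the mean of
# its own row (the average across stations for that timestep), e.g.
#   df = pd.DataFrame({'a': [1.0, np.nan], 'b': [3.0, 4.0]})
#   replace_nan_with_row_average(df)
#   # -> a: [1.0, 4.0], b: [3.0, 4.0]   (the row-1 mean of the available values is 4.0)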
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for result in results:
grid_id = result.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = result.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
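# Illustrative sketch (not part of the original script), assuming curw_sim grid ids of the
# (hypothetical) form "rainfall_<obs_station_id>_<suffix>": the helper returns a mapping such as
#   {'100066': '<hash id>', '100024': '<hash id>'}
# keyed by the observation station id taken from the second part of the grid id.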
def prepare_mike_rf_input(start, end, coefficients):
try:
#### process station-based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)  # DataFrame.round() returns a new frame; keep the rounded result
return mike_input
except Exception:
traceback.print_exc()
finally:
destroy_Pool(pool)
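# Illustrative sketch (not part of the original script): the coefficients DataFrame read from
# sb_rf_coefficients.csv is expected to carry at least the columns used above, e.g. (hypothetical values)
#   name,curw_obs_id,coefficient
#   catchment_A,100066,0.35
#   catchment_A,100024,0.65
#   catchment_B,100066,1.00
# Each output column is the coefficient-weighted sum of the 15-minute station series for that catchment.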
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
        -s --start_time Mike rainfall timeseries start time (e.g.: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
        -e --end_time   Mike rainfall timeseries end time (e.g.: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after today.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "hs:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
|
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir = os.path.join(OUTPUT_DIRECTORY, (datetime.utcnow() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d_%H-00-00'))
if file_name is None:
file_name = 'mike_rf.txt'
mike_rf_file_path = os.path.join(output_dir, file_name)
if not os.path.isfile(mike_rf_file_path):
makedir_if_not_exist_given_filepath(mike_rf_file_path)
print("{} start preparing mike rainfall input".format(datetime.now()))
coefficients = pd.read_csv(os.path.join('inputs', 'params', 'sb_rf_coefficients.csv'), delimiter=',')
mike_rainfall = prepare_mike_rf_input(start=start_time, end=end_time, coefficients=coefficients)
mike_rainfall.to_csv(mike_rf_file_path, header=True, index=True)
print("{} completed preparing mike rainfall input".format(datetime.now()))
print("Mike input rainfall file is available at {}".format(mike_rf_file_path))
else:
print('Mike rainfall input file already in path : ', mike_rf_file_path)
except Exception:
traceback.print_exc()
| end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00') | conditional_block |