file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
environment.py | output
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start in under {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
for condition in conditions:
if condition['type'] == 'Ready' and condition['status'] != 'True':
continue
# if we got here, then everything is running
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes={200, 401}):
try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_preview_running(context, accepted_codes={200, 403, 401}):
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_gemini_api_running(gemini_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name)) |
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we | random_line_split |
|
environment.py | output
def _get_k8s_volumes_to_delete():
# universal_newlines decodes output on Python 3.x
out = subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json'], universal_newlines=True)
j = json.loads(out)
volumes = []
for pod in j['items']:
pod_vols = pod['spec'].get('volumes', [])
for pod_vol in pod_vols:
if 'hostPath' in pod_vol:
volumes.append(pod_vol['hostPath']['path'])
return volumes
def _dump_server_logs(context, tail=None):
if context.docker_compose_path:
cmd = _make_compose_command(context, 'logs')
if tail is not None:
cmd.append('--tail={:d}'.format(tail))
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
else:
pass # No current support for dumping logs under k8s
def _teardown_system(context):
cmds = []
if context.docker_compose_path:
cmds.append(_make_compose_command(context, 'kill'))
cmds.append(_make_compose_command(context, 'rm', '-fv'))
if hasattr(context, "container"):
cmds.append(['docker', "kill", context.container])
cmds.append(['docker', "rm", "-fv", "--rm-all", context.container])
_set_default_compose_path(context)
else:
cmds.append(['kubectl', 'delete', '--ignore-not-found', '-f', context.kubernetes_dir_path])
volumes = _get_k8s_volumes_to_delete()
for volume in volumes:
# TODO: the sudo thing is not very nice, but...
cmds.append(['sudo', 'rm', '-rf', volume])
cmds.append(['sudo', 'mkdir', volume])
for cmd in cmds:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def _post_startup(context, started_all, wait_for_server):
"""Post startup actions."""
if started_all:
# let's give the whole system a while to breathe
time.sleep(float(context.config.userdata.get('breath_time', 5)))
else:
raise Exception('Server failed to start in under {s} seconds'.
format(s=wait_for_server))
def _wait_for_system(context, wait_for_server=60):
start = datetime.datetime.utcnow()
wait_till = start + datetime.timedelta(seconds=wait_for_server)
# try to wait for server to start for some time
while datetime.datetime.utcnow() < wait_till:
time.sleep(1)
started_all = False
if context.kubernetes_dir_path:
res = json.loads(subprocess.check_output(['kubectl', 'get', 'pods', '-o', 'json']))
for pod in res['items']:
status = pod.get('status', {})
conditions = status.get('conditions', [])
phase = status.get('phase', '')
if status == {}:
continue
if phase != 'Running':
continue
for condition in conditions:
if condition['type'] == 'Ready' and condition['status'] != 'True':
continue
# if we got here, then everything is running
started_all = True
break
else:
if _is_running(context):
started_all = True
break
_post_startup(context, started_all, wait_for_server)
def _wait_for_api(context, wait_for_service, check_function):
for _ in range(wait_for_service):
if check_function(context):
break
time.sleep(1)
else:
raise Exception('Timeout waiting for the API service')
def _wait_for_jobs_debug_api_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_jobs_debug_api_running)
def _wait_for_component_search_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_component_search_service_running)
def _wait_for_master_tag_list_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_master_tag_list_service_running)
def _wait_for_get_untagged_component_service(context, wait_for_service=60):
_wait_for_api(context, wait_for_service, _is_get_untagged_component_service_running)
def _restart_system(context, wait_for_server=60):
# NOTE: it does make sense to restart the local system only
if context.running_locally:
try:
_teardown_system(context)
_start_system(context)
_wait_for_system(context, wait_for_server)
except subprocess.CalledProcessError as e:
raise Exception('Failed to restart system. Command "{c}" failed:\n{o}'.
format(c=' '.join(e.cmd), o=e.output))
def _is_api_running(url, accepted_codes=None):
accepted_codes = accepted_codes or {200, 401}
try:
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_staging_running(threescale_url, accepted_codes={200, 401}):
try:
res = requests.post(threescale_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_3scale_preview_running(context, accepted_codes={200, 403, 401}):
try:
res = requests.post(context.threescale_preview_url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_backbone_api_running(backbone_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % backbone_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def | (gemini_api_url, accepted_codes={200}):
try:
url = '%s/api/v1/readiness' % gemini_api_url
res = requests.get(url)
if res.status_code in accepted_codes:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_api_running_post(url):
try:
res = requests.post(url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
def _is_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT) and \
_is_api_running(context.jobs_api_url + _API_ENDPOINT) and \
_is_api_running(context.gremlin_url, {400})
def _is_jobs_debug_api_running(context):
return _is_api_running(context.jobs_api_url + _JOBS_DEBUG_API +
"/analyses-report?ecosystem=maven")
def _is_component_search_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/component-search/any-component")
def _is_master_tag_list_service_running(context):
return _is_api_running(context.coreapi_url + _API_ENDPOINT +
"/master-tags/maven")
def _is_get_untagged_component_service_running(context):
return _is_api_running_post(context.coreapi_url + _API_ENDPOINT +
"/get-next-component/maven")
def _read_boolean_setting(context, setting_name):
setting = context.config.userdata.get(setting_name, '').lower()
if setting in ('1', 'yes', 'true', 'on'):
return True
if setting in ('', '0', 'no', 'false', 'off'):
return False
msg = '{!r} is not a valid option for boolean setting {!r}'
raise ValueError(msg.format(setting, setting_name))
def _add_slash(url):
if url and not url.endswith('/'):
url += '/'
return url
def _get_api_url(context, attribute, port):
return _add_slash(context.config.userdata.get(attribute,
'http://localhost:{port}/'.format(port=port)))
def _send_json_file(endpoint, filename, custom_headers=None):
"""Send the JSON file to the selected API endpoint.
The optional custom header is used (given it is provided).
"""
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if custom_headers is not None:
headers.update(custom_headers)
with open(filename) as json_data:
response = requests.post(endpoint, data=json_data, headers=headers)
return response
def _check_env_for_remote_tests(env_var_name):
if os.environ.get(env_var_name):
logger.info("Note: {e} environment variable is specified, but tests are "
"still run locally\n"
"Check other values required to run tests against existing "
"deployent".format(e=env_var_name))
def _missing_api_token_warning(env_var_name):
if os.environ.get(env_var_name):
logger.info("OK: {name} environment is set and will be used as "
"authorization token".format(name=env_var_name))
else:
logger.info("Warning: the {name} environment variable is not"
" set.\n"
"Most tests that require authorization will probably fail".format(
name=env_var_name))
def _check_api_tokens_presence():
# we need | _is_gemini_api_running | identifier_name |
reactor.go | errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) |
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <- | {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
} | identifier_body |
reactor.go | errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil |
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <- | {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
} | conditional_block |
reactor.go | (state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
fastSync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
}
const capacity = 1000
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
messagesForFSMCh := make(chan bcReactorMessage, capacity)
errorsForFSMCh := make(chan bcReactorMessage, capacity)
startHeight := store.Height() + 1
if startHeight == 1 {
startHeight = state.InitialHeight
}
bcR := &BlockchainReactor{
initialState: state,
state: state,
blockExec: blockExec,
fastSync: fastSync,
store: store,
messagesForFSMCh: messagesForFSMCh,
eventsFromFSMCh: eventsFromFSMCh,
errorsForFSMCh: errorsForFSMCh,
}
fsm := NewFSM(startHeight, bcR)
bcR.fsm = fsm
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
return bcR
}
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
event bReactorEvent
data bReactorEventData
}
type bFsmEvent uint
const (
// message type events
peerErrorEv = iota + 1
syncFinishedEv
)
type bFsmEventData struct {
peerID p2p.ID
err error
}
// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
event bFsmEvent
data bFsmEventData
}
// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
bcR.BaseService.Logger = l
bcR.fsm.SetLogger(l)
}
// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
if bcR.fastSync {
go bcR.poolRoutine()
}
return nil
}
// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
_ = bcR.Stop()
}
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
bcR.fastSync = true
bcR.initialState = state
bcR.state = state
bcR.stateSynced = true
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
bcR.fsm.SetLogger(bcR.Logger)
go bcR.poolRoutine()
return nil
}
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.updatePeer()
}
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T | NewBlockchainReactor | identifier_name |
|
reactor.go | bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
src p2p.Peer) (queued bool) {
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
pbbi, err := block.ToProto()
if err != nil {
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
}
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
msgData := bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: peer.ID(),
err: errSwitchRemovesPeer,
},
}
bcR.errorsForFSMCh <- msgData
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
switch msg := e.Message.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
}
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("error transition block from protobuf", "err", err)
return
}
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
height: msg.Height,
length: msg.Size(),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
doProcessBlockCh := make(chan struct{}, 1)
lastHundred := time.Now()
lastRate := 0.0
ForLoop:
for {
select {
case <-stopProcessing:
bcR.Logger.Info("finishing block execution")
break ForLoop
case <-processReceivedBlockTicker.C: // try to execute blocks
select {
case doProcessBlockCh <- struct{}{}:
default:
}
case <-doProcessBlockCh:
for {
err := bcR.processBlock()
if err == errMissingBlock {
break
}
// Notify FSM of block processing result.
msgForFSM := bcReactorMessage{
event: processedBlockEv,
data: bReactorEventData{
err: err,
},
}
_ = bcR.fsm.Handle(&msgForFSM)
if err != nil {
break
}
bcR.blocksSynced++
if bcR.blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
height, maxPeerHeight := bcR.fsm.Status()
bcR.Logger.Info("Fast Sync Rate", "height", height,
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
}
}
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {
bcR.fsm.Start()
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
stopProcessing := make(chan struct{}, 1)
go bcR.processBlocksRoutine(stopProcessing)
ForLoop:
for {
select {
case <-sendBlockRequestTicker.C:
if !bcR.fsm.NeedsBlocks() {
continue
}
_ = bcR.fsm.Handle(&bcReactorMessage{
event: makeRequestsEv,
data: bReactorEventData{
maxNumRequests: maxNumRequests}})
case <-statusUpdateTicker.C:
// Ask for status updates.
go bcR.sendStatusRequest()
case msg := <-bcR.messagesForFSMCh:
// Sent from the Receive() routine when status (statusResponseEv) and
// block (blockResponseEv) response events are received
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.errorsForFSMCh:
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
// FSM state timer expiry routine (stateTimeoutEv).
_ = bcR.fsm.Handle(&msg)
case msg := <-bcR.eventsFromFSMCh:
switch msg.event {
case syncFinishedEv:
stopProcessing <- struct{}{}
// Sent from the FSM when it enters finished state.
break ForLoop
case peerErrorEv:
// Sent from the FSM when it detects peer error
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
if msg.data.err == errNoPeerResponse {
// Sent from the peer timeout handler routine
_ = bcR.fsm.Handle(&bcReactorMessage{
event: peerRemoveEv,
data: bReactorEventData{
peerID: msg.data.peerID,
err: msg.data.err,
},
})
}
// else {
// For slow peers, or errors due to blocks received from wrong peer | // the FSM had already removed the peers
// } | random_line_split |
|
mod.rs | ;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e);
edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
}
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn | (&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
// Starting from a directly deletable (or excepted) node, ensure all nodes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new();
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all unvisited nodes that are connected via deep or refcount edges from the starting node with the passed-in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find | get_object | identifier_name |
mod.rs | ;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e);
edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
}
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn get_object(&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
| for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all unvisited nodes that are connected via deep or refcount edges from the starting node with the passed-in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find all | // Starting from a directly deletable (or excepted) node, ensure all ndoes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new(); | random_line_split |
mod.rs | ;
use crate::storage::{get_connection, DelfStorageConnection};
use crate::DelfYamls;
/// The DelfGraph is the core structure for delf's functionality. It contains the algorithm to traverse the graph, as well as metadata to perform the deletions.
#[derive(Debug)]
pub struct DelfGraph {
pub(crate) nodes: HashMap<String, NodeIndex>,
pub(crate) edges: HashMap<String, EdgeIndex>,
graph: Graph<object::DelfObject, edge::DelfEdge, Directed>,
storages: HashMap<String, Box<dyn DelfStorageConnection>>,
}
impl DelfGraph {
/// Create a new DelfGraph from a schema and a config. See [yaml_rust](../../yaml_rust/index.html) for information on creating the Yaml structs, or alternately use the helper functions: [read_files](../fn.read_files.html), [read_yamls](../fn.read_yamls.html) for constructing a DelfGraph from either paths or `&str` of yaml.
pub fn new(yamls: &DelfYamls) -> DelfGraph | edges_to_insert.push((obj_name.clone(), delf_edge));
}
}
// add all the edges to the graph
for (from, e) in edges_to_insert.iter_mut() {
if !nodes.contains_key(&e.to.object_type) {
eprintln!("Error creating edge {:#?}: No object with name {:#?}", e.name, e.to.object_type);
exit(1);
}
let edge_id = graph.add_edge(nodes[from], nodes[&e.to.object_type], e.clone());
edges.insert(String::from(&e.name), edge_id);
}
// create the storage map
let mut storages = HashMap::<String, Box<dyn DelfStorageConnection>>::new();
for yaml in config.iter() {
for storage in yaml["storages"].as_vec().unwrap().iter() {
let storage_name = String::from(storage["name"].as_str().unwrap());
storages.insert(
storage_name,
get_connection(
storage["plugin"].as_str().unwrap(),
storage["url"].as_str().unwrap(),
),
);
}
}
return DelfGraph {
nodes,
edges,
graph,
storages,
};
}
/// Pretty print the graph's contents.
pub fn print(&self) {
println!("{:#?}", self.graph);
}
/// Given an edge name, get the corresponding DelfEdge
pub fn get_edge(&self, edge_name: &String) -> &edge::DelfEdge {
let edge_id = self.edges.get(edge_name).unwrap();
return self.graph.edge_weight(*edge_id).unwrap();
}
/// Given an edge name and the ids of the to/from object instances, delete the edge
pub fn delete_edge(&self, edge_name: &String, from_id: &String, to_id: &String) {
let e = self.get_edge(edge_name);
e.delete_one(from_id, to_id, self);
}
/// Given an object name, get the corresponding DelfObject
pub fn get_object(&self, object_name: &String) -> &object::DelfObject {
let object_id = self.nodes.get(object_name).unwrap();
return self.graph.node_weight(*object_id).unwrap();
}
/// Given the object name and the id of the instance, delete the object
pub fn delete_object(&self, object_name: &String, id: &String) {
self._delete_object(object_name, id, None);
}
fn _delete_object(
&self,
object_name: &String,
id: &String,
from_edge: Option<&edge::DelfEdge>,
) {
let obj = self.get_object(object_name);
let deleted = obj.delete(id, from_edge, &self.storages);
if deleted {
let edges = self.graph.edges_directed(self.nodes[&obj.name], Outgoing);
for e in edges {
e.weight().delete_all(id, &obj.id_type, self);
}
}
}
/// Validate that the objects and edges described in the schema exist in the corresponding storage as expected. Additionally, ensure that all objects in the graph are reachable by traversal via `deep` or `refcount` edges starting at an object with deletion type of `directly`, `directly_only`, `short_ttl`, or `not_deleted`. This ensures that all objects are deletable and accounted for.
pub fn validate(&self) {
println!("\u{1f50d} {}", Cyan.bold().paint("Validating DelF graph..."));
let mut errs = Vec::new();
let mut passed = true;
for (_, node_id) in self.nodes.iter() {
match self.graph
.node_weight(*node_id)
.unwrap()
.validate(&self.storages) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Objects exist in storage"));
}
for (_, edge_id) in self.edges.iter() {
match self.graph.edge_weight(*edge_id).unwrap().validate(self) {
Err(e) => errs.push(e),
_ => ()
}
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all edges found in storage"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("Edges exist in storage"));
}
match self.reachability_analysis() {
Err(e) => errs.push(e),
_ => ()
}
if errs.len() > 0 {
passed = false;
println!("\u{274c} {}", Red.paint("Not all objects deletable"));
for err in errs.drain(..) {
println!(" {}", err);
}
} else {
println!("\u{2705} {}", Green.paint("All objects deletable"));
}
if passed {
println!("\u{1F680} {} \u{1F680}", Green.bold().paint("Validation successful!"));
} else {
println!("\u{26a0} {} \u{26a0}", Red.bold().paint("Validation errors found"));
}
}
// Starting from a directly deletable (or excepted) node, ensure all ndoes are reached.
fn reachability_analysis(&self) -> Result<(), String> {
let mut visited_nodes = HashSet::new();
for (_, node_id) in self.nodes.iter() {
let obj = self.graph.node_weight(*node_id).unwrap();
match obj.deletion {
object::DeleteType::ShortTTL
| object::DeleteType::Directly
| object::DeleteType::DirectlyOnly
| object::DeleteType::NotDeleted => {
// this object is a starting point in traversal, start traversal
self.visit_node(&obj.name, &mut visited_nodes);
}
_ => (),
}
}
if visited_nodes.len() != self.nodes.len() {
let node_set: HashSet<String> = self.nodes.keys().cloned().collect();
return Err(format!(
"Not all objects are deletable: {:?}",
node_set.difference(&visited_nodes)
));
} else {
return Ok(());
}
}
// Recursively visit all un-visited nodes that are connected via depp or refcounte edges from the starting node with the passed in name
fn visit_node(&self, name: &String, visited_nodes: &mut HashSet<String>) {
visited_nodes.insert(name.clone());
let edges = self.graph.edges_directed(self.nodes[name], Outgoing);
for e in edges {
let ew = e.weight();
match ew.deletion {
edge::DeleteType::Deep | edge::DeleteType::RefCount => {
if !visited_nodes.contains(&ew.to.object_type) {
self.visit_node(&ew.to.object_type, visited_nodes);
}
}
_ => (),
}
}
}
// find | {
let schema = &yamls.schema;
let config = &yamls.config;
let mut edges_to_insert = Vec::new();
let mut nodes = HashMap::<String, NodeIndex>::new();
let mut edges = HashMap::<String, EdgeIndex>::new();
let mut graph = Graph::<object::DelfObject, edge::DelfEdge>::new();
// each yaml is an object
for yaml in schema.iter() {
let obj_name = String::from(yaml["object_type"]["name"].as_str().unwrap());
let obj_node = object::DelfObject::from(&yaml["object_type"]);
let node_id = graph.add_node(obj_node);
nodes.insert(obj_name.clone(), node_id);
// need to make sure all the nodes exist before edges can be added to the graph
for e in yaml["object_type"]["edge_types"].as_vec().unwrap().iter() {
let delf_edge = edge::DelfEdge::from(e); | identifier_body |
exec.go | limit [%s] exceeded", cfg.TimeLimit.String()))
}
}()
}
// Т.к. атрибут "ptrace" включен, то, после запуска, начинаем ждать пока
// процесс изменит свой статус (остановится, завершится, подаст сигнал и т.д.)
var ws syscall.WaitStatus
waitPid, err := syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for \"%s\": %v", cfg.BaseName, err)
}
if waitPid != pid {
return -1, fmt.Errorf("Error [syscall.Wait4]: First waited PID (%d) not equal to \"%s\" PID (%d)", waitPid, cfg.BaseName, pid)
}
// Ptrace-параметры
options := syscall.PTRACE_O_TRACEFORK
options |= syscall.PTRACE_O_TRACEVFORK
options |= syscall.PTRACE_O_TRACECLONE
options |= syscall.PTRACE_O_TRACEEXIT
options |= syscall.PTRACE_O_TRACEEXEC
parentPid := 0
// Начинаем рекурсивно отслеживать поведение запущенного процесса и его потомков.
// Пример. Если запущенный процесс (указанный в параметре "oar") создал потомка_1, то
// начинаем отслеживать этого потомка_1, потомок_1 тоже может создать потомка (потомка_2),
// в новой итерации начинаем отслеживать этого нового потомка (потомка_2).
// Если потомок_2 больше не создает новых потомков, то следущая итерация цикла
// будет принадлежать его родителю (потомку_1), если потомок_1 также не создает потомков,
// переходим к главному процессу (запущенному через "oar").
// Первая (и последняя) итерация будет отслеживать наш запущенный процесс, остальные - потомков.
// Creation 0 | Main Process Exit 5 | - - - - - Main Process
// 1 | - Child_1 4 | - - - Child_1
// 2 | - - - Child_2 3 | Child_2
for {
// После того как процесс-потомок остановился (после syscall.Wait4)
// Передаем ему ptrace-параметры, которые заставят его останавливаться
// в тех случаях, когда он начинает создавать дочерний процесс.
syscall.PtraceSetOptions(waitPid, options)
syscall.PtraceCont(waitPid, 0)
parentPid = waitPid
// Снова ждем пока процесс-потомок изменит свой статус, теперь это может быть
// не только остановка, завершение, сигналирование, но и создание дочернего процесса.
waitPid, err = syscall.Wait4(-1, &ws, syscall.WALL, nil)
if err != nil {
return -1, fmt.Errorf("Error [syscall.Wait4] for [PID: %d, PPID %d]: %v", waitPid, parentPid, err)
}
command := system.GetProcessCommand(waitPid)
util.Debug("Waited PID: %d, PPID: %d, CMD: %s", waitPid, parentPid, command)
// Проверяем, завершился ли процесс-потомок
if ws.Exited() {
util.Debug(" - Process [PID: %d] finished", waitPid)
// Если завершенный процесс-потомок является нашим запущенным процессом
// (процессом, указанным в параметре "oar"), то ломаем цикл for,
// и выходим из функции (runProcess) с кодом выхода ws.ExitStatus()
if waitPid == pid {
break
}
// Если нет, переходим к его родителю.
continue
}
if ws.Signaled() {
util.Debug(" - Process [PID: %d] signaled: %v", waitPid, ws)
continue
}
sigtrap := uint32(syscall.SIGTRAP)
sigsegv := uint32(syscall.SIGSEGV)
// Если причиной изменения статуса является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу | wg.Wait()
return ws.ExitStatus(), nil
} | random_line_split |
|
exec.go | является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| conditional_block |
||
exec.go | является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| identifier_body |
||
exec.go | является создание дочернего процесса:
// Если параметер "-Xacp" не установлен, то после попытки создать дочерний процесс
// функция (runProcess) завершится, сработает defer cmd.Process.Kill()
// (процесс будет убит), а т.к. в атрибутах запуска процесса стоит
// Pdeathsig: syscall.SIGKILL, то будут убиты все созданные потомки.
if !cfg.AllowCreateProcesses {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
// Для создания отдельного потока, процесс создает потомка,
// параметр "-Xamt" разрешает многопоточность.
if !cfg.MultiThreadedProcess {
return -1, fmt.Errorf("Process attempt to clone himself")
}
clonePid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("Unable to retrieve id of cloned process: %v", err)
}
util.Debug("Process [%d] just maked clone [%d]", waitPid, clonePid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
fallthrough
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
return -1, fmt.Errorf("Attempt to create new process")
case sigsegv:
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
}
// Если параметер "-Xacp" установлен, то просто выводим инфу о созданных потомках
} else {
switch uint32(ws) >> 8 {
case sigtrap | (syscall.PTRACE_EVENT_EXIT << 8):
util.Debug(" - Detected exit event.")
case sigtrap | (syscall.PTRACE_EVENT_CLONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_CLONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace clone [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_FORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_FORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace fork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_VFORK_DONE << 8):
nPid, err := syscall.PtraceGetEventMsg(waitPid)
if err != nil {
util.Debug("- [PTRACE_EVENT_VFORK_DONE] Ptrace event message retrieval failed: %v", err)
}
util.Debug("- Ptrace vfork done [%d] event detected", nPid)
case sigtrap | (syscall.PTRACE_EVENT_EXEC << 8):
util.Debug("- Ptrace exec event detected")
case sigtrap | (0x80 << 8): // PTRACE_EVENT_STOP
util.Debug("- Ptrace stop event detected")
case sigtrap:
util.Debug("- Sigtrap detected")
case uint32(syscall.SIGCHLD):
util.Debug("- Sigchld detected")
case uint32(syscall.SIGSTOP):
util.Debug("- Sigstop detected")
case sigsegv:
util.Debug("- Sigsegv detected.")
return -1, fmt.Errorf("Segmentation fault! [PID %d, PPID %d]", waitPid, parentPid)
default:
util.Debug(" - Process [%d] stopped for unknown reasons [Status %v, Signal %d]", waitPid, ws, ws.StopSignal())
}
}
}
// Ждем, пока функции перенаправления stdout и stderr в файл/консоль,
// запущенные в отдельных потоках, закончат свою работу
wg.Wait()
return ws.ExitStatus(), nil
}
func measureUsage(cfg *util.Config, storage *os.File, process *os.Process) {
// Проверяем, не завершился ли процесс до того, как мы начнем считать потребление ресурсов.
if _, err := os.Stat(fmt.Sprintf("/proc/%d/stat", process.Pid)); err == nil {
// Потребление CPU в % считается по такой формуле:
// consumtion = (cores * (ptA - ptB) * 100) / (ttA - ttB)
// Где cores - Количество используемых ядер процессорa
// ptA - Потребляемое время cpu процессом в момент времени А
// ptB - Потребляемое время cpu процессом в момент времени B
// ttA - Нагруженность процессора (общее время) в момент A
// ttB - Нагруженность процессора (общее время) в момент B
// Замер А позже замера B (A > B)
ptB, _, err := system.GetProcessStats(process.Pid)
checkError(process, err)
ttB, err := system.GetTotalCPUTime()
checkError(process, err)
cores, err := system.GetCPUCount(process.Pid)
checkError(process, err)
util.Debug("Process using %d cpu cores.", cores)
idle := 0
idleLimit := cfg.IdleLimit.Seconds()
// Проводим замер каждую секунду? работы программы
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
ttA, err := system.GetTotalCPUTime()
checkError(process, err)
ptA, processMemory, err := system.GetProcessStats(process.Pid)
checkError(process, err)
// Расчитываем потребление CPU
load := float64(uint64(cores)*(ptA-ptB)) / float64(ttA-ttB)
if idleLimit > 0 {
// Если потребление CPU меньше чем допустимая нагрузка
// увеличиваем счетчик простоя (idle)
if cfg.RequiredLoad.Value() > load {
idle++
} else {
idle = 0
}
}
stringMemory := util.StringifyMemory(processMemory)
stringLoad := util.StringifyLoad(load)
util.Debug(" - [Memory: %s/%s, Load: %s/%s]", stringMemory, cfg.MemoryLimit.String(), stringLoad, cfg.RequiredLoad.String())
// Записываем полученные данные о потреблении ресурсов в файл, указанный в "-s".
if storage != nil {
storage.WriteString(fmt.Sprintf("%s,%f,%d\n", time.Now().Format("15:04:05"), load, processMemory))
err = storage.Sync()
checkError(process, err)
}
// Проверка на превышение указанных лимитов (если параметры были указаны)
if idleLimit > 0 && idle >= idleLimit {
checkError(process, fmt.Errorf("Idle time limit [%d] exceeded", cfg.IdleLimit.Seconds()))
}
memoryLimit := cfg.MemoryLimit.Value()
if memoryLimit > 0 && processMemory > memoryLimit {
checkError(process, fmt.Errorf("Memory limit [%s] exceeded", cfg.MemoryLimit.String()))
}
ptB = ptA
ttB = ttA
}
}
}
}
func checkError(process *os.Process, err error) {
if err != nil {
log.Printf("Process killed from subthread. Cause: %v\n", err)
if process != nil {
process.Kill() // Catch the error?
}
os.Exit(0)
}
}
func fromPipe(cfg *util.Config, r io.Reader, f *os.File, wg *sync.WaitGroup) {
defer wg.Done()
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
if len(text) == 0 {
continue
}
if !cfg.Quiet {
log.Printf(util.Bold("[%s]: %s"), cfg.BaseName, text)
}
if f != nil {
f.WriteString(fmt.Sprintf("%s\n", text))
}
}
if err := scanner.Err(); err != nil {
checkError(nil, fmt.Errorf("Pipe handling error: %v", err))
}
}
| identifier_name |
||
create-docker-context-for-node-component.js | .com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) {
return result;
}
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function | getPackageDependencies | identifier_name |
|
create-docker-context-for-node-component.js | field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) | {
return result;
} | conditional_block |
|
create-docker-context-for-node-component.js | Image) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop if there's a code !== 0
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log(tarProcess.status);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy(
getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages);
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory junctions is
// fine without special permissions, but fall back on copying in the unlikely
// event that fails, too.
const type = fse.statSync(src).isFile() ? "file" : "junction";
fse.ensureSymlinkSync(src, dest, type);
} catch (e) {
fse.copySync(src, dest);
}
});
}
function prepareNodeModules(packageDir, destDir, productionPackages) {
productionPackages.forEach((src) => {
const relativePath = path.relative(packageDir, src.path);
const dest = path.resolve(destDir, relativePath);
const srcPath = path.resolve(packageDir, relativePath);
// console.log("src " + srcPath + " to " + dest);
try {
const stat = fse.lstatSync(srcPath);
const type = stat.isFile() ? "file" : "junction";
fse.ensureSymlinkSync(srcPath, dest, type);
} catch (e) {
fse.copySync(srcPath, dest);
}
});
}
function getPackageList(packagePath, packageSearchRoot, resolvedSoFar = {}) {
const dependencies = getPackageDependencies(packagePath);
const result = [];
if (!dependencies || !dependencies.length) {
return result;
}
dependencies.forEach(function (dependencyName) {
const dependencyNamePath = dependencyName.replace(/\//g, path.sep);
let currentBaseDir = packagePath;
let dependencyDir;
do {
dependencyDir = path.resolve(
currentBaseDir,
"node_modules",
dependencyNamePath
);
if (
currentBaseDir === packageSearchRoot ||
isSubDir(currentBaseDir, packageSearchRoot)
) {
// --- will not look for packages outside project root directory
break;
}
// Does this directory exist? If not, imitate node's module resolution by walking
// up the directory tree.
currentBaseDir = path.resolve(currentBaseDir, "..");
} while (!fse.existsSync(dependencyDir));
if (!fse.existsSync(dependencyDir)) {
throw new Error(
"Could not find path for " +
dependencyName +
" @ " +
packagePath
);
}
// If we haven't already seen this
if (!resolvedSoFar[dependencyDir]) {
result.push({ name: dependencyName, path: dependencyDir });
// Now that we've added this package to the list to resolve, add all its children.
const childPackageResult = getPackageList(
dependencyDir,
packageSearchRoot,
{ ...resolvedSoFar, [dependencyDir]: true }
);
Array.prototype.push.apply(result, childPackageResult);
}
});
return result;
}
function getPackageDependencies(packagePath) {
const packageJsonPath = path.resolve(packagePath, "package.json");
if (packageDependencyDataCache[packageJsonPath]) {
return packageDependencyDataCache[packageJsonPath];
}
const pkgData = fse.readJSONSync(packageJsonPath);
const depData = pkgData["dependencies"];
if (!depData) {
packageDependencyDataCache[packageJsonPath] = [];
} else {
packageDependencyDataCache[packageJsonPath] = Object.keys(depData);
}
return packageDependencyDataCache[packageJsonPath];
}
function wrapConsoleOutput(process) | {
if (process.stdout) {
process.stdout.on("data", (data) => {
console.log(data.toString());
});
}
if (process.stderr) {
process.stderr.on("data", (data) => {
console.error(data.toString());
});
}
} | identifier_body |
|
create-docker-context-for-node-component.js | _NAME
},
version: {
description:
"The version(s) to use in auto tag generation. Will default to the current version in package.json. Requires --tag=auto",
type: "string",
array: true,
default: process.env.MAGDA_DOCKER_VERSION
},
output: {
description:
"The output path and filename for the Docker context .tar file.",
type: "string"
},
local: {
description:
"Build for a local Kubernetes container registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
push: {
description:
"Push the build image to the docker registry. This parameter is only used if --build is specified.",
type: "boolean",
default: false
},
platform: {
description:
"A list of platform that the docker image build should target. Specify this value will enable multi-arch image build.",
type: "string"
},
noCache: {
description: "Disable the cache during the docker image build.",
type: "boolean",
default: false
},
cacheFromVersion: {
description:
"Version to cache from when building, using the --cache-from field in docker. Will use the same repository and name. Using this options causes the image to be pulled before build.",
type: "string"
}
})
// Because 'version is a default yargs thing we need to specifically override its normal parsing.
.version(false)
.array("version")
.help().argv;
if (!argv.build && !argv.output) {
console.log("Either --build or --output <filename> must be specified.");
process.exit(1);
}
if (argv.platform && !argv.push) {
console.log(
"When --platform is specified, --push must be specified as well as multi-arch image can only be pushed to remote registry."
);
process.exit(1);
}
if (argv.noCache && argv.cacheFromVersion) {
console.log("When --noCache=true, --cacheFromVersion can't be specified.");
process.exit(1);
}
const componentSrcDir = path.resolve(process.cwd());
const dockerContextDir = fse.mkdtempSync(
path.resolve(__dirname, "..", "docker-context-")
);
const componentDestDir = path.resolve(dockerContextDir, "component");
fse.emptyDirSync(dockerContextDir);
fse.ensureDirSync(componentDestDir);
preparePackage(componentSrcDir, componentDestDir);
const tar = process.platform === "darwin" ? "gtar" : "tar";
// Docker and ConEmu (an otherwise excellent console for Windows) don't get along.
// See: https://github.com/Maximus5/ConEmu/issues/958 and https://github.com/moby/moby/issues/28814
// So if we're running under ConEmu, we need to add an extra -cur_console:i parameter to disable
// ConEmu's hooks and also set ConEmuANSI to OFF so Docker doesn't do anything drastic.
const env = Object.assign({}, process.env);
const extraParameters = [];
if (env.ConEmuANSI === "ON") {
env.ConEmuANSI = "OFF";
extraParameters.push("-cur_console:i");
}
updateDockerFile(componentSrcDir, componentDestDir);
if (argv.build) {
const cacheFromImage =
argv.cacheFromVersion &&
getRepository(argv.local, argv.repository) +
getName(argv.name) +
":" +
argv.cacheFromVersion;
if (cacheFromImage) {
// Pull this image into the docker daemon - if it fails we don't care, we'll just go from scratch.
const dockerPullProcess = childProcess.spawnSync(
"docker",
[...extraParameters, "pull", cacheFromImage],
{
stdio: "inherit",
env: env
}
);
wrapConsoleOutput(dockerPullProcess);
}
const tarProcess = childProcess.spawn(
tar,
[...extraParameters, "--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", "pipe", "inherit"],
env: env,
shell: true
}
);
const tags = getTags(
argv.tag,
argv.local,
argv.repository,
argv.version,
argv.name
);
const tagArgs = tags
.map((tag) => ["-t", tag])
.reduce((soFar, tagArgs) => soFar.concat(tagArgs), []);
const cacheFromArgs = cacheFromImage
? ["--cache-from", cacheFromImage]
: [];
const dockerProcess = childProcess.spawn(
"docker",
[
...extraParameters,
...(argv.platform ? ["buildx"] : []),
"build",
...tagArgs,
...cacheFromArgs,
...(argv.noCache ? ["--no-cache"] : []),
...(argv.platform ? ["--platform", argv.platform, "--push"] : []),
"-f",
`./component/Dockerfile`,
"-"
],
{
stdio: ["pipe", "inherit", "inherit"],
env: env
}
);
wrapConsoleOutput(dockerProcess);
dockerProcess.on("close", (code) => {
fse.removeSync(dockerContextDir);
if (code === 0 && argv.push && !argv.platform) {
if (tags.length === 0) {
console.error("Can not push an image without a tag.");
process.exit(1);
}
// Stop pushing further tags as soon as a push exits with a non-zero code.
tags.every((tag) => {
const process = childProcess.spawnSync(
"docker",
["push", tag],
{
stdio: "inherit"
}
);
code = process.status;
return code === 0;
});
}
process.exit(code);
});
tarProcess.on("close", (code) => {
dockerProcess.stdin.end();
});
tarProcess.stdout.on("data", (data) => {
dockerProcess.stdin.write(data);
});
} else if (argv.output) {
const outputPath = path.resolve(process.cwd(), argv.output);
const outputTar = fse.openSync(outputPath, "w", 0o644);
const tarProcess = childProcess.spawn(
tar,
["--dereference", "-czf", "-", "*"],
{
cwd: dockerContextDir,
stdio: ["inherit", outputTar, "inherit"],
env: env,
shell: true
}
);
tarProcess.on("close", (code) => {
fse.closeSync(outputTar);
console.log("tar exited with code", code);
fse.removeSync(dockerContextDir);
});
}
function updateDockerFile(sourceDir, destDir) {
const tags = getVersions(argv.local, argv.version);
const repository = getRepository(argv.local, argv.repository);
const dockerFileContents = fse.readFileSync(
path.resolve(sourceDir, "Dockerfile"),
"utf-8"
);
const replacedDockerFileContents = dockerFileContents
// Add a repository if this is a magda image
.replace(
/FROM .*(magda-[^:\s]+)(:[^\s]+)/,
"FROM " + repository + "$1" + (tags[0] ? ":" + tags[0] : "$2")
);
fse.writeFileSync(
path.resolve(destDir, "Dockerfile"),
replacedDockerFileContents,
"utf-8"
);
}
function preparePackage(packageDir, destDir) {
const packageJson = require(path.join(packageDir, "package.json"));
const dockerIncludesFromPackageJson =
packageJson.config &&
packageJson.config.docker &&
packageJson.config.docker.include;
let dockerIncludes;
if (!dockerIncludesFromPackageJson) {
console.log(
`WARNING: Package ${packageDir} does not have a config.docker.include key in package.json, so all of its files will be included in the docker image.`
);
dockerIncludes = fse.readdirSync(packageDir);
} else if (dockerIncludesFromPackageJson.trim() === "*") {
dockerIncludes = fse.readdirSync(packageDir);
} else {
if (dockerIncludesFromPackageJson.indexOf("*") >= 0) {
throw new Error(
"Sorry, wildcards are not currently supported in config.docker.include."
);
}
dockerIncludes = dockerIncludesFromPackageJson
.split(" ")
.filter((include) => include.length > 0);
}
dockerIncludes
.filter((include) => include !== "Dockerfile") // Filter out the dockerfile because we'll manually copy over a modified version.
.forEach(function (include) {
const src = path.resolve(packageDir, include);
const dest = path.resolve(destDir, include);
if (include === "node_modules") {
fse.ensureDirSync(dest);
const env = Object.create(process.env);
env.NODE_ENV = "production";
const productionPackages = _.uniqBy( |
return;
}
try {
// On Windows we can't create symlinks to files without special permissions.
// So just copy the file instead. Usually creating directory | getPackageList(packageDir, path.resolve(packageDir, "..")),
(package) => package.path
);
prepareNodeModules(src, dest, productionPackages); | random_line_split |
main.py | 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read the result flag and the frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
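# Approximate each contour with a polygon and collect the labelled corner coordinates; a clean battery outline yields exactly four points (checked below).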
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
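# Wrap the base64-encoded frame in <sof>/<eof> markers so the receiver can delimit the image in the TCP stream.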
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): #start를 calculate로 | global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
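# Sort the unit with the two servo gates and the LEDs: both dimensions in spec -> pass position (green LED); any failure -> a reject position (red LED) and the reject counter is incremented.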
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: # only the vertical dimension is out of spec
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: # only the horizontal dimension is out of spec
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: # both horizontal and vertical are out of spec
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
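# After the last unit of the lot: compute lot-level statistics (AQL judgement, sigma, mean, Cp, defect counts/rates, elapsed time) and send the summary to the server.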
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('T | 바꿈
| identifier_name |
main.py | count = 0
Hadjust = 0
Vadjust = 0
### open_cv script variables
p_list = []
dot1=[]
dot2=[]
dot3=[]
dot4=[]
imgstring = ''
temperature = 0
### open_cv script variables, moved 2020-11-03
hunpassCount = 0
vunpassCount = 0
a=0
TotalunpassCount = 0
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read the result flag and the frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): # renamed from start to calculate
global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: # only the vertical dimension is out of spec
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: # only the horizontal dimension is out of spec
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: # both horizontal and vertical are out of spec
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot | hStandard = 0.0
vStandard = 0.0
| random_line_split |
|
main.py |
time1 = 0
time2 = 0
dateGap = 0
def opencapture(count):
global img
global imgstring
cap = cv2.VideoCapture(-1)
while True:
ret, img = cap.read() # Read the result flag and the frame
img = img[170:450, 40:600]
cv2.imshow("Battery_live", img)
cv2.waitKey(1)
if not ret:
break
print("Image " + str(count) + "saved")
file = '/home/pi/project/' + str(count) + '.jpg'
#file = '/home/pi/project/' + 'CaptureTest'+'.jpg'
cv2.imwrite(file, img)
img2 = cv2.imread(file, cv2.IMREAD_COLOR)
img3 = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
_, threshold = cv2.threshold(img3, 150, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
p_list.clear()
for cnt in contours:
epsilon = 0.02 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
string = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): # renamed from start to calculate
global imgstring
global hStandard
gl | time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: # only the horizontal dimension is out of spec
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: # both horizontal and vertical are out of spec
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv | obal vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: # only the vertical dimension is out of spec
led_on(led3) #red
setServoPos1(10)
| identifier_body |
main.py | = str(x) + ", " + str(y)
cv2.putText(img2, string, (x, y), font, 0.5, (0, 0, 255))
string_list = string.split(",")
string_list = list(map(int, string_list))
p_list.append(string_list)
i = i + 1
#time.sleep(0.5)
if len(p_list)==4:
cv2.imshow('Battery_(x, y)', img2)
cv2.imwrite(file, img2)
res = cv2.resize(img2,(762,461))
cv2.imwrite(file, res)
image = open(file,'rb')
image_read = image.read()
image_64_encode = base64.encodebytes(image_read)
imgstring = b'<sof>' + image_64_encode + b'<eof>'
return p_list
else:
p_list.clear()
continue
break
def calculate(count): # renamed from start to calculate
global imgstring
global hStandard
global vStandard
global TotalunpassCount
global dateGap
Result_horizon, Result_vertical = Dot_Distance(p1[count][0], p1[count][1], p2[count][0], p2[count][1], p3[count][0], p3[count][1], p4[count][0], p4[count][1])
Unit_horizon.append(Result_horizon)
Unit_vertical.append(Result_vertical)
Result_hpass = Unit_Defect(hStandard, Result_horizon)
Result_vpass = Unit_Defect(vStandard, Result_vertical)
temperature = read_temp()
#temperature = '26.5'
if Result_hpass == 1 and Result_vpass == 1:
led_on(led2) #green
setServoPos1(10)
time.sleep(1)
setServoPos2(165)
time.sleep(1)
elif Result_hpass == 1 and Result_vpass == 0: # only the vertical dimension is out of spec
led_on(led3) #red
setServoPos1(10)
time.sleep(1)
setServoPos2(125)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 1: # only the horizontal dimension is out of spec
led_on(led3) #red
setServoPos1(45)
time.sleep(1)
setServoPos2(165)
TotalunpassCount+=1
time.sleep(1)
elif Result_hpass == 0 and Result_vpass == 0: # both horizontal and vertical are out of spec
led_on(led3) #red
setServoPos1(155)
time.sleep(1)
setServoPos2(80)
TotalunpassCount+=1
time.sleep(1)
time.sleep(0.3)
#Result_pass
Unit_hpass.append(Result_hpass)
Unit_vpass.append(Result_vpass)
print('TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO')
send_dp = 'TOCUnit_no'+str(count+1)+","+'Unit_horizon'+str(Result_horizon)+","+'Unit_vertical'+str(Result_vertical)+","+'Unit_hpass'+str(Result_hpass)+","+'Unit_vpass'+str(Result_vpass)+","+'TEMP'+str(temperature)+","+'Unit_date'+str(Date)+","+'GO'
s.send(send_dp.encode('UTF-8'))
time.sleep(0.5)
s.send(imgstring)
time.sleep(0.3)
if count == (lot-1):
#AQL_pass
AQL_hpass = AQL_Chart(Unit_hpass, Sample_Letter(lot))
AQL_vpass = AQL_Chart(Unit_vpass, Sample_Letter(lot))
#Deviation
HDeviation = Sigma(Unit_horizon)
VDeviation = Sigma(Unit_vertical)
#Mean
HMean = Avg(Unit_horizon)
VMean = Avg(Unit_vertical)
#Cp
HCp = PCA(HDeviation, hStandard)
VCp = PCA(VDeviation, vStandard)
Hadjust = adjust(Unit_horizon, hStandard, lot)
Vadjust = adjust(Unit_vertical, vStandard, lot)
hunpassCount, hDefectrate = CountRate(Unit_hpass, lot)
vunpassCount, vDefectrate = CountRate(Unit_vpass, lot)
TotalDefectrate = round(TotalunpassCount/lot * 100, 1)
time2 = time.time()
Gap = time2-time1
dateGap = convert_seconds_to_kor_time(Gap)
print('TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph' + str(HCp)+","+'Cpv' + str(VCp)+ "," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate' + str(hDefectrate)+","+'vDefectrate' + str(vDefectrate)+","+'Hadjust' + str(Hadjust)+","+'Vadjust' + str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate' + str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap' + str(dateGap)+","+'lot' + str(lot)+","+'Model'+str(Model))
send_Final = 'TOCAQL_hpass'+str(AQL_hpass)+","+'AQL_vpass'+str(AQL_vpass)+","+'Sigmah'+str(HDeviation)+","+'Sigmav'+str(VDeviation)+","+'Meanh'+str(HMean)+","+'Meanv'+str(VMean)+","+'Cph'+str(HCp)+","+'Cpv'+str(VCp)+"," +'hunpassCount'+str(hunpassCount)+","+'vunpassCount'+str(vunpassCount)+","+'hDefectrate'+str(hDefectrate)+","+'vDefectrate'+str(vDefectrate)+","+'Hadjust'+str(Hadjust)+","+'Vadjust'+str(Vadjust)+","+'TotalunpassCount'+str(TotalunpassCount)+","+'TotalDefectrate'+str(TotalDefectrate)+","+'Date' + str(Date)+","+'dateGap'+str(dateGap)+","+'lot'+str(lot)+","+'Model'+str(Model)
time.sleep(3)
s.send(send_Final.encode('UTF-8'))
return
if __name__ == "__main__":
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( ( HOST, PORT ) )
print('Hi! I am client')
try:
while True:
if a == 0:
recv_msg = s.recv(1024).decode('UTF-8')
if recv_msg.startswith('TOR'):
recv_msg = recv_msg.replace('TORDatesStand,','').split(',')
print(recv_msg)
Model = str(recv_msg[0])
print(Model)
lot = int(recv_msg[1])
print(lot)
hStandard = float(recv_msg[2])
print(hStandard)
vStandard = float(recv_msg[3])
print(vStandard)
Date = int(recv_msg[4])
print(Date)
a=1
recv_msg = ''
elif a == 1:
time1 = time.time()
setServoPos1(90)
time.sleep(1)
setServoPos2(80)
time.sleep(1)
while True:
main_conveyor_On(con1_port)
main_conveyor_On(con2_port)
time.sleep(2)
while True:
d1 = Sonar(sig1)
if d1 <= 4.5:
main_conveyor_Off(con1_port)
break
else:
continue
led_on(led1)
led_off(led2)
led_off(led3)
dot1, dot2, dot3, dot4 = opencapture(count)
a, b, c, d = dot1234(dot1, dot2, dot3, dot4)
temp = [dot1, dot2, dot3, dot4]
for i in range(4):
if i == a:
p1.append(temp[i])
| elif i == b:
p2.append(temp[i])
elif i == c:
p3.append(temp[i])
elif i == d:
p4.append(temp[i])
calculate(count)
led_off(led1)
| conditional_block |
|
client.go | , error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) | if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure the downloaded file is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
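// layerMap from the registry metadata appears to be a comma-separated list of <layerID>:<sha> pairs; build a lookup table so each layer can be checked against the local docker store below.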
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len | {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body) | identifier_body |
client.go | != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure the downloaded file is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList, layerList[i])
}
}
//if len(nonExistingList) == 0{
// fmt.Print("\n\nImage already loaded at machine\n\n")
// return nil
//}
fmt.Print("\n\nDownloading torrent file for each missing layer in the image\n")
for i := 0; i < len(layerList); i++ {
wg.Add(1)
fmt.Print("\nlayer -> ", layerList[i])
go func(layerList []string,i int){
queryPrm := map[string]string{
"image": layerList[i],
}
queryPrmJson, _ := json.Marshal(queryPrm)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryPrmJson))
err1 := downloadFromUrl(url, filePath+layerList[i]+".tar.torrent")
if err1 != nil {
flag = true
fmt.Println("\n\nTorrent file missing at registry for layer--> ", layerList[i],". Will be downloading the entire image")
}
defer wg.Done()
}(layerList, i)
}
wg.Wait()
if flag == true{
fmt.Println("\n\nDownloading the torrent file for original image: ", safeImageName)
url := *host + ":" + *port + "/torrents?q=" + url.QueryEscape(string(queryParamJson))
err1 := downloadFromUrl(url, fileName)
if err1 != nil {
fmt.Println("Failed to pull image")
return err
}
fmt.Println("\nDownloading the entire image from registry: ", safeImageName)
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, fileName)
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
//TODO:Replace filename with that from metadata
tarballPath := filePath + tarballName
//Load the downloaded tarball
fmt.Println("\n\nLoading Image...")
importCmd := fmt.Sprintf("sudo docker load -i %s", tarballPath)
_, err2 := exec.Command("sh", "-c", importCmd).Output()
if err2 != nil {
fmt.Printf("Failed to load image into docker!")
return err2
}
fmt.Printf("\nSuccessfively pulled image: ", safeImageName,"\n\n")
return nil
} else{
flag = true
var wg1 sync.WaitGroup
fmt.Print("\n\nDownloading each image layer\n")
for i := 0; i < len(layerList); i++ {
wg1.Add(1)
go func(layerList []string, i int){
fmt.Print("\nDownloading layer-> ",layerList[i])
ctorrentCmd := fmt.Sprintf("cd %s && sudo ctorrent -e 0 %s", filePath, layerList[i]+".tar.torrent >/dev/null 2>&1")
cmd := exec.Command("bash", "-c", ctorrentCmd)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Print("\nExtracting layer-> ",layerList[i])
cmdStr1 := "sudo tar -C "+locToImg + " -xvf "+ filePath+layerList[i]+".tar"
_ , errT := exec.Command("sh", "-c", cmdStr1, " >/dev/null 2>&1").Output()
if errT != nil {
flag = false
}
defer wg1.Done()
}(layerList, i)
}
wg1.Wait()
if flag == false | {
fmt.Printf("\nERROR in downloading layers\n")
} | conditional_block |
|
client.go | (params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure the downloaded file is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
fmt.Println("Error copying json file")
return errors.New("Error copying json file")
}
metadata := (queryObj["metadata"]).(string)
d2 := []byte(metadata)
errM := ioutil.WriteFile(locToImg+"manifest.json", d2, 0644)
if errM != nil {
fmt.Println("Error copying manifest file")
return errors.New("Error copying manifest file")
}
repository := (queryObj["repository"]).(string)
d3 := []byte(repository)
errR := ioutil.WriteFile(locToImg+"repositories", d3, 0644)
if errR != nil {
fmt.Println("Error copying repository file")
return errors.New("Error copying repository file")
}
layerList := strings.Split(layers, ",")
flag = false
layerMap := (queryObj["layerMap"]).(string)
layersShaList :=strings.Split(layerMap,",")
layerShaMap := make(map[string]string)
nonExistingList := make([]string,0)
for i := 0; i < len(layersShaList); i++ {
layersArray :=strings.Split(layersShaList[i],":")
layerShaMap[layersArray[0]] = layersArray[1]
}
var wg sync.WaitGroup
fmt.Print("\n\nFinding the missing layers in the image\n")
for i := 0; i < len(layerList); i++ {
layerVal := layerList[i]
grepCmd := "sudo find /var/lib/docker/image -name " + layerShaMap[layerVal]
cmd := exec.Command("sh", "-c", grepCmd)
grepOutput, err1 := cmd.CombinedOutput()
if err1 != nil {
fmt.Printf("ERROR GREPING: %s", err1)
}
if len(grepOutput) <= 0 {
fmt.Print("\nlayer -> ", layerList[i], " not present")
nonExistingList = append(nonExistingList | uploadFile | identifier_name |
|
client.go | "net/url"
"os"
"os/exec"
"path/filepath"
"sync"
"regexp"
"strconv"
"strings"
"time"
)
var (
host = kingpin.Flag("host", "Set host of docket registry.").Short('h').Default("http://127.0.0.1").String()
port = kingpin.Flag("port", "Set port of docket registry.").Short('p').Default("8004").String()
location = kingpin.Flag("location", "Set location to store torrents and tarballs.").Short('l').Default("/tmp/docket").String()
push = kingpin.Command("push", "Push to the docket registry.")
pushImage = push.Arg("push", "Image to push.").Required().String()
pull = kingpin.Command("pull", "pull to the docket registry.")
pullImage = pull.Arg("pull", "Image to pull.").Required().String()
imagesCmd = kingpin.Command("images", "display images in the docket registry.")
imageFlag = imagesCmd.Flag("images", "display images in the docket registry.").Bool()
)
var flag bool
type RootFS struct{
Type string
Layers []string
}
type ManifestFile struct {
Id string
RepoTags interface{}
RepoDigests interface{}
Parent string
Comment string
Created string
Container string
ContainerConfig interface{}
DockerVersion string
Author string
Config interface{}
Architecture string
Os string
Size string
VirtualSize string
GraphicDriver interface{}
RootFS RootFS
Metadata string
}
func track(start time.Time, name string) {
elapsed := time.Since(start)
elapsed = elapsed
log.Printf("\n\n%s took %s\n\n", name, elapsed)
}
// Creates a new tarball upload http request to the Docket registry
func uploadFile(params map[string]string, paramName, path string) (*http.Request, error) {
uri := *host + ":" + *port + "/images"
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile(paramName, filepath.Base(path))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
err = writer.Close()
if err != nil {
return nil, err
}
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, err
}
request.Header.Add("Content-Type", writer.FormDataContentType())
for key, val := range params {
fmt.Println("key = ", key, " val = ", val)
request.Header.Add(key, val)
}
return request, nil
}
func applyPush(image string) error {
defer track(time.Now(), "Image Push")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
found := false
imageId := ""
filePath := ""
created := ""
for _, img := range imgs {
tags := img.RepoTags
for _, tag := range tags {
if tag == image {
found = true
imageId = img.ID
created = strconv.FormatInt(img.Created, 10)
fmt.Println("Found image: ", image)
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
fmt.Println("Created: ", img.Created)
fmt.Println("Size: ", img.Size)
fmt.Println("VirtualSize: ", img.VirtualSize)
fmt.Println("ParentId: ", img.ParentID)
safeImageName := reg.ReplaceAllString(image, "_")
s := []string{loc, "/", imageId, "_", safeImageName, ".tar"}
filePath = strings.Join(s, "")
break
}
}
}
if !found {
return errors.New("Sorry the image could not be found.")
}
//os.Remove("temp.json")
//Run export command
//command invocation
//run docker command save to tar ball in location
fmt.Println("Exporting image to tarball...")
cmd := fmt.Sprintf("sudo docker save %s > %s", image, filePath)
_, err1 := exec.Command("sh", "-c", cmd).Output()
if err1 != nil {
return err1
}
fmt.Println("Successively exported tarball...")
//make post request with contents of tarball to docket registry
imageParams := map[string]string{
"image": image,
"id": imageId,
"created": created,
//"layers": layer,
}
//Adapted from http://matt.aimonetti.net/posts/2013/07/01/golang-multipart-file-upload-example/ (C) Matt Aimonetti
request, err := uploadFile(imageParams, "file", filePath)
if err != nil {
log.Fatal(err)
}
uploadClient := &http.Client{}
resp, err := uploadClient.Do(request)
if err != nil {
log.Fatal(err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
log.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("Failed to push image...")
}
}
fmt.Println("Successfully uploaded image: ", image, " to the Docket registry.")
os.Remove(filePath)
return nil
}
//Adapted from https://github.com/thbar/golang-playground/blob/master/download-files.go
func downloadFromUrl(url string, fileName string) (err error) {
output, err := os.Create(fileName)
if err != nil {
fmt.Println("\nError while creating", fileName, "-", err)
return err
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
if response.StatusCode != 200 {
fmt.Println("\nFailed to pull image")
return errors.New("Failed to pull image...")
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("\nError while downloading", url, "-", err)
return err
}
//fmt.Println(n, "bytes downloaded.")
//Hack: trivial check to ensure the downloaded file is not too small
if n < 100 {
return errors.New("Failed to pull image...")
}
return nil
}
func applyPull(image string) error {
defer track(time.Now(), "Image Pull")
reg, err := regexp.Compile("[^A-Za-z0-9]+")
if err != nil {
return err
}
loc := *location
if _, err := os.Stat(loc); os.IsNotExist(err) {
os.Mkdir(loc, 0644)
}
safeImageName := reg.ReplaceAllString(image, "_")
filePath := loc + "/"
fileName := filePath + safeImageName + ".torrent"
fmt.Print("\n\n@@@@ Pulling image ---> ", safeImageName, " @@@@\n\nDownloading meta data file\n")
//Download torrent file
queryParam := map[string]string{
"image": image,
}
queryParamJson, _ := json.Marshal(queryParam)
metaUrl := *host + ":" + *port + "/images?q=" + url.QueryEscape(string(queryParamJson))
response, err3 := http.Get(metaUrl)
if err3 != nil {
fmt.Println("Failed to query image metadata endpoint")
return err3
}
if response.StatusCode != 200 {
fmt.Println("Failed to get image metadata")
return errors.New("Failed to get images metadata...")
}
defer response.Body.Close()
metaJson, err4 := ioutil.ReadAll(response.Body)
if err4 != nil {
fmt.Println("Failed to get image metadata json")
return errors.New("Failed to get image metadata json")
}
fmt.Println("\nSuccessfully Fetched meta data file")
var queryObj map[string]interface{}
if err := json.Unmarshal([]byte(metaJson), &queryObj); err != nil {
return errors.New("Failed to decode images metadata json...")
}
tarballNameInterface := queryObj["fileName"]
tarballName := tarballNameInterface.(string)
layers := (queryObj["layers"]).(string)
result := strings.Split(tarballName, "_")
locToImg := loc+"/" + result[0] + "/"
if _, errI := os.Stat(locToImg); os.IsNotExist(errI) {
os.Mkdir(locToImg, 0644)
}
jsonFile := (queryObj["jsonFile"]).(string)
d1 := []byte(jsonFile)
jsonFileName := locToImg+strings.Split(result[0], ":")[1]+".json"
err2 := ioutil.WriteFile(jsonFileName, d1, 0644)
if err2 != nil {
| "io"
"io/ioutil"
"log"
"mime/multipart"
"net/http" | random_line_split |
|
context.rs | channel`.
///
/// The current thread blocks while the passed-in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
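// First poll: record that we yielded, wake ourselves and return Pending so the scheduler can run other tasks; the second poll completes.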
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
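///
/// Basic usage (a sketch based on the API in this file, not taken from upstream docs):
///
/// ```ignore
/// let ctx = Context::acquire("my-context", Duration::from_millis(20))?;
/// let handle = ctx.spawn(async { /* async work multiplexed on this Context */ });
/// ```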
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
} | /// of a [`Context`].
///
/// # Panics
///
/// This will block the current thread and will panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
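// Editor's sketch (assumption, not in the original source): `enter` is mainly
// used to create I/O or timer resources from outside the Context thread so
// they get registered with this Context's reactor. The `Async` and `UdpSocket`
// types are the ones imported by the tests below; the exact call shown here is
// an assumption, not taken from this file:
//
//     let ctx = Context::acquire("io-ctx", Duration::from_millis(20))?;
//     let addr: SocketAddr = "127.0.0.1:5000".parse()?;
//     let socket = ctx.enter(|| Async::<UdpSocket>::bind(addr))?;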
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by element implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId( |
/// Executes the provided function relative to this [`Context`].
///
/// Useful to initialize I/O sources and timers from outside | random_line_split
context.rs | `.
///
/// The current thread blocks while the passed-in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relative to this [`Context`].
///
/// Useful to initialize I/O sources and timers from outside
/// of a [`Context`].
///
/// # Panics
///
/// This will block the current thread and will panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
| } else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by element implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId( | gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
| conditional_block |
context.rs | `.
///
/// The current thread blocks while the passed-in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relative to this [`Context`].
///
/// Useful to initialize I/O sources and timers from outside
/// of a [`Context`].
///
/// # Panics
///
/// This will block the current thread and will panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
|
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by element implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn from(handle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId( | if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
} | identifier_body |
context.rs | `.
///
/// The current thread blocks while the passed-in future is executed.
///
/// # Panics
///
/// This function panics if called within a [`Context`] thread.
#[track_caller]
pub fn block_on<Fut>(future: Fut) -> Fut::Output
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
if let Some(context) = Context::current() {
let msg = format!("Attempt to block within Context {}", context.name());
gst::error!(RUNTIME_CAT, "{}", msg);
panic!("{}", msg);
}
// Not running in a Context thread so we can block
gst::debug!(RUNTIME_CAT, "Blocking on new dummy context");
Scheduler::block_on(future)
}
/// Yields execution back to the runtime.
#[inline]
pub fn yield_now() -> YieldNow {
YieldNow::default()
}
#[derive(Debug, Default)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct YieldNow(bool);
impl Future for YieldNow {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
if !self.0 {
self.0 = true;
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(())
}
}
}
#[derive(Clone, Debug)]
pub struct ContextWeak(HandleWeak);
impl ContextWeak {
pub fn upgrade(&self) -> Option<Context> {
self.0.upgrade().map(Context)
}
}
/// A `threadshare` `runtime` `Context`.
///
/// The `Context` provides low-level asynchronous processing features to
/// multiplex task execution on a single thread.
///
/// `Element` implementations should use [`PadSrc`] and [`PadSink`] which
/// provide high-level features.
///
/// [`PadSrc`]: ../pad/struct.PadSrc.html
/// [`PadSink`]: ../pad/struct.PadSink.html
#[derive(Clone, Debug)]
pub struct Context(Handle);
impl PartialEq for Context {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl Eq for Context {}
impl Context {
pub fn acquire(context_name: &str, wait: Duration) -> Result<Self, io::Error> {
assert_ne!(context_name, Scheduler::DUMMY_NAME);
let mut contexts = CONTEXTS.lock().unwrap();
if let Some(context_weak) = contexts.get(context_name) {
if let Some(context) = context_weak.upgrade() {
gst::debug!(RUNTIME_CAT, "Joining Context '{}'", context.name());
return Ok(context);
}
}
let context = Context(Scheduler::start(context_name, wait));
contexts.insert(context_name.into(), context.downgrade());
gst::debug!(
RUNTIME_CAT,
"New Context '{}' throttling {:?}",
context.name(),
wait,
);
Ok(context)
}
pub fn downgrade(&self) -> ContextWeak {
ContextWeak(self.0.downgrade())
}
pub fn name(&self) -> &str {
self.0.context_name()
}
// FIXME this could be renamed as max_throttling
// but then, all elements should also change their
// wait variables and properties to max_throttling.
pub fn wait_duration(&self) -> Duration {
self.0.max_throttling()
}
/// Total duration the scheduler spent parked.
///
/// This is only useful for performance evaluation.
#[cfg(feature = "tuning")]
pub fn parked_duration(&self) -> Duration {
self.0.parked_duration()
}
/// Returns `true` if a `Context` is running on current thread.
pub fn is_context_thread() -> bool {
Scheduler::is_scheduler_thread()
}
/// Returns the `Context` running on current thread, if any.
pub fn current() -> Option<Context> {
Scheduler::current().map(Context)
}
/// Returns the `TaskId` running on current thread, if any.
pub fn current_task() -> Option<(Context, TaskId)> {
Scheduler::current().map(|scheduler| {
// Context users always operate on a Task
(Context(scheduler), TaskId::current().unwrap())
})
}
/// Executes the provided function relative to this [`Context`].
///
/// Useful to initialize I/O sources and timers from outside
/// of a [`Context`].
///
/// # Panics
///
/// This will block the current thread and will panic if run
/// from the [`Context`].
#[track_caller]
pub fn enter<'a, F, O>(&'a self, f: F) -> O
where
F: FnOnce() -> O + Send + 'a,
O: Send + 'a,
{
if let Some(cur) = Context::current().as_ref() {
if cur == self {
panic!(
"Attempt to enter Context {} within itself, this would deadlock",
self.name()
);
} else {
gst::warning!(
RUNTIME_CAT,
"Entering Context {} within {}",
self.name(),
cur.name()
);
}
} else {
gst::debug!(RUNTIME_CAT, "Entering Context {}", self.name());
}
self.0.enter(f)
}
pub fn spawn<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn(future)
}
pub fn spawn_and_unpark<Fut>(&self, future: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + Send + 'static,
Fut::Output: Send + 'static,
{
self.0.spawn_and_unpark(future)
}
/// Forces the scheduler to unpark.
///
/// This is not needed by element implementors as they are
/// supposed to call [`Self::spawn_and_unpark`] when needed.
/// However, it's useful for lower level implementations such as
/// `runtime::Task` so as to make sure the iteration loop yields
/// as soon as possible when a transition is requested.
pub(in crate::runtime) fn unpark(&self) {
self.0.unpark();
}
pub fn add_sub_task<T>(&self, task_id: TaskId, sub_task: T) -> Result<(), T>
where
T: Future<Output = SubTaskOutput> + Send + 'static,
{
self.0.add_sub_task(task_id, sub_task)
}
pub async fn drain_sub_tasks() -> SubTaskOutput {
let (ctx, task_id) = match Context::current_task() {
Some(task) => task,
None => return Ok(()),
};
ctx.0.drain_sub_tasks(task_id).await
}
}
impl From<Handle> for Context {
fn fr | andle: Handle) -> Self {
Context(handle)
}
}
#[cfg(test)]
mod tests {
use futures::channel::mpsc;
use futures::lock::Mutex;
use futures::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::super::Scheduler;
use super::Context;
use crate::runtime::Async;
type Item = i32;
const SLEEP_DURATION_MS: u64 = 2;
const SLEEP_DURATION: Duration = Duration::from_millis(SLEEP_DURATION_MS);
const DELAY: Duration = Duration::from_millis(SLEEP_DURATION_MS * 10);
#[test]
fn block_on_task_id() {
gst::init().unwrap();
assert!(!Context::is_context_thread());
crate::runtime::executor::block_on(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), Scheduler::DUMMY_NAME);
assert_eq!(task_id, super::TaskId(0));
let res = ctx.add_sub_task(task_id, async move {
let (_ctx, task_id) = Context::current_task().unwrap();
assert_eq!(task_id, super::TaskId(0));
Ok(())
});
assert!(res.is_ok());
assert!(Context::is_context_thread());
});
assert!(!Context::is_context_thread());
}
#[test]
fn block_on_timer() {
gst::init().unwrap();
let elapsed = crate::runtime::executor::block_on(async {
let now = Instant::now();
crate::runtime::timer::delay_for(DELAY).await;
now.elapsed()
});
assert!(elapsed >= DELAY);
}
#[test]
fn context_task_id() {
use super::TaskId;
gst::init().unwrap();
let context = Context::acquire("context_task_id", SLEEP_DURATION).unwrap();
let join_handle = context.spawn(async {
let (ctx, task_id) = Context::current_task().unwrap();
assert_eq!(ctx.name(), "context_task_id");
assert_eq!(task_id, TaskId(0));
});
futures::executor::block_on(join_handle).unwrap();
// TaskId | om(h | identifier_name |
core.go | /1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doubly-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
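// Editor's note: illustrative sketch, not part of the original source.
// createToken expects a JSON body whose "scope" is "read-only" or "read-write";
// a random id is added before the token is marshalled back:
//
//	tok, err := createToken(`{"user":"alice","scope":"read-write"}`)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(string(tok)) // e.g. {"id":<random>,"scope":"read-write","user":"alice"}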
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data type of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a different goroutine, save to disk and purge the LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge the LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if the file doesn't exist, cache the not-found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return nil directly
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a goroutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
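// Editor's sketch (assumption, not in the original source): a typical round trip
// through the cache. The id is derived from the JSON body itself, and a later
// read moves the entry to the front of the LRU, reloading it from disk if it
// was swapped out:
//
//	id, err := createElement("users", "", `{"id":1,"name":"Ada"}`, true, false)
//	if err == nil {
//		if b, _ := getElement("users", id); b != nil {
//			fmt.Println(string(b))
//		}
//	}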
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this routine; it will be removed in the end anyway
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint {
fmt.Println("Purge Done: ", memBytes)
}
//unsync
<-lisChan
}
<-LRUChan | random_line_split |
||
core.go | collections map[string]collectionChannel
var config map[string]interface{}
const readWrite = "read-write"
// Init the system variables
func init() {
//Welcome Message
fmt.Println("------------------------------------------------------------------")
fmt.Println("Starting Natyla...")
fmt.Println("Version: 1.02")
//Set the thread quantity based on the number of CPU's
coreNum := runtime.NumCPU()
fmt.Println("Number of cores: ", coreNum)
//read the config file
readConfig()
//create the data directory
createDataDir()
//set max memory from config
maxMemBytes, _ = config["memory"].(json.Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doubly-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func | (valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data type of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a different goroutine, save to disk and purge the LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge the LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if the file doesn't exist, cache the not-found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return nil directly
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a goroutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this rutine, at last it will be removed | convertJSONToMap | identifier_name |
core.go | )
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data type of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a different goroutine, save to disk and purge the LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge the LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if the file doesn't exist, cache the not-found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return nil directly
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a goroutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this routine; it will be removed in the end anyway
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint {
fmt.Println("Purge Done: ", memBytes)
}
//unsync
<-lisChan
}
<-LRUChan
}
// Move the element to the front of the LRU, because it was read or updated
func moveFront(elemento *list.Element) | {
//Move the element
lisChan <- 1
lruList.MoveToFront(elemento)
<-lisChan
if enablePrint {
fmt.Println("LRU Updated")
}
} | identifier_body |
|
core.go | .Number).Int64()
fmt.Println("Max memory defined as: ", maxMemBytes/1024/1024, " Mbytes")
runtime.GOMAXPROCS(coreNum)
//Create a new doubly-linked list to act as LRU
lruList = list.New()
//Create the channels
lisChan = make(chan int, 1)
LRUChan = make(chan int, 1)
collectionChan = make(chan int, 1)
collections = make(map[string]collectionChannel)
//Read collections from disk
nRead := readAllFromDisk()
fmt.Println("Read", nRead, "entries from disk")
fmt.Println("Ready, API Listening on http://localhost:8080, Telnet on port 8081")
fmt.Println("------------------------------------------------------------------")
}
// Start the webserver
func Start() {
//Start the console
go console()
//Start the rest API
restAPI()
}
// Convert a Json string to a map
func convertJSONToMap(valor string) (map[string]interface{}, error) {
//Create the Json element
d := json.NewDecoder(strings.NewReader(valor))
d.UseNumber()
var f interface{}
err := d.Decode(&f)
if err != nil {
return nil, err
}
//transform it to a map
m := f.(map[string]interface{})
return m, nil
}
//Create a token for the specified user
func createToken(value string) ([]byte, error) {
m, err := convertJSONToMap(value)
if err != nil {
return nil, err
}
if m["scope"] == nil || !(m["scope"] == "read-only" || m["scope"] == readWrite) {
return nil, errors.New("Invalid scope, try with read-only or read-write")
}
now := time.Now().UnixNano()
r := rand.New(rand.NewSource(now))
id := r.Int63()
m["id"] = id
b, err := json.Marshal(m)
return b, err
}
// Create the element in the collection
func createElement(col string, id string, valor string, saveToDisk bool, deleted bool) (string, error) {
//create the list element
var elemento *list.Element
b := []byte(valor)
if deleted == false {
//Create the Json element
m, err := convertJSONToMap(valor)
if err != nil {
return "", err
}
//Checks the data type of the ID field
switch m["id"].(type) {
case json.Number:
//id = strconv.FormatFloat(m["id"].(float64),'f',-1,64)
id = m["id"].(json.Number).String()
case string:
id = m["id"].(string)
default:
return "", errors.New("invalid_id")
}
//Add the value to the list and get the pointer to the node
n := &node{m, false, false}
lisChan <- 1
elemento = lruList.PushFront(n)
<-lisChan
} else {
//if not found cache is disabled
if cacheNotFound == false {
return id, nil
}
fmt.Println("Creating node as deleted: ", col, id)
//create the node as deleted
n := &node{nil, false, true}
elemento = &list.Element{Value: n}
}
//get the collection-channel relation
cc := collections[col]
createDir := false
if cc.Mapa == nil {
fmt.Println("Creating new collection: ", col)
//Create the new map and the new channel
var newMapa map[string]*list.Element
var newMapChann chan int
newMapa = make(map[string]*list.Element)
newMapChann = make(chan int, 1)
newCC := collectionChannel{newMapa, newMapChann}
newCC.Mapa[id] = elemento
//The collection doesn't exist, create one
collectionChan <- 1
collections[col] = newCC
<-collectionChan
createDir = true
} else {
fmt.Println("Using collection: ", col)
//Save the node in the map
cc.Canal <- 1
cc.Mapa[id] = elemento
<-cc.Canal
}
//if we are creating a deleted node, do not save it to disk
if deleted == false {
//Increase the memory counter in a different goroutine, save to disk and purge the LRU
go func() {
//Increments the memory counter (Key + Value in LRU + len of col name, + Key in MAP)
fmt.Println("Suma 1: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
if enablePrint {
fmt.Println("Inc Bytes: ", memBytes)
}
//Save the Json to disk, if it is not already on disk
if saveToDisk == true {
saveJSONToDisk(createDir, col, id, valor)
}
//Purge the LRU
purgeLRU()
}()
}
return id, nil
}
// Get the element from the Map and push the element to the first position of the LRU-List
func getElement(col string, id string) ([]byte, error) {
cc := collections[col]
//Get the element from the map
elemento := cc.Mapa[id]
//checks if the element exists in the cache
if elemento == nil {
fmt.Println("Elemento not in memory, reading disk, ID: ", id)
//read the disk
content, er := readJSONFromDisK(col, id)
//if the file doesn't exist, cache the not-found and return nil
if er != nil {
//create the element and set it as deleted
createElement(col, id, "", false, true) // set as deleted and do not save to disk
} else {
//Create the element from the disk content
_, err := createElement(col, id, string(content), false, false) // set to not save to disk
if err != nil {
return nil, errors.New("Invalid Disk JSON")
}
}
//call get element again (recursively)
return getElement(col, id)
}
//If the Not-found is cached, return nil directly
if elemento.Value.(*node).Deleted == true {
fmt.Println("Not-Found cached detected on getting, ID: ", id)
return nil, nil
}
//Move the element to the front of the LRU-List using a goroutine
go moveFront(elemento)
//Check if the element is mark as swapped
if elemento.Value.(*node).Swap == true {
//Read the swapped json from disk
b, _ := readJSONFromDisK(col, id)
//TODO: read if there was an error and do something...
m, err := convertJSONToMap(string(b))
if err != nil {
return nil, err
}
//save the map in the node, mark it as un-swapped
elemento.Value = &node{m, false, false}
//increase de memory counter
fmt.Println("Suma 2: ", int64(len(b)), " --> ", string(b))
atomic.AddInt64(&memBytes, int64(len(b)))
//as we have load content from disk, we have to purge LRU
go purgeLRU()
}
//Return the element
b, err := json.Marshal(elemento.Value.(*node).V)
return b, err
}
// Get the number of elements
func getElements(col string) ([]byte, error) {
cc := collections[col]
b, err := json.Marshal(len(cc.Mapa))
return b, err
}
// Purge the LRU List deleting the last element
func purgeLRU() {
LRUChan <- 1
//Checks the memory limit and decrease it if it's necessary
for atomic.LoadInt64(&memBytes) > maxMemBytes {
//sync this procedure
lisChan <- 1
//Print Message
fmt.Println(memBytes, " - ", maxMemBytes, "dif: ", memBytes-maxMemBytes)
fmt.Println("Max memory reached! swapping", memBytes)
fmt.Println("LRU Elements: ", lruList.Len())
//Get the last element and remove it. Sync is not needed because nothing
//happens if the element is moved in the middle of this routine; it will be removed in the end anyway
lastElement := lruList.Back()
if lastElement == nil {
fmt.Println("Empty LRU")
//unsync
<-lisChan
return
}
//Remove the element from the LRU
deleteElementFromLRU(lastElement)
//Mark the node as swapped
lastElement.Value.(*node).Deleted = false
lastElement.Value.(*node).Swap = true
lastElement.Value.(*node).V = nil
//Print a purge
if enablePrint | {
fmt.Println("Purge Done: ", memBytes)
} | conditional_block |
|
define.kChartDataManager.js | visibleListEndIndexOffsetFromLatest = Math.max(visibleListEndIndexOffsetFromLatest, 0);
visibleListEndIndexOffsetFromLatest = Math.min(visibleListEndIndexOffsetFromLatest, len);
return this;
};
/**
* Set the "offset of the visible data's end index from the latest K-line datum"
* @param {Integer} offset the new offset
* @returns {KChartData}
*/
this.setVisibleKDataListEndIndexOffsetFromLatest = function(offset){
visibleListEndIndexOffsetFromLatest = offset;
var len = dataList.length;
visibleListEndIndexOffsetFromLatest = Math.max(visibleListEndIndexOffsetFromLatest, 0);
visibleListEndIndexOffsetFromLatest = Math.min(visibleListEndIndexOffsetFromLatest, len);
return this;
};
/**
* Based on the current "offset of the visible data's end index from the latest K-line datum", compute and return the number of invisible K-line data points that are earlier than the visible data
* @param {Integer} [count=maxPageSize] size of the visible data window
* @returns {Integer}
*/
this.getInvisibleEarlierKDataListLength = function(count){
count = count || maxPageSize;
var len = dataList.length;
var visibleAreaEndIndex = len - 1 - visibleListEndIndexOffsetFromLatest;/* min: 0, max: len - 1 */
var visibleAreaBeginIndex = visibleAreaEndIndex - (count - 1);
if(visibleAreaBeginIndex <= 0)
return 0;
return visibleAreaBeginIndex;
};
/**
* Based on the current "offset of the visible data's end index from the latest K-line datum", compute and return the number of invisible K-line data points that are later than the visible data
* @returns {Integer}
*/
this.getInvisibleLaterKDataListLength = function(){
return visibleListEndIndexOffsetFromLatest;
};
/**
* Reset the state
* @param {Integer} [sizeToKeep=maxPageSize] number of the most recent K-line data points to keep
* @returns {KChartData}
*/
this.reset = function(sizeToKeep){
if(arguments.length < 1)
sizeToKeep = maxPageSize;
if(sizeToKeep > 0)
this.truncateKDataFromLatest(sizeToKeep);
else
dataList = [];
this.resetVisibleKDataListEndIndexOffsetFromLatest();
return this;
};
};
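// Editor's sketch (illustrative, not part of the original source): the "end index
// offset" pans the visible window back in time. Offset 0 shows the latest bars,
// a positive offset hides that many of the newest bars.
//
//   var kd = new KChartData("sz300359", "1min"); // hypothetical symbol / kType
//   kd.prependKDataList(list); // list sorted from earliest to latest
//   kd.setVisibleKDataListEndIndexOffsetFromLatest(20); // pan 20 bars into history
//   var visible = kd.getVisibleKDataList(100);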
/**
* @constructor
* K-line chart data manager, responsible for loading K-line data and related housekeeping
* @param {String} symbol product (instrument) code
* @param {String} kType K-line type
*/
var KChartDataManager = function(symbol, kType){
Object.defineProperty(this, "symbol", {value: symbol, configurable: false, writable: false});
Object.defineProperty(this, "kType", {value: kType, configurable: false, writable: false});
var kChartData = new KChartData(symbol, kType);
/** Whether K-line data is currently being loaded (mutex flag ensuring only one load request runs at a time) */
var isLoadingKData = false;
/** Whether all earlier (historical) data has been fully loaded */
var isAllEarlierKDataLoaded = false;
var self = this;
/**
* Get the associated KChartData instance
* @returns {KChartData}
*/
this.getKChartData = function(){
return kChartData;
};
/**
* Load earlier K-line data
* @param {Integer} [count=maxPageSize] number of data points to fetch
* @param {JsonObject} [ops] control options
* @param {Function} [ops.callback] method executed once the K-line data has been fetched
* @param {Function} [ops.action4NoMoreEarlierData] method executed when the available data is insufficient and all historical data has been loaded (no more earlier data)
* @returns {KChartDataManager}
*/
var loadEarlierKData = function(count, ops){
count = count || maxPageSize;
ops = util.setDftValue(ops, {
callback: null,
action4NoMoreEarlierData: null,
});
var loadedList = [];
var execCallback = function(){
kChartData.prependKDataList(loadedList);
var list = loadedList.slice(Math.max(loadedList.length - count, 0));
if(typeof ops.callback == "function")
ops.callback(list);
};
/**
* Perform the K-line data load.
* Because the server caps the amount of data returned per request, the client keeps loading until the accumulated data satisfies the amount requested by the caller
*/
var doLoad = function(){
var loadedListEarliestSeconds = 0 == loadedList.length? null: loadedList[0].Date;
var kChartDataEarliestSeconds = kChartData.getEarliestKDataSeconds();
var endTime = loadedListEarliestSeconds || kChartDataEarliestSeconds || 0;
getKChartData(symbol, kType, count, endTime, function(resp){
var list = resp || [];
var obtainedListLen = list.length;
list = filterDuplicateKData(loadedList, list);
loadedList = list.concat(loadedList);
if(loadedList.length >= count){/* enough data */
execCallback();
}else{/* not enough data, keep loading */
if(obtainedListLen < count){/* no more earlier data available */
isAllEarlierKDataLoaded = true;
execCallback();
if(typeof ops.action4NoMoreEarlierData == "function")
ops.action4NoMoreEarlierData();
}else/* keep loading */
doLoad();
}
}, function(){
execCallback();
});
};
doLoad();
return self;
};
/**
* Get the visible K-line data list. Characteristics:
* 1. Local data is used to execute the callback (ops.callback) first; if local data is insufficient, historical data is loaded and the callback is executed again once more history is available
* 2. If the local data count C does not satisfy C >= 1.5 * CV, historical data is loaded automatically, where CV is the number of data points requested by the caller
*
* @param {Integer} [count=maxPageSize] number of data points to fetch
* @param {JsonObject} [ops] control options
* @param {Function} [ops.callback] callback executed once the K-line data has been fetched
* @param {Function} [ops.action4NoMoreEarlierData] method executed when the available data is insufficient and all historical data has been loaded (no more earlier data)
* @returns {KChartDataManager}
*/
this.getVisibleKDataList = function(count, ops){
count = count || maxPageSize;
ops = util.setDftValue(ops, {
callback: null,
action4NoMoreEarlierData: null,
});
var list = kChartData.getVisibleKDataList(count);
var invisibleEarlierKDataListLength = kChartData.getInvisibleEarlierKDataListLength(count);
var isLocalDataSufficient = list.length >= count;
var ifNeedToLoadEarlierData = !isLocalDataSufficient || (invisibleEarlierKDataListLength < count / 2);
var self = this;
var len = list.length;
var callbackTriggered = false;
if(0 != len){
if(typeof ops.callback == "function"){
console.log("Exec callback for the first time", strigifyKDataList(list));
ops.callback(list);
callbackTriggered = true;
}
}else
console.debug("No local data exist to exec callback");
if(ifNeedToLoadEarlierData && !isLoadingKData && !isAllEarlierKDataLoaded){
console.debug("Loading earlier data.", list.length, count);
isLoadingKData = true;
loadEarlierKData(count, {
callback: function(list){
isLoadingKData = false;
if(isLocalDataSufficient){
return;
}
console.log("Trying to get new k data list of count: " + count, strigifyKDataList(kChartData.getKDataList()));
var newList = kChartData.getVisibleKDataList(count);
if(!callbackTriggered || newList.length != len){
if(typeof ops.callback == "function"){
console.log("Exec callback for the second time", strigifyKDataList(newList));
ops.callback(newList);
}
}
},
action4NoMoreEarlierData: ops.action4NoMoreEarlierData
});
}
return this;
};
/**
* 更新“可见数据的结束索引距离时间最晚的K线图数据的位移”。如果偏移量为正,且没有更多历史数据,则忽略本次操作 | * @param {Integer} visibleKDataCount 可见数据量
* @param {Integer} offset 位移在既有基础上的偏移量
* @returns {KChartData}
*/ | random_line_split |
|
app.py | 6',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
| elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api | random_line_split |
|
app.py | ch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api_response = api_instance.post_message(APP_ID, user_id,
postText("I haven't learned that one yet"))
# Request handling logic
def | parse_request_data | identifier_name |
|
app.py | 6',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
|
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
| api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?")) | conditional_block |
app.py | 6',
headers={
'kid': KEY_ID
})
return token_bytes.decode('utf-8')
# Configure API key authorization: jwt
smooch.configuration.api_key['Authorization'] = generate_jwt_token()
smooch.configuration.api_key_prefix['Authorization'] = 'Bearer'
# create an instance of the API class
api_instance = smooch.ConversationApi()
app_create_body = smooch.AppCreate() # AppCreate | Body for a createApp request.
### Health Checks
# IMAGES
def image(text):
if text == "Bible":
return "http://static7.bigstockphoto.com/thumbs/8/1/3/small2/318707.jpg"
elif text == "Easter":
return "http://www.beliefnet.com/columnists//deaconsbench/files/import/assets_c/2010/04/jesus-cross-thumb-400x528-12594.jpg"
elif text == "Budgeting":
return "https://www.aiadallas.org/media/uploads/event-images/budget_thumbnail.png"
elif text == "Spending":
return "http://thumbnails.billiondigital.com/297/151/1151297/1151253_small_checkboard.jpg"
elif text == "Talk":
return "https://rfclipart.com/image/thumbnail/22-f6-00/small-coffee-cup-Download-Free-Vector-File-EPS-677.jpg"
else:
return ""
# PING
@app.route('/')
def ping():
# Pull all database data and log on screen
all_messages = json.dumps([eval(redis.get(key).decode('utf8')) for key in redis.scan_iter("messages-*")])
return Response(all_messages, status=200)
def postText(text):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
return message
def postTextWithReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='reply', text=reply, payload=reply))
message.actions = actions
return message
def postTextWithListReplies(text, replies):
message = smooch.MessagePost(role='appMaker', type='text')
message.text = text
actions = []
for reply in replies:
actions.append(smooch.Action(type='postback', text=reply, payload=reply))
message.actions = actions
return message
def postImage(uri):
message = smooch.MessagePost(role='appMaker', type='image')
message.media_url = uri
return message
def postFile(uri):
|
def postCarousel(list):
message = smooch.MessagePost(role='appMaker', type='carousel')
items = []
for item in list:
actions = []
actions.append(smooch.Action(type='postback', text=item, payload=item))
part = smooch.MessageItem(title=item, actions=actions)
part.media_url = image(item)
part.size = 'compact'
items.append(part)
message.items = items
return message
def handle_message(user_id, text):
if text == "Help":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Just say Hi, we can talk about Jesus or Money."))
elif text == "Talk":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://capuk.org/connect/contact-us"))
elif text == "Hello" or text == "Hey" or text == "Hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("What do you want to chat about?", ['Jesus', 'Money', 'Rachel']))
elif text == "hello" or text == "hey" or text == "hi":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("How do you feel about money?", [':D',':)',':(',':@']))
### JESUS ###
elif text == "Jesus":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Bible', 'Easter', 'Talk']))
elif text == "Bible":
api_response = api_instance.post_message(APP_ID, user_id,
postText("https://www.desiringgod.org/articles/how-to-read-the-bible-for-yourself"))
elif text == "Easter":
api_response = api_instance.post_message(APP_ID, user_id,
postText("http://www.st-helens.org.uk/internationals/who-is-jesus"))
### MONEY ###
elif text == "Money":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "Budgeting":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your budget?", ['Regular Budget', 'Weekly Allowance Budget']))
elif text == "Spending":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithListReplies("Are you happy to tell me about your spend?", ['Regular Spend', 'Weekly Allowance Spend']))
### BUDGET ###
elif text == "Regular Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your rent or mortgage?"))
elif text == "Weekly Allowance Budget":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it!"))
api_response = api_instance.post_message(APP_ID, user_id,
postText("Ok, how much is your weekly allowance budget?"))
elif text == "Weekly Allowance Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Regular Spend":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! What did you spend money on?"))
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Home', 'Living', 'Travel', 'Family', 'Leisure', 'Future', 'Giving', 'Repayments']))
elif text == "Home":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Living":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Travel":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Family":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Leisure":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Future":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Giving":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Repayments":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Got it! How much did you spend?"))
elif text == "Yes please":
api_response = api_instance.post_message(APP_ID, user_id,
postCarousel(['Budgeting', 'Spending', 'Talk']))
elif text == "I'm ok":
api_response = api_instance.post_message(APP_ID, user_id,
postText(":)"))
### RACHEL ###
elif text == "Rachel":
api_response = api_instance.post_message(APP_ID, user_id,
postFile("http://rachelschallenge.org/media/media_press_kit/Code_of_ethics.pdf"))
### EMOJI ###
elif text == ":D":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":)":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Have you thought about helping other people cope better? https://capuk.org"))
elif text == ":(":
api_response = api_instance.post_message(APP_ID, user_id,
postTextWithReplies("You're not alone! I'm your budget buddy. I can help you cope better.", ['Yes please', 'I\'m ok']))
elif text == ":@":
api_response = api_instance.post_message(APP_ID, user_id,
postText("You're not alone!"))
elif text == "":
api_response = api_instance.post_message(APP_ID, user_id,
postText("Speachless"))
else:
api | message = smooch.MessagePost(role='appMaker', type='file')
message.media_url = uri
return message | identifier_body |
google_spreadsheets.rs | #[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() |
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
. | {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
} | conditional_block |
google_spreadsheets.rs | )]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
}); | let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
.ok | random_line_split |
|
google_spreadsheets.rs | #[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> |
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn parse_boolean(s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
. | {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
} | identifier_body |
google_spreadsheets.rs | )]
#[derive(Deserialize, Debug)]
struct SpreadsheetValues {
range: String,
#[serde(rename = "majorDimension")]
major_dimension: String,
values: Vec<Vec<String>>,
}
// TODO: should we support optional column?
fn infer_value_type(v: &str) -> DataType {
// match order matters
match v {
// TODO: support Date64 and Time64
_ if v.parse::<i64>().is_ok() => DataType::Int64,
_ if v.parse::<f64>().is_ok() => DataType::Float64,
_ => match v.to_lowercase().as_str() {
"false" | "true" => DataType::Boolean,
_ => DataType::Utf8,
},
}
}
// util wrapper for calling google spreadsheet API
async fn gs_api_get(token: &str, url: &str) -> Result<reqwest::Response, ColumnQError> {
Client::builder()
.build()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to initialize HTTP client: {}",
e.to_string()
))
})?
.get(url)
.bearer_auth(token)
.send()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to send API request: {}",
e.to_string()
))
})
}
fn coerce_type(l: DataType, r: DataType) -> DataType {
match (l, r) {
(DataType::Boolean, DataType::Boolean) => DataType::Boolean,
(DataType::Date32, DataType::Date32) => DataType::Date32,
(DataType::Date64, DataType::Date64)
| (DataType::Date64, DataType::Date32)
| (DataType::Date32, DataType::Date64) => DataType::Date64,
(DataType::Int64, DataType::Int64) => DataType::Int64,
(DataType::Float64, DataType::Float64)
| (DataType::Float64, DataType::Int64)
| (DataType::Int64, DataType::Float64) => DataType::Float64,
_ => DataType::Utf8,
}
}
fn infer_schema(rows: &[Vec<String>]) -> Schema {
let mut col_types: HashMap<&str, HashSet<DataType>> = HashMap::new();
let col_names = &rows[0];
rows.iter().skip(1).for_each(|row| {
row.iter().enumerate().for_each(|(i, col_val)| {
let col_name = &col_names[i];
let col_type = infer_value_type(col_val);
let entry = col_types.entry(col_name).or_insert_with(HashSet::new);
entry.insert(col_type);
});
});
let fields: Vec<Field> = col_names
.iter()
.map(|col_name| {
let set = col_types.entry(col_name).or_insert_with(|| {
// TODO: this should never happen, maybe we should use panic instead?
let mut set = HashSet::new();
set.insert(DataType::Utf8);
set
});
let mut dt_iter = set.iter().cloned();
let dt_init = dt_iter.next().unwrap_or(DataType::Utf8);
let dt = dt_iter.fold(dt_init, coerce_type);
// normalize column name by replacing space with under score
Field::new(&col_name.replace(" ", "_"), dt, false)
})
.collect();
Schema::new(fields)
}
fn | (s: &str) -> bool {
s.eq_ignore_ascii_case("true")
}
fn sheet_values_to_record_batch(values: &[Vec<String>]) -> Result<RecordBatch, ColumnQError> {
let schema = infer_schema(values);
let arrays = schema
.fields()
.iter()
.enumerate()
.map(|(i, field)| {
// skip header row
let rows_iter = values.iter().skip(1);
Ok(match field.data_type() {
DataType::Boolean => Arc::new(
rows_iter
.map(|row| Some(parse_boolean(&row[i])))
.collect::<BooleanArray>(),
) as ArrayRef,
DataType::Int64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<i64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect int64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Int64Type>, ColumnQError>>()?,
) as ArrayRef,
DataType::Float64 => Arc::new(
rows_iter
.map(|row| {
Ok(Some(row[i].parse::<f64>().map_err(|_| {
ColumnQError::GoogleSpreadsheets(format!(
"Expect float64 value, got: {}",
row[i]
))
})?))
})
.collect::<Result<PrimitiveArray<Float64Type>, ColumnQError>>()?,
) as ArrayRef,
_ => Arc::new(rows_iter.map(|row| Some(&row[i])).collect::<StringArray>())
as ArrayRef,
})
})
.collect::<Result<Vec<ArrayRef>, ColumnQError>>()?;
Ok(RecordBatch::try_new(Arc::new(schema), arrays)?)
}
async fn fetch_auth_token(
opt: &TableOptionGoogleSpreasheet,
) -> Result<yup_oauth2::AccessToken, ColumnQError> {
// Read application creds from a file.The clientsecret file contains JSON like
// `{"installed":{"client_id": ... }}`
let creds = yup_oauth2::read_service_account_key(&opt.application_secret_path)
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error reading application secret from disk: {}",
e.to_string()
))
})?;
let sa = yup_oauth2::ServiceAccountAuthenticator::builder(creds)
.build()
.await
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Error building service account authenticator: {}",
e.to_string()
))
})?;
let scopes = &["https://www.googleapis.com/auth/spreadsheets.readonly"];
sa.token(scopes).await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to obtain OAuth2 token: {}", e.to_string()))
})
}
async fn resolve_sheet_title<'a, 'b, 'c, 'd>(
token: &'a str,
spreadsheet_id: &'b str,
uri: &'c URIReference<'d>,
) -> Result<String, ColumnQError> {
// look up sheet title by sheet id through API
let resp = gs_api_get(
token,
&format!(
"https://sheets.googleapis.com/v4/spreadsheets/{}",
spreadsheet_id
),
)
.await?
.error_for_status()
.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!(
"Failed to resolve sheet title from API: {}",
e.to_string()
))
})?;
let spreadsheets = resp.json::<Spreadsheets>().await.map_err(|e| {
ColumnQError::GoogleSpreadsheets(format!("Failed to parse API response: {}", e.to_string()))
})?;
// when sheet id is not specified from config, try to parse it from URI
let sheet_id: Option<usize> = match uri.fragment() {
// if sheeit id is specified within the URI in the format of #gid=x
Some(fragment) => {
let s = fragment.as_str();
let parts: Vec<&str> = s.split('=').collect();
match parts.len() {
2 => match parts[0] {
"gid" => parts[1].parse().ok(),
_ => None,
},
_ => None,
}
}
None => None,
};
let sheet = match sheet_id {
Some(id) => spreadsheets
.sheets
.iter()
.find(|s| s.properties.sheet_id == id)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets(format!("Invalid sheet id {}", id)))?,
// no sheet id specified, default to the first sheet
None => spreadsheets
.sheets
.iter()
.find(|s| s.properties.index == 0)
.ok_or_else(|| ColumnQError::GoogleSpreadsheets("spreadsheets is empty".to_string()))?,
};
Ok(sheet.properties.title.clone())
}
pub async fn to_mem_table(
t: &TableSource,
) -> Result<datafusion::datasource::MemTable, ColumnQError> {
lazy_static! {
static ref RE_GOOGLE_SHEET: Regex =
Regex::new(r"https://docs.google.com/spreadsheets/d/(.+)").unwrap();
}
let uri_str = t.get_uri_str();
if RE_GOOGLE_SHEET.captures(uri_str).is_none() {
return Err(ColumnQError::InvalidUri(uri_str.to_string()));
}
let uri = URIReference::try_from(uri_str)?;
let spreadsheet_id = uri.path().segments()[2].as_str();
let opt = t
.option
.as_ref()
. | parse_boolean | identifier_name |
end2end_test.go | : %v", result.Errors)
}
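	// Exercise every supported transport; each transport runs the full auth/sub-test matrix below.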
for name, mktransport := range map[string]func() transport.Transport{
"mqtt": func() transport.Transport { return mqtt.New() },
"mqtt-ws": func() transport.Transport { return mqtt.New(mqtt.WithWebSocket(true)) },
// TODO: "amqp": func() transport.Transport { return amqp.New() },
// TODO: "http": func() transport.Transport { return http.New() },
} {
mktransport := mktransport
t.Run(name, func(t *testing.T) {
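			// Each suite builds a device client for one auth method; "only" restricts which sub-tests run ("*" runs them all).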
for auth, suite := range map[string]struct {
init func(transport transport.Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
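// wait for the event to round-trip through the hub, an error, or the timeout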
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
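// poll the device-side subscription until the message sent above shows up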
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil | {
t.Fatal(err)
} | conditional_block |
|
end2end_test.go | , dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
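// the desired-properties patch made above should arrive on the twin subscription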
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
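// invoke the method from the service side and compare status code and payload with what the handler returns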
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5,
Payload: map[string]interface{}{
"a": 1.5,
"b": 3,
},
})
if err != nil {
errc <- err
}
resc <- v
}()
select {
case v := <-resc:
w := &iotservice.MethodResult{
Status: 222,
Payload: map[string]interface{}{
"result": 4.5,
},
}
if !reflect.DeepEqual(v, w) {
t.Errorf("direct-method result = %v, want %v", v, w)
}
case err := <-errc:
t.Fatal(err)
}
}
func | genID | identifier_name |
|
end2end_test.go | .Transport) (*iotdevice.Client, error)
only string
}{
// TODO: ca authentication
"x509": {
func(transport transport.Transport) (*iotdevice.Client, error) {
return iotdevice.NewFromX509FromFile(
transport,
"golang-iothub-self-signed",
sc.HostName(),
"testdata/device.crt",
"testdata/device.key",
)
},
"DeviceToCloud", // just need to check access
},
"sas": {
func(transport transport.Transport) (*iotdevice.Client, error) {
device, err := sc.GetDevice(context.Background(), "golang-iothub-sas")
if err != nil {
return nil, err
}
dcs, err := sc.DeviceConnectionString(device, false)
if err != nil {
t.Fatal(err)
}
return iotdevice.NewFromConnectionString(transport, dcs)
},
"*",
},
} {
for name, test := range map[string]func(*testing.T, *iotservice.Client, *iotdevice.Client){
"DeviceToCloud": testDeviceToCloud,
"CloudToDevice": testCloudToDevice,
"DirectMethod": testDirectMethod,
"UpdateDeviceTwin": testUpdateTwin,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err) | }
select {
case state := <-sub.C(): | random_line_split |
|
end2end_test.go | ,
"SubscribeTwin": testSubscribeTwin,
} {
if suite.only != "*" && suite.only != name {
continue
}
test, suite, mktransport := test, suite, mktransport
t.Run(auth+"/"+name, func(t *testing.T) {
dc, err := suite.init(mktransport())
if err != nil {
t.Fatal(err)
}
defer dc.Close()
if err := dc.Connect(context.Background()); err != nil {
t.Fatal(err)
}
test(t, sc, dc)
})
}
}
})
}
}
func testDeviceToCloud(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
evsc := make(chan *iotservice.Event, 1)
errc := make(chan error, 2)
go func() {
errc <- sc.SubscribeEvents(context.Background(), func(ev *iotservice.Event) error {
if ev.ConnectionDeviceID == dc.DeviceID() {
evsc <- ev
}
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
done := make(chan struct{})
defer close(done)
// send events until one of them is received
go func() {
for {
if err := dc.SendEvent(context.Background(), payload,
iotdevice.WithSendMessageID(genID()),
iotdevice.WithSendCorrelationID(genID()),
iotdevice.WithSendProperties(props),
iotdevice.WithSendCreationTime(time.Now().Add(-24*time.Hour)),
); err != nil {
errc <- err
break
}
select {
case <-done:
case <-time.After(500 * time.Millisecond):
}
}
}()
select {
case msg := <-evsc:
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ConnectionDeviceID != dc.DeviceID() {
t.Errorf("ConnectionDeviceID = %q, want %q", msg.ConnectionDeviceID, dc.DeviceID())
}
if msg.ConnectionDeviceGenerationID == "" {
t.Error("ConnectionDeviceGenerationID is empty")
}
if msg.ConnectionAuthMethod == nil {
t.Error("ConnectionAuthMethod is nil")
}
if msg.MessageSource == "" {
t.Error("MessageSource is empty")
}
if msg.EnqueuedTime.IsZero() {
t.Error("EnqueuedTime is zero")
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.Properties["iothub-creation-time-utc"] == "" {
t.Error("iothub-creation-time-utc missing")
}
testProperties(t, msg.Properties, props)
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("d2c timed out")
}
}
func testProperties(t *testing.T, got, want map[string]string) {
t.Helper()
for k, v := range want {
x, ok := got[k]
if !ok || x != v {
t.Errorf("Properties = %v, want %v", got, want)
return
}
}
}
func testCloudToDevice(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
fbsc := make(chan *iotservice.Feedback, 1)
errc := make(chan error, 3)
sub, err := dc.SubscribeEvents(context.Background())
if err != nil {
t.Fatal(err)
}
// subscribe to feedback and report first registered message id
go func() {
errc <- sc.SubscribeFeedback(context.Background(), func(fb *iotservice.Feedback) error {
fbsc <- fb
return nil
})
}()
payload := []byte("hello")
props := map[string]string{"a": "a", "b": "b"}
uid := "golang-iothub"
mid := genID()
if err := sc.SendEvent(context.Background(), dc.DeviceID(), payload,
iotservice.WithSendAck(iotservice.AckFull),
iotservice.WithSendProperties(props),
iotservice.WithSendUserID(uid),
iotservice.WithSendMessageID(mid),
iotservice.WithSendCorrelationID(genID()),
iotservice.WithSendExpiryTime(time.Now().Add(5*time.Second)),
); err != nil {
errc <- err
return
}
for {
select {
case msg := <-sub.C():
if msg.MessageID != mid {
continue
}
// validate event feedback
Outer:
for {
select {
case fb := <-fbsc:
if fb.OriginalMessageID != mid {
continue
}
if fb.StatusCode != "Success" {
t.Errorf("feedback status = %q, want %q", fb.StatusCode, "Success")
}
break Outer
case <-time.After(15 * time.Second):
t.Log("feedback timed out, probably due to eventhub batching")
break Outer
}
}
// validate message content
if msg.To == "" {
t.Error("To is empty")
}
if msg.UserID != uid {
t.Errorf("UserID = %q, want %q", msg.UserID, uid)
}
if !bytes.Equal(msg.Payload, payload) {
t.Errorf("Payload = %v, want %v", msg.Payload, payload)
}
if msg.MessageID == "" {
t.Error("MessageID is empty")
}
if msg.CorrelationID == "" {
t.Error("CorrelationID is empty")
}
if msg.ExpiryTime.IsZero() {
t.Error("ExpiryTime is zero")
}
testProperties(t, msg.Properties, props)
return
case err := <-errc:
t.Fatal(err)
case <-time.After(10 * time.Second):
t.Fatal("c2d timed out")
}
}
}
func testUpdateTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
// update state and keep track of version
s := fmt.Sprintf("%d", time.Now().UnixNano())
v, err := dc.UpdateTwinState(context.Background(), map[string]interface{}{
"ts": s,
})
if err != nil {
t.Fatal(err)
}
_, r, err := dc.RetrieveTwinState(context.Background())
if err != nil {
t.Fatal(err)
}
if v != r.Version() {
t.Errorf("update-twin version = %d, want %d", r.Version(), v)
}
if r["ts"] != s {
t.Errorf("update-twin parameter = %q, want %q", r["ts"], s)
}
}
// TODO: very flaky
func testSubscribeTwin(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) {
sub, err := dc.SubscribeTwinUpdates(context.Background())
if err != nil {
t.Fatal(err)
}
defer dc.UnsubscribeTwinUpdates(sub)
// TODO: hacky, but reduces flakiness
time.Sleep(time.Second)
twin, err := sc.UpdateDeviceTwin(context.Background(), &iotservice.Twin{
DeviceID: dc.DeviceID(),
Tags: map[string]interface{}{
"test-device": true,
},
Properties: &iotservice.Properties{
Desired: map[string]interface{}{
"test-prop": time.Now().UnixNano() / 1000,
},
},
})
if err != nil {
t.Fatal(err)
}
select {
case state := <-sub.C():
if state["$version"] != twin.Properties.Desired["$version"] {
t.Errorf("version = %d, want %d", state["$version"], twin.Properties.Desired["$version"])
}
if state["test-prop"] != twin.Properties.Desired["test-prop"] {
t.Errorf("test-prop = %q, want %q", state["test-prop"], twin.Properties.Desired["test-prop"])
}
case <-time.After(10 * time.Second):
t.Fatal("SubscribeTwinUpdates timed out")
}
}
func testDirectMethod(t *testing.T, sc *iotservice.Client, dc *iotdevice.Client) | {
if err := dc.RegisterMethod(
context.Background(),
"sum",
func(v map[string]interface{}) (int, map[string]interface{}, error) {
return 222, map[string]interface{}{
"result": v["a"].(float64) + v["b"].(float64),
}, nil
},
); err != nil {
t.Fatal(err)
}
resc := make(chan *iotservice.MethodResult, 1)
errc := make(chan error, 2)
go func() {
v, err := sc.CallDeviceMethod(context.Background(), dc.DeviceID(), &iotservice.MethodCall{
MethodName: "sum",
ConnectTimeout: 5,
ResponseTimeout: 5, | identifier_body |
|
build_ionospheric_model.py | (xx, a0, c0, s0):
return a0 * (1/(s0*(np.sqrt(2*np.pi))))*(np.exp((-1.0/2.0)*(((xx-c0)/s0)**2)))
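# _a_gaussian tapers the Gaussian with an arctan step about c0, giving the asymmetric shape used to fit ground-scatter power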
def _a_gaussian(xx, a0, c0, s0, sp ):
return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
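# 1-D fallback: same threshold search, but on the plain Gaussian when the asymmetric fit fails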
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
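# curve_fit could not converge on the asymmetric model; fall back to a symmetric Gaussian fit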
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Rescale tx frequency (kHz -> MHz) and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
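# slide a window of scan_num scans along the record and fit each window independently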
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0 | _1gaussian | identifier_name |
|
build_ionospheric_model.py |
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Rescale tx frequency (kHz -> MHz) and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
| return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi)) | identifier_body |
|
build_ionospheric_model.py | ):
return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Rescale tx frequency (kHz -> MHz) and estimate slant range
for scan in scans: | for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist())
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
| random_line_split |
|
build_ionospheric_model.py | return _1gaussian(xx, a0, c0, s0 ) * (0.5 + (np.arctan(sp*(xx-c0))/np.pi))
def opt(f, x0, y0, p0):
popt, pcov = curve_fit(f, x0, y0, p0=p0)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def estimate_skip_distance(popt):
xx = np.linspace(0,popt[1],30000)
yy = _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3])
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
def estimate_skip_distance_1D_params(a0, c0, s0):
xx = np.linspace(0,c0,30000)
yy = _1gaussian(xx, a0, c0, s0)
print("\n Power drop {}".format(power_drop))
sd = np.round(xx[np.argmin(np.abs(yy - power_drop))], 1)
return sd
try:
popt_g, _ = opt(_1gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x)])
popt_ag, _ = opt(_a_gaussian, x, y, p0=[np.max(y), np.mean(x), np.std(x), 0.1])
skip_distance = estimate_skip_distance(popt_ag)
if plot:
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, popt_g[0], popt_g[1], popt_g[2]), color="b", lw=0.8, )
ax.plot(xx, _a_gaussian(xx, popt_ag[0], popt_ag[1], popt_ag[2], popt_ag[3]), color="r", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname, bbox_inches="tight")
plt.close()
except:
import traceback
traceback.print_exc()
mx, mean, std = np.max(y), np.mean(x), np.std(x)
skip_distance = estimate_skip_distance_1D_params(mx, mean, std)
fig = plt.figure(dpi=120,figsize=(3,3))
ax = fig.add_subplot(111)
ax.bar(x, y, width=30, color="y", ec="k", lw=0.3)
xx = np.arange(3500)
ax.plot(xx, _1gaussian(xx, mx, mean, std), color="b", lw=0.8, )
ax.text(0.8,0.8,r"$x_0$={} km".format(skip_distance),ha="center", va="center",transform=ax.transAxes)
ax.text(0.2,0.8,r"$\delta_0={}^o$".format(elv),ha="center", va="center",transform=ax.transAxes)
ax.axvline(skip_distance, color="cyan",lw=0.8)
ax.set_xlabel("Slant Range, km")
ax.set_ylabel("Power, db")
ax.set_xlim(0, 3500)
ax.set_ylim(0, 30)
ax.set_title(r"$foF_2^o$={} MHz".format(tfreq))
fig.savefig(fname.replace(".png", "_e.png"), bbox_inches="tight")
plt.close()
return skip_distance
def build_oblique_foF2_observed_by_radar(rad="cvw", dates=[dt.datetime(2017,8,21), dt.datetime(2017,8,21)], bmnum=11, scan_num=1,
remove_first_range=800, remove_last_range=2500, power_drop=10., plot=True):
""" Estimate MUF from the GS power """
csv_fname = "data/{rad}_{start}_{end}.csv".format(rad=rad, start=dates[0].strftime("%Y.%m.%d.%H.%M"),
end=dates[1].strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(csv_fname):
fdata = FetchData( rad, [dates[0], dates[1]] )
s_params = ["noise.sky", "tfreq", "frang", "rsep", "scan", "bmnum"]
v_params = ["slist", "v", "w_l", "p_l", "elv"]
_, scans = fdata.fetch_data(by="scan", s_params=s_params, v_params=v_params)
beams = []
# Rescale tx frequency (kHz -> MHz) and estimate slant range
for scan in scans:
for beam in scan.beams:
if len(beam.slist) > 0:
setattr(beam, "slant_range", beam.frang + np.array(beam.slist.tolist()) * beam.rsep)
setattr(beam, "tfreq", np.round(beam.tfreq/1e3,1))
beams.append(beam)
# Extract oblique foF2 or MUF scan by scan
print("\n Data will be averaged over {} scans".format(scan_num))
print(" Processing beam {}".format(bmnum))
print(" Remove first range {}".format(remove_first_range))
print(" Remove last range {}".format(remove_last_range))
print(" Power drop {}".format(power_drop))
skip_distance, o_foF2, time_start, time_end = [], [], [], []
for i in range(len(scans)-scan_num):
rscan = scans[i:i+scan_num]
p_l, srange, tfrq, angle = [], [], [], []
for scan in rscan:
for beam in scan.beams:
if beam.bmnum == bmnum:
|
du = pd.DataFrame()
du["p_l"], du["srange"] = p_l, srange
du = du[(du.srange>remove_first_range) & (du.srange<remove_last_range)]
fname = "images/{}.png".format(rscan[0].stime.strftime("%Y-%m-%d-%H-%M"))
if len(du) > 0:
sd = fit_lambda(du, power_drop, tfreq=np.mean(tfrq), elv=np.mean(angle).round(1),
xlabel="srange", ylabel="p_l", fname=fname, plot=plot)
if sd != np.nan:
skip_distance.append(sd)
o_foF2.append(np.mean(tfrq))
time_start.append(rscan[0].stime)
time_end.append(rscan[-1].stime)
df = pd.DataFrame()
df["skip_distance"], df["o_foF2"], df["time_start"], df["time_end"] = skip_distance, o_foF2, time_start, time_end
df.to_csv(csv_fname, index=False, header=True)
else: df = pd.read_csv(csv_fname, parse_dates=["time_start", "time_end"])
print(" Header:\n",df.head())
return df
def build_occultation_functions(rad, dates, time_range, bmnum=11, scan_num=1, remove_first_range=500, remove_last_range=2500,
power_drop=10., plot=True):
df = build_oblique_foF2_observed_by_radar(rad, dates, bmnum, scan_num, remove_first_range, remove_last_range, power_drop, plot)
upper = df[df.time_start <= time_range[0]]
lower = df[df.time_start >= time_range[1]]
def plot_rays(ax, u, color="r", ms=1, alpha=0.6, lw=1.5, wl=51):
midnight = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
| if len(beam.slist) > 0:
p_l.extend(beam.p_l.tolist())
srange.extend(beam.slant_range.tolist())
tfrq.append(beam.tfreq)
if type(beam.elv) is list: angle.extend(beam.elv)
else: angle.extend(beam.elv.tolist()) | conditional_block |
actionitems.ts | {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format date as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
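// restrict the table to questions at the selected MRL; a falsy/0 filter value shows everything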
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
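// flatten the open action items into rows matching the table columns, then export with SheetJS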
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() {
GoogleAnalytics.trackPage("actionitems");
}
displayRisks(q) {
var risks = [];
q.technical ? risks.push("Technical") : null
q.schedule ? risks.push("Schedule") : null
q.cost ? risks.push("Cost") : null
return risks.join(", ") || "none";
}
getAttachments(q) {
return this.attachments.filter(a => a.questionId == q.questionId );
}
| navToQuestion | identifier_name |
|
actionitems.ts | ',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
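// Example (illustrative, not from the original project): formatDate('2020-06-01T12:00:00Z')
// returns '2020-06-01', while a falsy input (missing due date) returns '' so empty dates sort together.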
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
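// Worked example (illustrative): with page = {page: 2, itemsPerPage: 10} and 25 rows,
// start = 10 and end = 20, so rows 10-19 are returned; an itemsPerPage of -1 returns the full data set.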
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
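// Sketch of the expected filter config (inferred from the code above, not from project docs):
// per-column filters read column.filtering.filterString, while a global filter such as
// onChangeTable({filtering: {filterString: 'Cost'}}) is matched against every column of every row.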
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
public onCellClick(data: any): any {
console.log(data);
}
unique = (item, index, array) => array.indexOf(item) == index
ionViewWillEnter() {
GoogleAnalytics.trackPage("actionitems");
}
actionitems.ts
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
actionitems.ts
threadName
subThreadName
currentAnswer
questionId
answers {
when
who
risk
consequence
likelihood
what
reason
assumptionsNo
notesNo
answer
# technical
# schedule
# cost
}
}
files {
name
questionId
url
}
}
}
`
@IonicPage()
@Component({
selector: 'page-actionitems',
templateUrl: 'actionitems.html',
})
export class ActionitemsPage {
public data:any;
filterList: any = {};
unfilteredQuestions: any;
autoFilter = false;
public rows:Array<any> = [];
public columns:Array<any> = [
{title: 'Thread', name: 'threadName', filtering: {filterString: '', placeholder: 'Filter by thread'}},
{title: 'Subthread', name: 'subThreadName', filtering: {filterString: '', placeholder: 'Filter by subthread'}},
{title: 'Question', name: 'questionText', filtering: {filterString: '', placeholder: 'Filter by question'}},
// {title: 'Answer', name: 'currentAnswer', filtering: {filterString: '', placeholder: 'Filter by answer'}},
{title: 'Action', name: 'what', filtering: {filterString: '', placeholder: 'Filter by action'}},
{title: 'Due', name: 'when', filtering: {filterString: '', placeholder: 'Filter by due date'}, sort: 'asc'},
{title: 'Owner', name: 'who', filtering: {filterString: '', placeholder: 'Filter by owner'}},
{title: 'Risk Level', name: 'risk', filtering: {filterString: '', placeholder: 'Filter by risk level'}}
];
public page:number = 1;
public itemsPerPage:number = 10;
public maxSize:number = 5;
public numPages:number = 1;
public length:number = 0;
public config:any = {
paging: true,
sorting: {columns: this.columns},
filtering: {filterString: ''},
className: ['table-striped', 'table-bordered']
};
no: any;
assessmentId: any;
private attachments: any;
pageName: any = "Action Items";
assessmentIdFromParams: any;
constructor( private apollo: Apollo,
public navCtrl: NavController,
public navParams: NavParams,
public popOver: PopoverController,
private assessmentService: AssessmentService) {
this.assessmentIdFromParams = navParams.data.assessmentId;
console.log(this.assessmentIdFromParams);
this.autoFilter = navParams.data.autoFilter;
}
async ngOnInit() {
this.assessmentId = await this.assessmentService.getCurrentAssessmentId();
this.apollo.watchQuery({
query: assessmentQuery,
variables: {_id: this.assessmentId},
fetchPolicy: "network-only"
}).valueChanges
.subscribe(data => {
this.no = (<any>data.data).assessment.questions.filter( a => {
if (a.answers.length > 0 ) {
return a.answers[a.answers.length - 1].answer == "No"
}
});
var targetMRL = (<any>data.data).assessment.targetMRL;
this.attachments = (<any>data.data).assessment.files;
var newData:Array<any> = [];
console.log(this.no);
this.no.forEach( (element) => {
var newObj:any = {};
newObj.threadName = "" + element.threadName;
newObj.subThreadName = "" + element.subThreadName;
newObj.questionText = "" + element.questionText;
// newObj.currentAnswer = "" + element.answers[element.answers.length - 1].answer;
newObj.what = "" + element.answers[element.answers.length - 1].what;
newObj.when = this.formatDate( element.answers[element.answers.length - 1].when);
newObj.who = "" + element.answers[element.answers.length - 1].who;
newObj.level = "" + element.mrLevel;
var cur = element.answers[element.answers.length - 1];
newObj.risk = "" + this.calculateRiskScore(cur.likelihood, cur.consequence);
newData.push(newObj);
});
this.data = newData;
this.unfilteredQuestions = newData;
if (this.autoFilter){
console.log('here');
this.filterList.filterMRL = targetMRL;
console.log(targetMRL)
this.data = this.unfilteredQuestions.filter(question => {
if (question.level == targetMRL){
return question;
}
});
} else {
this.data = this.unfilteredQuestions;
}
// console.log(this.data);
// this.length = this.data.length;
// this.onChangeTable(this.config);
});
}
/**
* @purpose: format data as a sortable string for table
* @input: date: a new Date() parsable string
* @output: string, format YYYY-MM-DD
*/
formatDate(date){
if ( date ) {
return new Date(date).toISOString().substr(0,10);
} else {
return '';
}
}
filterTheList(){
console.log(this.filterList.filterMRL)
if (this.filterList.filterMRL && this.filterList.filterMRL != 0) {
var filteredQuestions = this.unfilteredQuestions.filter(question => {
if (question.level == this.filterList.filterMRL) {
// console.log('here')
return question
}
});
console.log(filteredQuestions);
this.data = filteredQuestions;
} else {
this.data = this.unfilteredQuestions;
}
}
clearFilter() {
this.filterList.filterMRL = 0;
this.filterTheList();
}
public saveXLS() {
var headers = this.columns.map(c => c.title);
var values = this.no.map(nq => {
return [
nq.threadName,
nq.subThreadName,
nq.questionText,
// nq.answers[nq.answers.length - 1].answer,
nq.answers[nq.answers.length - 1].what,
nq.answers[nq.answers.length - 1].when,
nq.answers[nq.answers.length - 1].who,
this.calculateRiskScore(nq.answers[nq.answers.length - 1].likelihood, nq.answers[nq.answers.length - 1].consequence)
];
})
var worksheet = [headers, ...values];
var ws = XLSX.utils.aoa_to_sheet(worksheet);
var wb = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Action Items');
/* save to file */
XLSX.writeFile(wb, 'action_items.xlsx');
}
public changePage(page:any, data:Array<any> = this.data):Array<any> {
let start = (page.page - 1) * page.itemsPerPage;
let end = page.itemsPerPage > -1 ? (start + page.itemsPerPage) : data.length;
return data.slice(start, end);
}
public changeSort(data:any, config:any):any {
if (!config.sorting) {
return data;
}
let columns = this.config.sorting.columns || [];
let columnName:string = void 0;
let sort:string = void 0;
for (let i = 0; i < columns.length; i++) {
if (columns[i].sort !== '' && columns[i].sort !== false) {
columnName = columns[i].name;
sort = columns[i].sort;
}
}
if (!columnName) {
return data;
}
// simple sorting
return data.sort((previous:any, current:any) => {
if (previous[columnName] > current[columnName]) {
return sort === 'desc' ? -1 : 1;
} else if (previous[columnName] < current[columnName]) {
return sort === 'asc' ? -1 : 1;
}
return 0;
});
}
public changeFilter(data:any, config:any):any {
let filteredData:Array<any> = data;
this.columns.forEach((column:any) => {
if (column.filtering) {
filteredData = filteredData.filter((item:any) => {
return item[column.name].match(column.filtering.filterString);
});
}
});
if (!config.filtering) {
return filteredData;
}
if (config.filtering.columnName) {
return filteredData.filter((item:any) =>
item[config.filtering.columnName].match(this.config.filtering.filterString));
}
let tempArray:Array<any> = [];
filteredData.forEach((item:any) => {
let flag = false;
this.columns.forEach((column:any) => {
if (item[column.name].toString().match(this.config.filtering.filterString)) {
flag = true;
}
});
if (flag) {
tempArray.push(item);
}
});
filteredData = tempArray;
return filteredData;
}
public onChangeTable(config:any, page:any = {page: this.page, itemsPerPage: this.itemsPerPage}):any {
if (config.filtering) {
Object.assign(this.config.filtering, config.filtering);
}
if (config.sorting) {
Object.assign(this.config.sorting, config.sorting);
}
let filteredData = this.changeFilter(this.data, this.config);
let sortedData = this.changeSort(filteredData, this.config);
this.rows = page && config.paging ? this.changePage(page, sortedData) : sortedData;
this.length = sortedData.length;
}
pm.rs
mcctrl: VolatileCell<u32>,
cpusel: VolatileCell<u32>,
_reserved1: VolatileCell<u32>,
pbasel: VolatileCell<u32>,
pbbsel: VolatileCell<u32>,
pbcsel: VolatileCell<u32>,
pbdsel: VolatileCell<u32>,
_reserved2: VolatileCell<u32>,
cpumask: VolatileCell<u32>, // 0x020
hsbmask: VolatileCell<u32>,
pbamask: VolatileCell<u32>,
pbbmask: VolatileCell<u32>,
pbcmask: VolatileCell<u32>,
pbdmask: VolatileCell<u32>,
_reserved3: [VolatileCell<u32>; 2],
pbadivmask: VolatileCell<u32>, // 0x040
_reserved4: [VolatileCell<u32>; 4],
cfdctrl: VolatileCell<u32>,
unlock: VolatileCell<u32>,
_reserved5: [VolatileCell<u32>; 25], // 0x60
ier: VolatileCell<u32>, // 0xC0
idr: VolatileCell<u32>,
imr: VolatileCell<u32>,
isr: VolatileCell<u32>,
icr: VolatileCell<u32>,
sr: VolatileCell<u32>,
_reserved6: [VolatileCell<u32>; 34], // 0x100
ppcr: VolatileCell<u32>, // 0x160
_reserved7: [VolatileCell<u32>; 7],
rcause: VolatileCell<u32>, // 0x180
wcause: VolatileCell<u32>,
awen: VolatileCell<u32>,
protctrl: VolatileCell<u32>,
_reserved8: VolatileCell<u32>,
fastsleep: VolatileCell<u32>,
_reserved9: [VolatileCell<u32>; 152],
config: VolatileCell<u32>, // 0x200
version: VolatileCell<u32>,
}
pub enum MainClock {
RCSYS,
OSC0,
PLL,
DFLL,
RC80M,
RCFAST,
RC1M,
}
#[derive(Copy, Clone, Debug)]
pub enum Clock {
HSB(HSBClock),
PBA(PBAClock),
PBB(PBBClock),
PBC(PBCClock),
PBD(PBDClock),
}
#[derive(Copy, Clone, Debug)]
pub enum HSBClock {
PDCA,
FLASHCALW,
FLASHCALWP,
USBC,
CRCCU,
APBA,
APBB,
APBC,
APBD,
AESA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBAClock {
IISC,
SPI,
TC0,
TC1,
TWIM0,
TWIS0,
TWIM1,
TWIS1,
USART0,
USART1,
USART2,
USART3,
ADCIFE,
DACC,
ACIFC,
GLOC,
ABSACB,
TRNG,
PARC,
CATB,
NULL,
TWIM2,
TWIM3,
LCDCA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBBClock {
FLASHCALW,
HRAMC1,
HMATRIX,
PDCA,
CRCCU,
USBC,
PEVC,
}
#[derive(Copy, Clone, Debug)]
pub enum PBCClock {
PM,
CHIPID,
SCIF,
FREQM,
GPIO,
}
#[derive(Copy, Clone, Debug)]
pub enum PBDClock {
BPM,
BSCIF,
AST,
WDT,
EIC,
PICOUART,
}
/// Frequency of the external oscillator. For the SAM4L, different
/// configurations are needed for different ranges of oscillator frequency, so
/// based on the input frequency, various configurations may need to change.
/// When additional oscillator frequencies are needed, they should be added
/// here and the `setup_system_clock` function should be modified to support
/// it.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorFrequency {
/// 16 MHz external oscillator
Frequency16MHz,
}
/// Configuration for the startup time of the external oscillator. In practice
/// we have found that some boards work with a short startup time, while others
/// need a slow start in order to properly wake from sleep. In general, we find
/// that systems that do not work at the fast setting will hang or panic
/// after several entries into WAIT mode.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorStartup {
/// Use a fast startup. ~0.5 ms in practice.
FastStart,
/// Use a slow startup. ~8.9 ms in practice.
SlowStart,
}
/// Which source the system clock should be generated from. These are specified
/// as system clock source appended with the clock that it is sourced from
/// appended with the final frequency of the system. So for example, one option
/// is to use the DFLL sourced from the RC32K with a final frequency of 48 MHz.
///
/// When new options (either sources or final frequencies) are needed, they
/// should be added to this list, and then the `setup_system_clock` function
/// can be modified to support it. This is necessary because configurations
/// must be changed not just with the input source but also based on the
/// desired final frequency.
///
/// For options utilizing an external oscillator, the configurations for that
/// oscillator must also be provided.
#[derive(Copy, Clone, Debug)]
pub enum SystemClockSource {
/// Use the RCSYS clock (which the system starts up on anyways). Final
/// system frequency will be 115 kHz. Note that while this is the default,
/// Tock is NOT guaranteed to work on this setting and will likely fail.
RcsysAt115kHz,
/// Use the internal digital frequency locked loop (DFLL) sourced from
/// the internal RC32K clock. Note this typically requires calibration
/// of the RC32K to have a consistent clock. Final frequency of 48 MHz.
DfllRc32kAt48MHz,
/// Use an external crystal oscillator as the direct source for the
/// system clock. The final system frequency will match the frequency of
/// the external oscillator.
ExternalOscillator {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
/// Use an external crystal oscillator as the input to the internal phase
/// locked loop (PLL) for the system clock. This results in a final
/// frequency of 48 MHz.
PllExternalOscillatorAt48MHz {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
}
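// Usage sketch (illustrative, not taken from any board crate): a board's
// `reset_handler` would normally pick one of these variants before starting
// peripherals, for example
// `unsafe { pm::PM.setup_system_clock(pm::SystemClockSource::DfllRc32kAt48MHz) }`
// or, with a 16 MHz crystal,
// `pm::SystemClockSource::ExternalOscillator { frequency: pm::OscillatorFrequency::Frequency16MHz, startup_mode: pm::OscillatorStartup::SlowStart }`.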
const PM_BASE: usize = 0x400E0000;
const HSB_MASK_OFFSET: u32 = 0x24;
const PBA_MASK_OFFSET: u32 = 0x28;
const PBB_MASK_OFFSET: u32 = 0x2C;
const PBC_MASK_OFFSET: u32 = 0x30;
const PBD_MASK_OFFSET: u32 = 0x34;
static mut PM_REGS: *mut PmRegisters = PM_BASE as *mut PmRegisters;
/// Contains state for the power management peripheral. This includes the
/// configurations for various system clocks and the final frequency that the
/// system is running at.
pub struct PowerManager {
/// Frequency at which the system clock is running.
system_frequency: Cell<u32>,
/// Clock source configuration
system_clock_source: Cell<SystemClockSource>,
}
pub static mut PM: PowerManager = PowerManager {
/// Set to the RCSYS frequency by default (115 kHz).
system_frequency: Cell::new(115000),
/// Set to the RCSYS by default.
system_clock_source: Cell::new(SystemClockSource::RcsysAt115kHz),
};
impl PowerManager {
/// Sets up the system clock. This should be called as one of the first
/// lines in the `reset_handler` within the platform's `main.rs`.
pub unsafe fn setup_system_clock(&self, clock_source: SystemClockSource) {
// save configuration
self.system_clock_source.set(clock_source);
// For now, always go to PS2 as it enables all core speeds
bpm::set_power_scaling(bpm::PowerScaling::PS2);
match clock_source {
SystemClockSource::RcsysAt115kHz => {
// no configurations necessary, already running off the RCSYS
self.system_frequency.set(115000);
}
SystemClockSource::DfllRc32kAt48MHz => {
configure_48mhz_dfll();
self.system_frequency.set(48000000);
}
pm.rs
#[derive(Copy, Clone, Debug)]
pub enum Clock {
HSB(HSBClock),
PBA(PBAClock),
PBB(PBBClock),
PBC(PBCClock),
PBD(PBDClock),
}
#[derive(Copy, Clone, Debug)]
pub enum HSBClock {
PDCA,
FLASHCALW,
FLASHCALWP,
USBC,
CRCCU,
APBA,
APBB,
APBC,
APBD,
AESA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBAClock {
IISC,
SPI,
TC0,
TC1,
TWIM0,
TWIS0,
TWIM1,
TWIS1,
USART0,
USART1,
USART2,
USART3,
ADCIFE,
DACC,
ACIFC,
GLOC,
ABSACB,
TRNG,
PARC,
CATB,
NULL,
TWIM2,
TWIM3,
LCDCA,
}
#[derive(Copy, Clone, Debug)]
pub enum PBBClock {
FLASHCALW,
HRAMC1,
HMATRIX,
PDCA,
CRCCU,
USBC,
PEVC,
}
#[derive(Copy, Clone, Debug)]
pub enum PBCClock {
PM,
CHIPID,
SCIF,
FREQM,
GPIO,
}
#[derive(Copy, Clone, Debug)]
pub enum PBDClock {
BPM,
BSCIF,
AST,
WDT,
EIC,
PICOUART,
}
/// Frequency of the external oscillator. For the SAM4L, different
/// configurations are needed for different ranges of oscillator frequency, so
/// based on the input frequency, various configurations may need to change.
/// When additional oscillator frequencies are needed, they should be added
/// here and the `setup_system_clock` function should be modified to support
/// it.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorFrequency {
/// 16 MHz external oscillator
Frequency16MHz,
}
/// Configuration for the startup time of the external oscillator. In practice
/// we have found that some boards work with a short startup time, while others
/// need a slow start in order to properly wake from sleep. In general, we find
/// that systems that do not work at the fast setting will hang or panic
/// after several entries into WAIT mode.
#[derive(Copy, Clone, Debug)]
pub enum OscillatorStartup {
/// Use a fast startup. ~0.5 ms in practice.
FastStart,
/// Use a slow startup. ~8.9 ms in practice.
SlowStart,
}
/// Which source the system clock should be generated from. These are specified
/// as system clock source appended with the clock that it is sourced from
/// appended with the final frequency of the system. So for example, one option
/// is to use the DFLL sourced from the RC32K with a final frequency of 48 MHz.
///
/// When new options (either sources or final frequencies) are needed, they
/// should be added to this list, and then the `setup_system_clock` function
/// can be modified to support it. This is necessary because configurations
/// must be changed not just with the input source but also based on the
/// desired final frequency.
///
/// For options utilizing an external oscillator, the configurations for that
/// oscillator must also be provided.
#[derive(Copy, Clone, Debug)]
pub enum SystemClockSource {
/// Use the RCSYS clock (which the system starts up on anyways). Final
/// system frequency will be 115 kHz. Note that while this is the default,
/// Tock is NOT guaranteed to work on this setting and will likely fail.
RcsysAt115kHz,
/// Use the internal digital frequency locked loop (DFLL) sourced from
/// the internal RC32K clock. Note this typically requires calibration
/// of the RC32K to have a consistent clock. Final frequency of 48 MHz.
DfllRc32kAt48MHz,
/// Use an external crystal oscillator as the direct source for the
/// system clock. The final system frequency will match the frequency of
/// the external oscillator.
ExternalOscillator {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
/// Use an external crystal oscillator as the input to the internal phase
/// locked loop (PLL) for the system clock. This results in a final
/// frequency of 48 MHz.
PllExternalOscillatorAt48MHz {
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
},
}
const PM_BASE: usize = 0x400E0000;
const HSB_MASK_OFFSET: u32 = 0x24;
const PBA_MASK_OFFSET: u32 = 0x28;
const PBB_MASK_OFFSET: u32 = 0x2C;
const PBC_MASK_OFFSET: u32 = 0x30;
const PBD_MASK_OFFSET: u32 = 0x34;
static mut PM_REGS: *mut PmRegisters = PM_BASE as *mut PmRegisters;
/// Contains state for the power management peripheral. This includes the
/// configurations for various system clocks and the final frequency that the
/// system is running at.
pub struct PowerManager {
/// Frequency at which the system clock is running.
system_frequency: Cell<u32>,
/// Clock source configuration
system_clock_source: Cell<SystemClockSource>,
}
pub static mut PM: PowerManager = PowerManager {
/// Set to the RCSYS frequency by default (115 kHz).
system_frequency: Cell::new(115000),
/// Set to the RCSYS by default.
system_clock_source: Cell::new(SystemClockSource::RcsysAt115kHz),
};
impl PowerManager {
/// Sets up the system clock. This should be called as one of the first
/// lines in the `reset_handler` within the platform's `main.rs`.
pub unsafe fn setup_system_clock(&self, clock_source: SystemClockSource) {
// save configuration
self.system_clock_source.set(clock_source);
// For now, always go to PS2 as it enables all core speeds
bpm::set_power_scaling(bpm::PowerScaling::PS2);
match clock_source {
SystemClockSource::RcsysAt115kHz => {
// no configurations necessary, already running off the RCSYS
self.system_frequency.set(115000);
}
SystemClockSource::DfllRc32kAt48MHz => {
configure_48mhz_dfll();
self.system_frequency.set(48000000);
}
SystemClockSource::ExternalOscillator {
frequency,
startup_mode,
} => {
configure_external_oscillator(frequency, startup_mode);
match frequency {
OscillatorFrequency::Frequency16MHz => self.system_frequency.set(16000000),
};
}
SystemClockSource::PllExternalOscillatorAt48MHz {
frequency,
startup_mode,
} => {
configure_external_oscillator_pll(frequency, startup_mode);
self.system_frequency.set(48000000);
}
}
}
}
unsafe fn unlock(register_offset: u32) {
(*PM_REGS).unlock.set(0xAA000000 | register_offset);
}
unsafe fn select_main_clock(clock: MainClock) {
unlock(0);
(*PM_REGS).mcctrl.set(clock as u32);
}
/// Configure the system clock to use the DFLL with the RC32K as the source.
/// Run at 48 MHz.
unsafe fn configure_48mhz_dfll() {
// Enable HCACHE
flashcalw::FLASH_CONTROLLER.enable_cache();
// start the dfll
scif::setup_dfll_rc32k_48mhz();
// Since we are running at a fast speed we have to set a clock delay
// for flash, as well as enable fast flash mode.
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Choose the main clock
select_main_clock(MainClock::DFLL);
}
/// Configure the system clock to use the 16 MHz external crystal directly
unsafe fn configure_external_oscillator(
frequency: OscillatorFrequency,
startup_mode: OscillatorStartup,
) {
// Use the cache
flashcalw::FLASH_CONTROLLER.enable_cache();
// Need the 32k RC oscillator for things like BPM module and AST.
bscif::enable_rc32k();
// start the external oscillator
match frequency {
OscillatorFrequency::Frequency16MHz => {
match startup_mode {
OscillatorStartup::FastStart => scif::setup_osc_16mhz_fast_startup(),
OscillatorStartup::SlowStart => scif::setup_osc_16mhz_slow_startup(),
};
}
}
// Go to high speed flash mode
flashcalw::FLASH_CONTROLLER.enable_high_speed_flash();
// Set the main clock to be the external oscillator
select_main_clock(MainClock::OSC0);
}
/// Configure the system clock to use the PLL with the 16 MHz external crystal
unsafe fn configure_external_oscillator_pll(
hal.py
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
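# Worked example (illustrative): the linear map above treats 3.2 V as 0 % and
# 4.2 V as 100 %, so a reading of 3.7 V gives 0.0 + (100.0 / 1.0) * 0.5 = 50 %.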
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
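# Worked example (illustrative): an echo round trip of 1746 us gives
# (1746 / 2) / 29.1 ~= 30 cm; machine.time_pulse_us() returns a negative value
# on timeout, which propagates into a negative "distance" here.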
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light intensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
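# Usage sketch (illustrative, not from the original docs): set_servo(LEFT, 50)
# drives the left wheel forward at roughly half speed, set_servo(RIGHT, -100)
# drives the right wheel backward at full speed, and set_servo(LEFT, 0) stops
# the wheel by writing a duty cycle of 0.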
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
self.move(LEFT)
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback through LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def get_sensor_scope(self):
# TODO: implement sensor value caching
| return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level())
hal.py
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light intensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
self.move(LEFT)
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, 100)
elif dir == BACKWARD:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, -100)
def update_sensor_feedback(self):
if self.sensor_feedback:
# Show sensor feedback through LEDs
self.set_led(SONAR, self.is_sonar())
self.set_led(LEFT_LINE, self.is_line(LEFT))
self.set_led(RIGHT_LINE, self.is_line(RIGHT))
def get_sensor_scope(self):
# TODO: implement sensor value caching
return str(self.get_sonar_value()) + ',' \
+ str(self.get_line(LEFT)) + ',' \
+ str(self.get_line(RIGHT)) + ',' \
+ str(self.bat_charge.value()) + ',' \
+ str(self.get_battery_level())
hal.py
LEFT_LINE = 2
RIGHT_LINE = 3
# Directions
STOP = 0
LEFT = 1
RIGHT = 2
SEARCH = 3
FORWARD = 4
BACKWARD = 5
class Sumorobot(object):
# Constructor
def __init__(self):
# Open and parse the config file
with open('config.json', 'r') as config_file:
self.config = ujson.load(config_file)
### PWMs
# Right & Left motor PWMs
self.pwm = {
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light intensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
hal.py
LEFT: machine.PWM(machine.Pin(15), freq=50, duty=0),
RIGHT: machine.PWM(machine.Pin(4), freq=50, duty=0)
}
# Memorise previous servo speeds
self.prev_speed = {LEFT: 0, RIGHT: 0}
### LEDs
# Enable / Disable LED sensor feedback
self.sensor_feedback = True
# Bottom status LED
self.status_led = machine.Pin(self.config['status_led_pin'], machine.Pin.OUT)
# Bottom status LED is in reverse polarity
self.status_led.value(1)
# Sensor LEDs
self.sonar_led = machine.Pin(16, machine.Pin.OUT)
self.left_line_led = machine.Pin(17, machine.Pin.OUT)
self.right_line_led = machine.Pin(12, machine.Pin.OUT)
### Sonar
# To average sonar sensor value
self.sonar_score = 0
# Sonar distance sensor
self.echo = machine.Pin(14, machine.Pin.IN)
self.trigger = machine.Pin(27, machine.Pin.OUT)
### ADCs
# Battery gauge
self.bat_status = 4.3 # voltage
self.move_counter = 0
self.battery_level = 0 # percentage
self.adc_battery = machine.ADC(machine.Pin(32))
self.bat_charge = machine.Pin(25, machine.Pin.IN) # charging / not charging
# The pullups for the phototransistors
machine.Pin(19, machine.Pin.IN, machine.Pin.PULL_UP)
machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
# The phototransistors
self.adc_line_left = machine.ADC(machine.Pin(34))
self.adc_line_right = machine.ADC(machine.Pin(33))
# Set reference voltage to 3.3V
self.adc_battery.atten(machine.ADC.ATTN_11DB)
self.adc_line_left.atten(machine.ADC.ATTN_11DB)
self.adc_line_right.atten(machine.ADC.ATTN_11DB)
# For terminating sleep and loops
self.terminate = False
# For search mode
self.search = False
self.last_line = LEFT
self.search_counter = 0
# Function to set LED states
def set_led(self, led, value):
# Turn the given LED on or off
if led == STATUS:
# Status LED is reverse polarity
self.status_led.value(0 if value else 1)
elif led == SONAR:
self.sonar_led.value(value)
elif led == LEFT_LINE:
self.left_line_led.value(value)
elif led == RIGHT_LINE:
self.right_line_led.value(value)
# Function to get battery level in percentage
def get_battery_level(self):
# When the SumoRobot is not moving
if self.prev_speed[LEFT] == 0 and self.prev_speed[RIGHT] == 0:
# Calculate battery voltage
battery_voltage = round(self.config['battery_coeff'] * (self.adc_battery.read() * 3.3 / 4096), 2)
# Map battery voltage to percentage
temp_battery_level = 0.0 + ((100.0 - 0.0) / (4.2 - 3.2)) * (battery_voltage - 3.2)
# When battery level changed more than 5 percent
if abs(self.battery_level - temp_battery_level) > 5:
# Update battery level
self.battery_level = round(temp_battery_level)
# Return the battery level in percentage
return min(100, max(0, self.battery_level))
# Function to get distance (cm) from the object in front of the SumoRobot
def get_sonar_value(self):
# Send a pulse
self.trigger.value(0)
utime.sleep_us(5)
self.trigger.value(1)
# Wait for the pulse and calculate the distance
return round((machine.time_pulse_us(self.echo, 1, 30000) / 2) / 29.1)
# Function to get boolean if there is something in front of the SumoRobot
def is_sonar(self):
# Get the sonar value
self.sonar_value = self.get_sonar_value()
# When the sonar value is small and the ping actually returned
if self.sonar_value < self.config['sonar_threshold'] and self.sonar_value > 0:
# When not maximum score
if self.sonar_score < 5:
# Increase the sonar score
self.sonar_score += 1
# When no sonar ping was returned
else:
# When not lowest score
if self.sonar_score > 0:
# Decrease the sonar score
self.sonar_score -= 1
# When the sensor saw something more than 2 times
value = True if self.sonar_score > 2 else False
return value
# Function to update the config file
def update_config_file(self):
# Update the config file
with open('config.part', 'w') as config_file:
config_file.write(ujson.dumps(self.config))
os.rename('config.part', 'config.json')
# Function to update line calibration and write it to the config file
def calibrate_line_values(self):
# Read the line sensor values
self.config['left_line_value'] = self.adc_line_left.read()
self.config['right_line_value'] = self.adc_line_right.read()
# Function to get light intensity from the phototransistors
def get_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Return the given line sensor value
if line == LEFT:
return self.adc_line_left.read()
elif line == RIGHT:
return self.adc_line_right.read()
def is_line(self, line):
# Check if the direction is valid
assert line in (LEFT, RIGHT)
# Define config prefix
prefix = 'left' if line == LEFT else 'right'
# Check for line
value = abs(self.get_line(line) - self.config[prefix + '_line_value']) > self.config[prefix + '_line_threshold']
# Update last line direction if line was detected
self.last_line = value if value else self.last_line
# Return the given line sensor value
return value
def set_servo(self, servo, speed):
# Check if the direction is valid
assert servo in (LEFT, RIGHT)
# Check if the speed is valid
assert speed <= 100 and speed >= -100
# Reverse the speed for the right wheel
# So negative speeds make wheels go backward, positive forward
if servo == RIGHT:
speed = -speed
# Save the new speed
self.prev_speed[servo] = speed
# Set the given servo speed
if speed == 0:
self.pwm[servo].duty(0)
else:
# Define config prefix
prefix = 'left' if servo == LEFT else 'right'
# -100 ... 100 to min_tuning .. max_tuning
index = 0 if speed < 0 else 2
min_tuning = self.config[prefix + '_servo_calib'][index]
max_tuning = self.config[prefix + '_servo_calib'][index+1]
if speed < 0:
# Reverse the speed, so smaller negative numbers represent slower speeds and larger
# faster speeds
speed = -1 * (speed + 101)
speed = int((speed + 1) * (max_tuning - min_tuning) / -99 + min_tuning)
self.pwm[servo].duty(speed)
else:
speed = int(speed * (max_tuning - min_tuning) / 100 + min_tuning)
self.pwm[servo].duty(speed)
def move(self, dir):
# Check if the direction is valid
assert dir in (SEARCH, STOP, RIGHT, LEFT, BACKWARD, FORWARD)
# Go to the given direction
if dir == STOP:
self.set_servo(LEFT, 0)
self.set_servo(RIGHT, 0)
elif dir == LEFT:
self.set_servo(LEFT, -100)
self.set_servo(RIGHT, 100)
elif dir == RIGHT:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT, -100)
elif dir == SEARCH:
# Change search mode after X seconds
if self.search_counter == 50:
self.search = not self.search
self.search_counter = 0
# When in search mode
if self.search:
self.move(FORWARD)
elif self.last_line == RIGHT:
|
else:
self.move(RIGHT)
# Increase search counter
self.search_counter += 1
elif dir == FORWARD:
self.set_servo(LEFT, 100)
self.set_servo(RIGHT | self.move(LEFT) | conditional_block |
bdplatfrom.js | this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg);
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let successCallback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
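// rough placement: r / 0.5625 is the height of a 9:16 area of width r, so this appears to centre that area vertically and put the banner near its lower edge (empirically tuned offsets)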
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
// note: the two branches were identical and referenced the undefined `scale`/`e`; use the resize callback's `size` argument instead
top = sysInfo.windowHeight - size.height - 32;
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
console.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
| /*** 离线双倍奖励激励视频 */ | random_line_split |
|
bdplatfrom.js | = sysInfo.windowHeight - e.height-32;
} else {
top = sysInfo.windowHeight - e.height - 32;
}
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
console.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
function btnClick(e) // note: was bare method-shorthand (a runtime error here); declared as a function, though nothing invokes it in this flow
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
if (_callback)
{
_callback(_param);
}
})
}
/**主动分享 */
onShare(call)
{
if(!call)
{
return ;
};
swan.shareAppMessage(
{
title: '炎炎夏日,你确定不来清凉一夏么?',//转发标题,不传则默认使用后台配置或当前小游戏的名称。
//imageUrl: 'wxlocal/share1.jpg',//转发显示图片的链接,可以是网络图片路径或本地图片文件路径或相对代码包根目录的图片文件路径,显示图片长宽比推荐 5:4
success()
{
console.log('分享成功');
console.log('分享成功');
// call && call(true)
//显示分享成功的消息提示框
swan.showToast({
title:"分享成功",
//duration:2000,
icon:"none"
})
},
fail(e) {
swan.showToast({
title:"分享失败",
//duration:2000,
icon:"none"
})
console.log('分享失败');
}
})
};
/**短震动 */
vibrateShort()
{
swan.vibrateShort();
}
/**长震动 */
vibrateLong()
{
swan.vibrateLong();
}
/**获取设备信息 同步 */
getSystemInfoSync()
{
let data= swan.getSystemInfoSync();
console.log('手机品牌信息:', data.brand);
}
/**显示Loading提示框 */
showLoading(title, call) // accept the title and callback that the body below already references
{
swan.showLoading(
{
title: title,
success()
{
call && call(true)
},
fail()
{
call && call(false)
}
})
};
/**隐藏 loading 提示框 */
hideLoading()
{
swan.hideLoading(
{
success()
{
console.log("隐藏加载提示框 成功");
},
fail()
{
console.log("隐藏加载提示框 失败");
}
})
};
/**视频组件控制 */
createVideo()
{
//swan.createVideoContext 创建并返回 video 上下文 videoContext 对象。通过 videoId 跟一个 video 组件绑定,通过它可以操作一个 video 组件
//console.log("创建视频")
};
/**退出视频控制 */
videoHide()
{
// VideoContext.play;
// VideoContext.pause;
// VideoContext.seek;
// VideoContext.sendDanmu;
// VideoContext.showStatusBar;
//console.log("退出视频控制");
};
/**获取玩家信息 */
getUserInfo()
{
let p = new Promise((resolve, reject) => {
swan.getUserInfo(
{
// withCredentials: false, //是否需要返回敏感数据
success: function (res)
{
var userInfo = res.userInfo
this.setData(
{
nickname: userInfo.nickName || '百度网友',
imageSrc: userInfo.avatarUrl || '../../images/avator.png',
nameColor: 'active'
});
resolve(userInfo);
},
fail:function (res)
{
console.log(res);
swan.showToast(
{
title: '请先授权'
});
}
})
});
p.then(function(res) // p is already a Promise instance; calling it as p() would throw
{
console.log("getUserInfo res ==== ", res)
});
};
/**是否支持卖量 */
isGeneralize()
{
return false;
};
/**是否只有视频 没有分享 */
isOnlyVideo()
{
return true;
}
/**是否至此录屏 */
isPlatformSupportRecord ()
{
return false;
}
//停止录屏
stopRecorder()
{
console.log("录屏功能");
}
// // 获取今日日期
// getTodayDate() {
// var myDate = new Date();
// | var year = myDate.getFullYear();
// var month = myDate.getMonth() + 1;
// var date = myDate.getDate();
// var today = '' + year + '_' + month + '_' + date;
// return | identifier_body |
|
bdplatfrom.js | Date();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg) | fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let successCallback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
// note: the two branches were identical and referenced the undefined `scale`/`e`; use the resize callback's `size` argument instead
top = sysInfo.windowHeight - size.height - 32;
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});});
}).catch(err=>
{ console.log("视频加载失败 重新加载一边");
videoAd.load().then(()=>
{
videoAd.show().then(()=>
{
console.log('广告显示成功');
});
});
console.log('广告组件出现问题 重新加载一次', err);
})
videoAd.onError(onError)
videoAd.onClose(onClose)
}
return videoAd;
}
/**
* 百度授权
*/
authorize( call)
{
console.log("baidu authorize===");
function p()
{
//异步 同步
return new Promise(function(resolve, reject)
{
//设置 获取用户在该小游戏中已授权过的权限
swan.getSetting({
success:function(res)
{
if(res && res.userInfo)
{
console.log("小游戏中已授权过的权限")
call && call(true) //已经授权过了
}
else
{
resolve()
}
},
fail:function()
{
console.log("小游戏未授权过权限")
call && call(false)
},
})
})
}
p().then(function()
{
//授权小游戏使用某项功能或获取用户的某些数据
function btnClick(e) // note: was bare method-shorthand (a runtime error here); declared as a function, though nothing invokes it in this flow
{
let scope = e.currentTarget.id;
swan.authorize({
scope,
success: res => {
swan.showToast({
title: '授权成功'
});
},
fail: err => {
swan.showToast({
title: '授权失败'
});
console.log('authorize fail', err);
}
});
}
})
};
/**小游戏返回前台 */
onShow(_callback)
{
swan.onShow(function (_param)
{
if (_callback) {
_callback(_param);
}
})
}
/**小游戏退到后台 */
onHide(_callback)
{
swan.onHide(function (_param)
{
| ;
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
| conditional_block |
bdplatfrom.js | 台配置******************************************** */
this.AppID = "17008570";
//正常标准广告
this.NormalAdunits = [
"6580232",
"6580230",
"6580229",
"6580225",
"6580224",
];
//弹窗
this.OtherAdunits = [
];
/*** 成功结算 */
/*** 离线双倍奖励激励视频 */
this.VideoAd1 = "6580222";
/*** 金币获取激励视频 */
this.VideoAd2 = "6580147";
/**签到双倍奖励视频 */
this.VideoAd3 = "6580146";
/**再来一份视频奖励 */
this.VideoAd4 = "6580145";
/**转盘获取次数奖励 */
this.VideoAd5 = "6580144";
/**转盘双倍奖励视频 */
this.VideoAd6= "6580143";
/**金币不足视频 */
this.VideoAd7= "6580142";
/**复活激励视频 */
this.VideoAd8= "6580010";
/**结算双倍奖励视频 */
this.VideoAd9= "6580006"; //OK
/************************************************* */
this.appSid="e3e02a8c";
let that = this;
let shareFunc = swan.onShareAppMessage;
shareFunc(() => ({
title: '炎炎夏日,你确定不来清凉一夏么?',
imageUrl: 'wxlocal/share1.jpg'
}));
//右上角menu转发
swan.showShareMenu({});
//是否开启震动
this.isOpenVibration = true;
this.shareFailCnt = 0;
this.adUnitId = null;
// this.lastShareDate = this.getLastShareDate();
// this.todayShareCount = this.getTodayShareCount();
}
/**登录 */
login(cb)
{
swan.login({
success:res =>
{
let data = {};
if (res.code)
{
data.code = 0;
data.wxcode = res.code;
console.log("bd.login sucess res.code="+res.code);
}
else
{
console.log('bd.login !' + res.errMsg);
data.code = -1;
data.msg = res.errMsg;
}
cb && cb(data);
},
fail(res)
{
console.log('bd.login fail !' + res.errMsg);
let data = {};
data.code = -1;
data.msg = res.errMsg;
cb && cb(data);
}
}
);
}
/**检查用户登录状态是否有效 */
checkSession()
{
swan.checkSession(
{
success: res => {
console.log('登录态有效');
},
fail: function ()
{
console.log('登录态无效');
}
});
}
/**获取小游戏启动参数 */
launchInfo()
{
return swan.getLaunchOptionsSync()
}
/**加载 */
startLoading(_callback)
{
let that = this;
that._loadingCallback = _callback;
}
onLoading(_percent)
{
let that = this;
if (that._loadingCallback) {
that._loadingCallback(_percent);
return true;
}
return false;
}
/**退出小游戏 */
exitMiniProgram()
{
console.log("退出小游戏");
}
/**跳转小程序 进入另一个 */
navigateToMiniProgram(_data)
{
if (swan.navigateToMiniProgram)
{
let successCallback = _data.success;
_data.success = function (res) {
window["UserData"].instance.removeNavAppId(_data.appId);
if (successCallback && typeof (successCallback) == "function")
{
successCallback(res);
}
}
swan.navigateToMiniProgram(_data);
}
}
/**跳转小程序 返回上一个小程序 暂时还没 等待验证 */
navigateBackMiniProgram(data)
{
let successCallback = data.success
data.success = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(true);
}
}
data.fail = function (res) {
if (successCallback && typeof (successCallback) == "function")
{
successCallback(false);
}
}
swan.navigateBackMiniProgram(data);
}
/**创建底部剧中 Banner adUnitId:标识 finishCallback:完成回调 */
createBannerAd(_adUnitId, _finishCallback)
{
if(swan.createBannerAd==null)
{
return null;
};
if (this.bannerAd)
{
this.bannerAd.destroy();
}
const {windowWidth,windowHeight,} = swan.getSystemInfoSync();
let self = this
var o = Math.floor;
var sysInfo = swan.getSystemInfoSync();
var r = o(1 * sysInfo.windowWidth), n = o((sysInfo.windowWidth - r) / 2), d = sysInfo.screenHeight;
var top = d - o(r / 3.4) - (d - r / 0.5625) / 2 + 10;
var bannerAd =swan.createBannerAd({
adUnitId: _adUnitId,
appSid:this.appSid,
style: {
left: n+40, //+ 向右偏移 -向左偏移
top: top+80, // +向下 偏移 -向上偏移
width: r-80
}
});
// 回调里再次调整尺寸,要确保不要触发死循环!!!
bannerAd.onResize(size =>
{
// note: the two branches were identical and referenced the undefined `scale`/`e`; use the resize callback's `size` argument instead
top = sysInfo.windowHeight - size.height - 32;
if (bannerAd) {
bannerAd.style.top = top;
bannerAd.style.left = (sysInfo.windowWidth - bannerAd.style.realWidth) * 0.5;
}
});
bannerAd.onLoad(() =>
{
bannerAd.show().then(()=>
{
console.log("加载成功 开始展示");
_finishCallback && _finishCallback();
}).catch(err =>
{
console.log('广告组件出现问题', err);
})
});
this.bannerAd = bannerAd
return bannerAd
}
//关闭广告
closeBannerAd()
{
if (!this.bannerAd)
{
return;
}
this.bannerAd.destroy();
this.bannerAd = null;
this.adUnitId = null;
};
//设置广告的显示与隐藏
setBannerVisible(visible)
{
if (!this.bannerAd)
{
return;
}
if (visible)
{
this.bannerAd.show();
}
else
{
this.bannerAd.hide();
}
}
//视频广告 是否预加载 需要在plantfrom中添加
createRewardedVideoAd(_adUnitId, callback)
{
if(!swan.createRewardedVideoAd)
{
return;
};
//回调
let videoItemInfo =
{
adUnitId: _adUnitId,
appSid: this.appSid
}
if(!this.videoAdInfo)
{
this.videoAdInfo = {}
this.videoAdInfo.ins = swan.createRewardedVideoAd(videoItemInfo);
}
else
{
console.log(" 有视频广告信息");
this.videoAdInfo.ins.offError(this.videoAdInfo.error)
this.videoAdInfo.ins.offLoad(this.videoAdInfo.load)
this.videoAdInfo.ins.offClose(this.videoAdInfo.close)
}
let onLoad = function()
{
console.log('视频广告加载成功');
}
let onError = function(errCode, errMsg)
{
console.log(`createRewardedVideoAd errCode = ${errCode}, ${errMsg}`)
callback && callback(false)
}
let onClose = function(res)
{
if(res && res.isEnded)
{
console.log("视频看完了,发放奖励")
window["TaskAchMgr"].instance.refreshProgress(1, 2, 1);
callback && callback(true)
}
else
{
console.log("视频没看完")
callback && callback(false)
}
};
this.videoAdInfo.error = onError
this.videoAdInfo.load = onLoad
this.videoAdInfo.close = onClose
let videoAd = this.videoAdInfo.ins;
if (videoAd)
{
videoAd.load().then(()=>
{
console.log('广告显示成功');
videoAd.show().then(onLoad).catch(err=>{ videoAd.load().then(()=>
| /*****平 | identifier_name |
|
blockparse.py | = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
'''
pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename)
try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
|
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
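# 80-byte header layout: version (4), previous block hash (32), merkle root (32), timestamp (4), nBits (4), nonce (4)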
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'01ff'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return the transaction count and the remaining unparsed data
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
| logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions) | conditional_block |
blockparse.py | :36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'01ff'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return the transaction count and the remaining unparsed data
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
rawcount, count, data = get_count(transactions)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
txhash = get_hash(raw_transaction)
yield height, txhash, transaction
class Node(object):
'''
tree node
'''
def __init__(self, parent=None, blockhash=None, blocktime=''):
self.parent = parent
self.blockhash = blockhash
self.blocktime = blocktime
def countback(self, searchblock=NULLBLOCK):
r'''
return list of nodes that ends with this block
if attempting to get "height", caller is responsible to zero-base
the result, counting the genesis block as height 0
>>> node = Node(None, NULLBLOCK) # not a real node
>>> node = Node(node, b'\0') # height 0, genesis block
>>> node = Node(node, b'\1') # height 1
>>> node = Node(node, b'\2') # height 2
>>> len(node.countback())
3
>>> len(node.countback(b'\0'))
2
>>> try:
... node.countback(None)
... except AttributeError:
... print('failed')
failed
'''
traversed = [self]
parent = self.parent
while parent.blockhash != searchblock:
#logging.debug('parent.blockhash: %s', show_hash(parent.blockhash))
traversed.insert(0, parent)
parent = parent.parent
return traversed
def __str__(self):
return "{'Node': {'hash': '%s', 'timestamp': '%s'}}" % (
show_hash(self.blockhash),
self.blocktime)
__repr__ = __str__
def reorder(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
removes orphan blocks and corrects height
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
lastnode = Node(None, NULLBLOCK)
chains = [[lastnode]]
logging.debug('chains: %s', chains)
chain = 0
for height, header, transactions in blocks:
parsed = parse_blockheader(header)
previous, blockhash = parsed[1], parsed[6]
blocktime = timestamp(parsed[3])
if previous != lastnode.blockhash:
logging.warning('reorder at block %s',
Node(None, blockhash, blocktime))
logging.debug('previous block should be: %s', show_hash(previous))
logging.info('lastnode: %s', lastnode)
found, count = None, 0
try:
logging.debug('assuming previous block in this same chain')
nodes = lastnode.countback(previous)
found = nodes[0].parent
logging.info('reorder found %s %d blocks back',
found, len(nodes) + 1)
chain = len(chains)
chains.append([])
except AttributeError:
logging.debug('searching other chains')
for chain in reversed(chains):
node = chain[-1]
if node.blockhash == previous:
logging.info('reorder found %s at end of another chain',
found)
found = node
chain = chains.index(chain)
for chain in reversed(chains):
found = ([node for node in chain
if node.blockhash == previous] + [None])[0]
if found is not None:
logging.info('reorder found %s in another chain',
found)
chain = len(chains)
chains.append([])
break
if found is None:
raise ValueError('Previous block %s not found', previous)
else:
lastnode = found
# sanity check on above programming
assert_true(previous == lastnode.blockhash)
node = Node(lastnode, blockhash, blocktime)
chains[chain].append(node)
logging.info('current chain: %d out of %d', chain, len(chains))
lastnode = node
nodes = chains[chain][-1].countback()
logging.info('final [real] height: %d out of %d', len(nodes) - 1, height)
print(nodes)
def parse_transaction(data):
'''
return parsed transaction
'''
version = data[:4]
raw_transaction = version
logging.info('transaction version: %s', show_long(version))
raw_in_count, in_count, data = get_count(data[4:])
logging.info('number of transaction inputs: %d', in_count)
raw_inputs, inputs, data = parse_inputs(in_count, data)
logging.debug('length of data after parse_inputs: %d', len(data))
raw_out_count, out_count, data = get_count(data)
logging.info('number of transaction outputs: %d', out_count)
raw_outputs, outputs, data = parse_outputs(out_count, data)
logging.debug('length of data after parse_outputs: %d', len(data))
raw_transaction += (raw_in_count + b''.join(raw_inputs) +
raw_out_count + b''.join(raw_outputs))
lock_time, data = data[:4], data[4:]
raw_transaction += lock_time
logging.info('lock time: %s', to_hex(lock_time))
logging.debug('raw transaction (%d bytes): %s',
len(raw_transaction), to_hex(raw_transaction))
transaction = [version, raw_in_count, inputs, raw_out_count,
outputs, lock_time]
logging.debug('raw transaction split: %s', transaction)
logging.info('transaction hash: %s', show_hash(get_hash(raw_transaction)))
return raw_transaction, transaction, data
def parse_inputs(count, data):
'''
return transaction inputs
'''
raw_inputs = []
inputs = []
for index in range(count):
logging.debug('parse_inputs: len(data): %d', len(data))
tx_input, input_split, data = parse_input(data)
raw_inputs.append(tx_input)
inputs.append(input_split)
return raw_inputs, inputs, data
def | parse_outputs | identifier_name |
|
blockparse.py |
__str__ = __repr__
bytevalue = lambda byte: ord(byte)
bytevalues = lambda string: map(ord, string)
byte = chr
FileNotFoundError = IOError
else: # python3
bytevalue = lambda byte: byte
bytevalues = list
byte = lambda number: chr(number).encode('latin1')
LOGLEVEL = getattr(logging, os.getenv('LOGLEVEL', 'INFO'))
logging.getLogger().level=logging.DEBUG if __debug__ else LOGLEVEL
DEFAULT = sorted(glob(os.path.expanduser('~/.bitcoin/blocks/blk*.dat')))
MAGIC = {
'bitcoin': binascii.a2b_hex(b'F9BEB4D9'),
'dogecoin': binascii.a2b_hex(b'C0C0C0C0'),
'testnet': binascii.a2b_hex(b'FABFB5DA'),
'testnet3': binascii.a2b_hex(b'0B110907'),
'namecoin': binascii.a2b_hex(b'F9BEB4FE'),
'americancoin': binascii.a2b_hex(b'414D433A'),
}
VARINT = {
# struct format, offset, length
# remember in Python3 b'\xfd'[0] == 253
0xfd: ('<H', 1, 2),
0xfe: ('<L', 1, 4),
0xff: ('<Q', 1, 8),
}
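# Bitcoin varint encoding: a leading byte below 0xfd is the value itself; 0xfd, 0xfe and 0xff prefix a little-endian 2-, 4- or 8-byte integer respectively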
# extend VARINT for Python2:
VARINT.update(dict((chr(n), l) for n, l in VARINT.items()))
UNPACKER = {
# fetch using len(bytestring)
1: 'B',
2: '<H',
4: '<L',
8: '<Q',
}
NULLBLOCK = b'\0' * 32 # pointed to by genesis block
def nextprefix(openfile):
'''
helper function for nextchunk
tries to read block prefix from an open file
'''
try:
prefix = openfile.read(8)
except AttributeError: # openfile is None
prefix = b''
return prefix
def nextchunk(blockfiles=None, minblock=0, maxblock=sys.maxsize, wait=True):
'''
generator that fetches and returns raw blocks out of blockfiles
with defaults, waits forever until terminated by signal
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
fileindex = 0
currentfile = None
done = False
while True:
prefix = nextprefix(currentfile)
if prefix == b'':
try:
newfile = open(blockfiles[fileindex], 'rb')
fileindex += 1
if fileindex == len(blockfiles):
blockfiles.append(nextfile(blockfiles[-1]))
currentfile = newfile
except FileNotFoundError:
if not wait:
logging.info('end of current data, not waiting')
done = True
else:
logging.debug('waiting for %s to come online',
blockfiles[fileindex])
time.sleep(10)
continue
if done:
return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on modern Python; returning ends the iteration
else:
magic = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
'''
pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename)
try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions)
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if | return 'b' + super(bytes, self).__repr__() | identifier_body |
|
blockparse.py | = prefix[:4]
blocksize = struct.unpack('<L', prefix[4:])[0]
logging.debug('yielding block of size %d', blocksize)
yield prefix + currentfile.read(blocksize)
def nextfile(filename):
'''
returns "next" filename in series from numbered files e.g. blk0001.dat
>>> nextfile('blk0001.dat')
'blk0002.dat'
>>> try: nextfile('blk.dat')
... except: pass
>>> nextfile('00041')
'00042'
''' | try:
match = re.compile(pattern).match(filename).groupdict()
except AttributeError as match_failed:
raise ValueError('No numeric pattern found in {}'.format(filename))
newnumber = '{number:0{width}}'.format(
number=int(match['number']) + 1,
width=len(match['number']))
filename = match['prefix'] + newnumber + match['suffix']
return os.path.join(directory, filename) if directory else filename
def nextblock(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
generator that fetches and returns raw blocks out of blockfiles
'''
minheight, maxheight = int(minblock), int(maxblock)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
for blockfile in blockfiles:
magic = ''
index = 0
logging.debug('blockfile "%s" of blockfiles %s', blockfile, blockfiles)
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.debug('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.debug('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.debug('block size: %d', blocksize)
logging.debug('block header: %r', blockheader)
logging.debug('transactions (partial): %r', transactions[:80])
yield (height, blockheader, transactions)
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
dump out block files
'''
minheight, maxheight = int(minblock), int(maxblock)
logging.debug('minheight: %d, maxheight: %d', minheight, maxheight)
height = 0
reversemagic = dict([[value, key] for key, value in MAGIC.items()])
blockfiles = blockfiles or DEFAULT
# if file was specified on commandline, make it into a list
for blockfile in blockfiles:
magic = ''
index = 0
with open(blockfile, 'rb') as datainput:
blockdata = datainput.read() # not necessarily very efficient
logging.warning('NOTE: "height" values shown are relative'
' to start of first file and may include'
' orphaned blocks')
while index < len(blockdata):
logging.debug('blockparser at index %d out of %d bytes',
index, len(blockdata))
magic = blockdata[index:index + 4]
blocksize = struct.unpack('<L', blockdata[index + 4:index + 8])[0]
blockheader = blockdata[index + 8:index + 88]
transactions = blockdata[index + 88:index + blocksize + 8]
index += blocksize + 8
if minheight <= height <= maxheight:
logging.info('height: %d', height)
logging.debug('magic: %s', binascii.b2a_hex(magic))
logging.info('block type: %s', reversemagic.get(
magic, 'unknown'))
logging.info('block size: %d', blocksize)
logging.info('block header: %r', blockheader)
parse_blockheader(blockheader)
logging.info('transactions (partial): %r', transactions[:80])
count, data = parse_transactions(transactions)
logging.info('transaction count: %d', count)
logging.debug('remaining data (partial): %r', data[:80])
elif height > maxheight:
logging.debug('height %d > maxheight %d', height, maxheight)
break # still executes `height += 1` below!
else:
logging.debug('height: %d', height)
height += 1
logging.debug('height: %d, maxheight: %d', height, maxheight)
if height > maxheight:
break
def parse_blockheader(blockheader):
'''
return contents of block header
'''
version = blockheader[:4]
previous = blockheader[4:36]
merkle_root = blockheader[36:68]
unix_time = blockheader[68:72]
nbits = blockheader[72:76]
nonce = blockheader[76:]
blockhash = get_hash(blockheader)
if len(nonce) != 4:
raise ValueError('Nonce wrong size: %d bytes' % len(nonce))
logging.info('block version: %s', show_long(version))
logging.info('previous block hash: %s', show_hash(previous))
logging.info('merkle root: %s', show_hash(merkle_root))
logging.info('unix time: %s', timestamp(unix_time))
logging.info('nbits: %r', to_hex(nbits))
logging.info('nonce: %s', to_hex(nonce))
logging.info('block hash: %s', show_hash(blockhash))
return version, previous, merkle_root, unix_time, nbits, nonce, blockhash
def to_long(bytestring):
'''
for unpacking 8, 16, 32, or 64-bit number
'''
return struct.unpack(UNPACKER[(len(bytestring))], bytestring)[0]
def show_long(bytestring):
'''
for displaying 32-bit number
'''
number = to_long(bytestring)
return '0x%08x (%d)' % (number, number)
def timestamp(bytestring):
'''
for displaying 32-bit number as UTC time
'''
return datetime.utcfromtimestamp(to_long(bytestring)).isoformat()
def to_hex(bytestring):
'''
for displaying bytes in hexadecimal
the str() and .decode() stuff is necessary to get an unadorned string
in both Python2 and Python3
to_hex('\x01\xff')
'01ff'
'''
logging.debug('to_hex bytestring: %r', bytestring)
return str(binascii.b2a_hex(bytestring).decode('utf8'))
def get_hash(bytestring, repeat=2):
'''
return sha256 hash digest of bytestring
default is to return hash of hash; for simple hash, pass `repeat=1`
'''
for iteration in range(repeat):
bytestring = hashlib.sha256(bytestring).digest()
return bytestring
def show_hash(bytestring):
'''
return a sha256 hash, or any other bytestring, reversed and hexlified
'''
return to_hex(bytestring[::-1])
def parse_transactions(data):
'''
return the transaction count and the remaining unparsed data
'''
transactions = []
rawcount, count, data = get_count(data)
for index in range(count):
raw_transaction, transaction, data = parse_transaction(data)
transactions.append(raw_transaction)
return count, data
def next_transaction(blockfiles=None, minblock=0, maxblock=sys.maxsize):
'''
iterates over each transaction in every input block
'''
logging.debug('blockfiles: %s', blockfiles)
blockfiles = blockfiles or DEFAULT
blocks = nextblock(blockfiles, minblock, maxblock)
for height, header, transactions in blocks:
raw | pattern = r'^(?P<prefix>[^0-9]*)(?P<number>[0-9]+)(?P<suffix>[^0-9]*)$'
directory, filename = os.path.split(filename) | random_line_split |
main.rs | let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
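// e.g. a requested --filter of 4 becomes 5, so the window always has a single centre cell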
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
// cast before subtracting: on usize, `j - offset` would underflow for the left/upper half of the window
dx[idx] = j as isize - offset as isize;
dy[idx] = i as isize - offset as isize;
idx += 1;
}
}
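// dx/dy now hold, for each cell of the filter_size x filter_size window, its column/row offset from the centre cell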
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![]; | for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 { // require at least 8 valid neighbours; 6 is the theoretical minimum for the quadratic fit
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
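// Least-squares normal equations for z = ax^2 + by^2 + cxy + dx + ey with f fixed at 0
// (elevations are taken relative to the centre cell, so the fitted surface passes through the origin);
// solving A*x = b below yields the five coefficients used for the slope, aspect and curvature grids.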
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, | let mut zs = vec![];
| random_line_split |
main.rs | (x4, x2y2, 0f64, 0f64, 0f64),
RowVector5::new(x2y2, x4, 0f64, 0f64, 0f64),
RowVector5::new(0f64,0f64,x2y2, 0f64, 0f64),
RowVector5::new(0f64, 0f64, 0f64, x2, 0f64),
RowVector5::new(0f64, 0f64, 0f64, 0f64, x2),
]);
let b = Vector5::new(zx2, zy2, zxy, zx, zy);
let fitted_surface = Quadratic2d::from_normals_origin(a, b);
for i in 0..num_valid {
z_act = zs[i];
sum_x += z_act;
sum_xx += z_act * z_act;
z_pred = fitted_surface.solve(xs[i], ys[i]);
sum_y += z_pred;
sum_yy += z_pred * z_pred;
sum_xy += z_act * z_pred;
}
n = num_valid as f64;
let noom = n * sum_xy - (sum_x * sum_y);
let den = (n * sum_xx - (sum_x * sum_x)).sqrt() * ((n * sum_yy - (sum_y * sum_y)).sqrt());
if noom == 0f64 || den == 0f64 {
r = 0f64;
} else {
r = noom / den;
}
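// r is the Pearson correlation between observed and fitted relative elevations; r^2 is written out below as the goodness-of-fit grid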
slopes[col as usize] = fitted_surface.slope();
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
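// Slope and aspect come from the first-order terms d and e; the curvature
// measures combine a, b and c with d and e.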
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
// main.rs
aspects[col as usize] = fitted_surface.aspect();
prof_cs[col as usize] = fitted_surface.profile_convexity();
plan_cs[col as usize] = fitted_surface.plan_convexity();
long_cs[col as usize] = fitted_surface.longitudinal_curvature();
cross_cs[col as usize] = fitted_surface.cross_sectional_curvature();
procmin_cs[col as usize] = fitted_surface.min_prof_convexity();
gofs[col as usize] = r * r;
}
}
}
tx.send(
(row,
slopes,
aspects,
prof_cs,
plan_cs,
long_cs,
cross_cs,
procmin_cs,
gofs)
).unwrap();
}
});
}
for row in 0..rows {
let data = rx.recv().expect("Error receiving data from thread.");
outputs[0].set_row_data(data.0, data.1);
outputs[1].set_row_data(data.0, data.2);
outputs[2].set_row_data(data.0, data.3);
outputs[3].set_row_data(data.0, data.4);
outputs[4].set_row_data(data.0, data.5);
outputs[5].set_row_data(data.0, data.6);
outputs[6].set_row_data(data.0, data.7);
outputs[7].set_row_data(data.0, data.8);
if configurations.verbose_mode {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Performing analysis: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!("Saving data...")
};
for o in 0..outputs.len() {
outputs[o].configs.palette = "grey.plt".to_string();
outputs[o].add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
tool_name
));
outputs[o].add_metadata_entry(format!("Input file: {}", input_file));
outputs[o].add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
let _ = match outputs[o].write() {
Ok(_) => {
if configurations.verbose_mode {
println!("Output file {:?} written", o+1);
}
}
Err(e) => return Err(e),
};
}
if configurations.verbose_mode {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
// Equation of a 2d quadratic model:
// z(x,y) = ax^2 + by^2 + cxy + dx + ey + f
#[derive(Default, Clone, Copy)]
struct Quadratic2d {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64
}
impl Quadratic2d {
fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Quadratic2d {
Quadratic2d {
a: a,
b: b,
c: c,
d: d,
e: e,
f: f
}
}
// solves a system of normal equations ax = b
// fn from_normal_equations(a: Matrix6<f64>, b: Vector6<f64>) -> Quadratic2d {
// let decomp = a.lu();
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn from_normals_origin(a: Matrix5<f64>, b: Vector5<f64>) -> Quadratic2d {
let decomp = a.lu();
if decomp.is_invertible() {
let x = decomp.solve(&b).expect("Linear resolution failed.");
Quadratic2d::new(
*x.get(0).unwrap(), // a
*x.get(1).unwrap(), // b
*x.get(2).unwrap(), // c
*x.get(3).unwrap(), // d
*x.get(4).unwrap(), // e
0f64, //f
)
} else {
Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
}
}
// fn from_decomposed_normals(
// decomp: LU<f64, nalgebra::base::dimension::U6, nalgebra::base::dimension::U6>,
// b: Vector6<f64>
// ) -> Quadratic2d {
// if decomp.is_invertible() {
// let x = decomp.solve(&b).expect("Linear resolution failed.");
// Quadratic2d::new(
// *x.get(0).unwrap(), // a
// *x.get(1).unwrap(), // b
// *x.get(2).unwrap(), // c
// *x.get(3).unwrap(), // d
// *x.get(4).unwrap(), // e
// *x.get(5).unwrap() // f
// )
// } else {
// Quadratic2d::new(0f64,0f64,0f64,0f64,0f64,0f64)
// }
// }
fn slope(&self) -> f64 {
// (self.a*self.a + self.b*self.b).sqrt().atan().to_degrees()
(self.d*self.d + self.e*self.e).sqrt().atan()
}
fn aspect(&self) -> f64 {
if self.e == 0f64 || self.d == 0f64 {
0f64
} else {
(self.e / self.d).atan()
}
}
fn profile_convexity(&self) -> f64 {
let nu = -200f64 * ((self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)) * (1f64 + (self.d*self.d) + (self.e*self.e)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn plan_convexity(&self) -> f64 {
let nu = 200f64 * ((self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e));
let de = ((self.e*self.e) + (self.d*self.d)).powf(1.5);
if nu == 0f64 || de == 0f64 {
0f64
} else {
nu / de
}
}
fn longitudinal_curvature(&self) -> f64 {
let nu = (self.a*self.d*self.d) + (self.b*self.e*self.e) + (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
fn cross_sectional_curvature(&self) -> f64 {
let nu = (self.b*self.d*self.d) + (self.a*self.e*self.e) - (self.c*self.d*self.e);
let de = (self.d*self.d) + (self.e*self.e);
if nu == 0f64 || de == 0f64 {
0f64
} else{
-2f64*(nu / de)
}
}
// fn max_prof_convexity(&self) -> f64 {
// (self.a * -1f64) - self.b + ((self.a - self.b).powi(2) + (self.c * self.c)).sqrt()
// }
fn min_prof_convexity(&self) -> f64 {
|
// main.rs
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-d" || flag_val == "-dem" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-filter" {
if keyval {
filter_size = vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
} else {
filter_size = args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val))
as usize;
}
}
}
if filter_size < 3 { filter_size = 3; }
// The filter dimensions must be odd numbers such that there is a middle pixel
if (filter_size as f64 / 2f64).floor() == (filter_size as f64 / 2f64) {
filter_size += 1;
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if configurations.verbose_mode {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
let resolution = input.configs.resolution_x; // assume square
let path_parts: Vec<&str> = output_file.rsplitn(2, ".").collect();
let mut outputs: [Raster; 8] = [
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Slp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "Asp", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "ProC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PlaC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "LonC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "CrsC", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "PrCM", &path_parts[0]), &input),
Raster::initialize_using_file(&format!("{}_{}.{}", &path_parts[1], "GoF", &path_parts[0]), &input)
];
let start = Instant::now();
// no weights simplifies matrices
let offset = (filter_size - 1) / 2;
let num_cells = filter_size * filter_size;
// determine filter offsets
let mut dx = vec![0isize; num_cells];
let mut dy = vec![0isize; num_cells];
let mut idx = 0usize;
for i in 0..filter_size {
for j in 0..filter_size {
dx[idx] = (j - offset) as isize;
dy[idx] = (i - offset) as isize;
idx += 1;
}
}
let num_procs = num_cpus::get() as isize;
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let dx = dx.clone();
let dy = dy.clone();
let tx = tx.clone();
// let a_decomp = a_decomp.clone();
thread::spawn(move || {
let mut z: f64;
let mut zi: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut slopes = vec![nodata; columns as usize];
let mut aspects = vec![nodata; columns as usize];
let mut prof_cs = vec![nodata; columns as usize];
let mut plan_cs = vec![nodata; columns as usize];
let mut long_cs = vec![nodata; columns as usize];
let mut cross_cs = vec![nodata; columns as usize];
let mut procmin_cs = vec![nodata; columns as usize];
let mut gofs = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
let (mut zx2, mut zy2, mut zxy, mut zx, mut zy, mut _zw) = (0f64,0f64,0f64,0f64,0f64,0f64);
let (mut x2, mut x2y2, mut x4) = (0f64, 0f64, 0f64);
let mut num_valid = 0usize;
let (mut z_pred, mut z_act): (f64, f64);
let (mut sum_x, mut sum_y, mut sum_xy, mut sum_xx, mut sum_yy) = (0f64, 0f64, 0f64, 0f64, 0f64);
let (r, n): (f64, f64);
let mut xs = vec![];
let mut ys = vec![];
let mut zs = vec![];
for c in 0..num_cells {
zi = input[((row + dy[c] as isize), (col + dx[c] as isize))];
if zi != nodata {
xs.push(dx[c] as f64 * resolution);
ys.push(dy[c] as f64 * resolution);
zs.push(zi - z); // elevation relative to center
num_valid += 1;
}
}
if num_valid >= 8 { // need at least eight samples
// compute sums
for i in 0..num_valid {
zx2 += zs[i] * xs[i].powi(2);
zy2 += zs[i] * ys[i].powi(2);
zxy += zs[i] * xs[i] * ys[i];
zx += zs[i] * xs[i];
zy += zs[i] * ys[i];
_zw += zs[i];
x2 += xs[i].powi(2);
x2y2 += xs[i].powi(2) * ys[i].powi(2);
x4 += xs[i].powi(4);
}
let a = Matrix5::from_rows(&[
RowVector5::new(x4, x2y2, 0f64, 0f64, 0f64),
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
let mut input_file = String::new();
let mut output_file = String::new();
let mut filter_size = 3usize;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.", | identifier_body |
// Login.ts
Page: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public IsLogged<T>(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
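// AutoLogin restores the saved identification token from settings and queues
// the auto-auth job on the dispatcher.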
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() {
Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
}
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The signup was successfully created. Please send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There is no connection to the server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error connecting to the server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Ident
// Login.ts
Page: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public IsLogged<T>(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() {
Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
}
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The signup was successfully created. Please send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There is no connection to the server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error connecting to the server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Id
// Login.ts
Page: UI.Page;
private user: models.Login;
constructor(private redirectApp: defs.UI.IApp) {
super(key,onSigninStatChanged);
window['auth'] = this;
GData.user.OnMessage(
(s: bind.PropBinding, ev: bind.EventArgs<boolean, models.Login>) => onSigninStatChanged.Invoke(key, [ev._new]));
onSigninStatChanged.On = (v) => {
if (v)
AuthentificationApp.Download();
else this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, v]);
};
}
public IsLogged<T>(callback: (v: boolean, param: T) => void, param: T) {
callback(GData.user.IsLogged, param);
}
private createSignupPage() {
var p = new UI.Page(this, 'Signup', 'Signup');
p.OnInitialized = p => p.Add(new UI.TControl('Client.signup', GData.user));
this.AddPage(this._signupPage = p);
}
private createLoginPage() {
var p = new UI.Page(this, 'Login', 'Login');
p.OnInitialized = p => p.Add(new UI.TControl('Client.login', GData.user));
this.AddPage(this._loginPage = p);
}
private auth = thread.Dispatcher.cretaeJob(this.Login, [], this, true);
private autoAuth = thread.Dispatcher.cretaeJob(this._AutoLogin, [], this, true);
private AutoLogin() {
var ident =basic.Settings.get("Identification");
GData.user.Identification = ident;
thread.Dispatcher.Push(this.autoAuth);
}
private Login() {
var isl = GData.user.IsLogged;
Api.RiseApi("Auth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private _AutoLogin() {
var isl = GData.user.IsLogged;
Api.RiseApi("autoAuth", {
callback: (c, p) => {
if (!p || !GData.user.IsLogged) this.fullInitialize();
this.OnStatStatChanged.PInvok(key, [this, p]);
}, data: this
});
}
private static dx;
public static Download() {
Notification.fire('ReLoadUserSetting', [models.UserSetting.Default]);
if (this.dx) {
GData.spin.Pause(); return;
}
this.dx = true;
GData.__data.Clear();
Api.RiseApi('log', { callback: null, data: null });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Start("Wait a moment");
GData.requester.Push(models.Categories, GData.__data.Categories, null, (d, r) => { GData.spin.Message = "Categories"; GData.spin.Start("Wait a moment"); });
if (typeof __LOCALSAVE__ !== 'undefined')
GData.db.Get('Products').table.LoadTableFromDB(GData.__data.Products, () => {
GData.apis.Product.SmartUpdate(new Date(GData.db.Get('Products').info.LastUpdate || 0));
});
else {
GData.requester.Request(models.Products, "GETCSV", null, null, (pd, json, iss, req) => {
GData.__data.Products.FromCsv(req.Response);
});
//GData.requester.Push(models.Products, GData.__data.Products, null, (d, r) => { GData.spin.Message = "Products"; });
}
GData.requester.Push(models.Costumers, GData.__data.Costumers, null, (d, r) => { GData.spin.Message = "Costumers"; });
if (iss) GData.requester.Push(models.Agents, GData.__data.Agents, null, (d, r) => { GData.spin.Message = "Agents"; });
GData.requester.Push(models.IsAdmin, new models.IsAdmin(), null, (s, r, iss) => {
GData.spin.Pause();
Api.RiseApi('log', { callback: null, data: null });
});
});
}
public initialize() {
GData.requester.Request(models.IsSecured, "GET", undefined, undefined, (a, b, c) => {
__global.https = b as any;
this.AutoLogin();
});
}
private fullInitialize() {
if (this.finit) return;
if (this.IsInit) this._initialize();
else this.OnInitialized = t => t._initialize();
}
private _initialize() {
super.initialize();
this.finit = true;
this.createLoginPage();
this.createSignupPage();
initJobs.call(this);
this.SelectedPage = this._loginPage;
}
private finit: boolean;
public Signout() {
}
Logout() {
logout((islogout) => {
if (islogout) {
} else {
}
});
}
OnAttached() {
if (!this.isf) return this.isf = true;
this.fullInitialize();
}
isf;
OnDetached() {
}
}
}
function initJobs() {
bind.Register(new bind.Job('openlogin', null, null, null, (ji, e) => {
var dm = ji.dom;
dm.addEventListener('click', () => this.Open(this._loginPage))
}, null));
bind.Register(new bind.Job('login', null, null, null, (ji, e) => {
if (!GData.user.Client) GData.user.Client = new models.Client(0);
ji.dom.addEventListener('click', (() => { GData.spin.Start('login'); this.Login(); }).bind(ji))
}, null));
bind.Register(new bind.Job('opensignup', undefined, undefined, undefined, (ji, e) => {
var dm = ji.dom;
if (!GData.user.Client) GData.user.Client = new models.Client(0);
dm.addEventListener('click', () => {
this.Open(this._signupPage);
})
}, null));
bind.Register(new bind.Job('signup', () => {
}, null, null, (ji, e) => {
ji.addEventListener('click', 'click', (() => {
var t = ji.Scop;
var v = t.Value as models.Login;
v.ReGenerateEncPwd("eval(code)", v.Pwd);
GData.requester.Post(models.Signup, t.Value, null, (callback, p, iss) => {
if (iss)
var m = UI.Modal.ShowDialog('Signup', 'The signup was successfully created. Please send a message with your code to activate the account');
else {
}
});
}).bind(ji));
}, null));
bind.Register(new bind.Job('loggedjob', (ji) => {
var b = ji.Scop.Value as boolean;
var dm = ji.dom as HTMLElement;
if (b)
dm.innerText = 'YOU ARE LOGGED';
else {
dm.innerText = 'YOU ARE NOT LOGGED';
}
}, null, null, (j, e) => { }, null));
}
Api.RegisterApiCallback({
Name: "ReAuth", DoApiCallback: function (a, b, c) {
GData.spin.Start("Authenticating");
GData.user.Stat = 0;
GData.requester.Push(models.Login, GData.user, null, function (callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
basic.Settings.set("Identification", login.Identification);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('There is no connection to the server', false);
this.OnInitialized = (t) => t.fullInitialize();
c.callback && c.callback(c, false);
});
}, Owner: this
});
Api.RegisterApiCallback({
Name: "Auth", DoApiCallback: function (a, b, c) {
GData.user.Stat = 0;
function callback1(callback, s, iss) {
GData.spin.Pause();
if (iss) {
var login = callback.data as models.Login;
if (login.IsLogged) {
saveLoginData(login);
c.callback && c.callback(c, true);
return;
}
UI.InfoArea.push('<p class="text-center">Please Check Your <B>Password</B> AND <B>UserName</B></p>', false, 4000);
} else
UI.InfoArea.push('Error connecting to the server', false);
c.callback && c.callback(c, false);
}
GData.requester.Push(models.Login, GData.user, null, callback1);
}, Owner: this
});
function saveLoginData(login: models.Login) {
basic.Settings.set("Identification", login.Identification);
basic.Settings.set("LoginID", login.Id);
}
function loadLoginData(login: models.Login, alsoID: boolean) {
login.Id
// mod.rs
VRAM: [0; 0x2000],
OAM: [0; 0x100],
registers: Registers {
LCDC: 0,
STAT: Mode::VBlank as u8,
SCY: 0,
SCX: 0,
LX: 0,
LY: 0,
LYC: 0,
WY: 0,
WX: 0,
BGP: 0,
OBP0: 0,
OBP1: 0,
dma_active: false,
dma_counter: 0,
dma_address: 0,
},
bgfifo: BackgroundFifo::new(),
spfifo: SpriteFifo::new(),
}
}
pub fn update<'a>(&mut self, driver: &mut (dyn GraphicsDriver + 'a)) {
if !self.render_flag {
return;
}
let start = crate::graphics_driver::Point { x: 0, y: 0 };
let end = crate::graphics_driver::Point {
x: DISPLAY_WIDTH as u16,
y: DISPLAY_HEIGHT as u16,
};
if !self.on {
let screen: [u32; PITCH] = [0; PITCH];
driver.render(&screen);
}
else {
driver.render(&self.pixel_buffer);
}
self.render_flag = false;
}
fn set_mode(&mut self, bus: &mut Bus, mode: Mode) {
self.mode = mode;
// Clear previous mode flag
self.registers.STAT &= 0xFF ^ STAT_MODE_MASK;
// Set current mode flag
self.registers.STAT |= mode as u8;
const INTERRUPT_SOURCE_FLAGS: [u8; 3] = [
STAT_HBLANK_INTERRUPT,
STAT_VBLANK_INTERRUPT,
STAT_OAM_INTERRUPT
];
match mode {
// Draw does not have an associated interrupt.
Mode::Draw => return,
Mode::VBlank => interrupt(bus, InterruptType::VBlank),
_ => {},
}
if self.registers.STAT & INTERRUPT_SOURCE_FLAGS[mode as usize] != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
pub fn reset(&mut self, bus: &mut Bus) {
self.set_mode(bus, Mode::OAM);
self.registers.LY = 0;
self.clock = 0;
}
}
impl fmt::Display for PPU {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write! {f,
concat! {
"PPU | MODE {:6?}\n",
" | LCDC {:02X} STAT {:02X}\n",
" | SCY {:02X} SCX {:02X}\n",
" | LY {:02X} LYC {:02X}\n",
" | LY {:02X} LX {:02X}\n",
" | WY {:02X} WX {:02X}\n\n",
"BGF | MODE {:?}\n"
},
self.mode,
self.registers.LCDC, self.registers.STAT, self.registers.SCY, self.registers.SCX,
self.registers.LY, self.registers.LYC, self.registers.LY, self.registers.LX,
self.registers.WY, self.registers.WX,
self.bgfifo.state,
}
}
}
impl BusListener for PPU {
fn bus_attach(&mut self) -> Vec<Attach> {
vec![
Attach::BlockRange(0x80, 0x9F), // VRAM
Attach::Block(0xFE), // OAM Sprite Memory (Note that OAM is only up to 0xFE9F)
Attach::RegisterRange(0x40, 0x4B), // LCD Position / Palettes / DMA Transfer Start Address
// Attach::Register(0x4F), // VRAM Bank Selector
// Attach::RegisterRange(0x51, 0x55), // HDMA 1-5
// Attach::RegisterRange(0x68, 0x6B), // CGB Palletes
]
}
fn bus_read(&self, address: Address) -> Byte {
// TODO: Prevent access during OAM or Draw.
match address {
0x8000..=0x9FFF => self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => self.OAM[(address - 0xFE00) as usize],
0xFEA0..=0xFEFF => 0, // This range is unusable
0xFF40 => self.registers.LCDC,
0xFF41 => self.registers.STAT,
0xFF42 => self.registers.SCY,
0xFF43 => self.registers.SCX,
//0xFF44 => 0x90, //DEBUG//
0xFF44 => self.registers.LY,
0xFF45 => self.registers.LYC,
0xFF46 => self.registers.dma_address,
0xFF47 => self.registers.BGP,
0xFF48 => self.registers.OBP0,
0xFF49 => self.registers.OBP1,
0xFF4A => self.registers.WY,
0xFF4B => self.registers.WX,
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => 0x00, // TODO
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
}
}
fn bus_write(&mut self, _bus: &mut Bus, address: Address, value: Byte) {
match address {
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => return, // TODO
0xFEA0..=0xFEFF => return, // This range is unusable
0xFF41 => {
// Lower 3 bits of STAT are read-only mode indicators.
let stat = self.registers.STAT;
self.registers.STAT = (value & 0xF8) | (stat & 0x07);
return;
}
_ => {},
}
let ptr = match address {
0x8000..=0x9FFF => &mut self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => {
if self.mode == Mode::OAM || self.mode == Mode::Draw {
panic!("Illegal write to OAM table.");
}
&mut self.OAM[(address - 0xFE00) as usize]
},
0xFF40 => &mut self.registers.LCDC,
// 0xFF41 HANDLED ABOVE //
0xFF42 => &mut self.registers.SCY,
0xFF43 => &mut self.registers.SCX,
// 0xFF44 (LY) is READ ONLY //
0xFF45 => &mut self.registers.LYC,
0xFF47 => &mut self.registers.BGP,
0xFF48 => &mut self.registers.OBP0,
0xFF49 => &mut self.registers.OBP1,
0xFF4A => &mut self.registers.WY,
0xFF4B => &mut self.registers.WX,
// Writing to the DMA Transfer Register initializes transfer
0xFF46 => {
self.registers.dma_active = true;
self.registers.dma_counter = 0;
assert!(value <= 0xF1);
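// The written value is the high byte of the OAM DMA source address; the
// transfer then copies one byte per cycle into OAM (see the clock callback).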
&mut self.registers.dma_address
},
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
};
*ptr = value;
}
}
impl ClockListener for PPU {
fn callback(&mut self, bus: &mut Bus, cycles: u8) {
if self.registers.LCDC & LCDC_DISPLAY_ENABLE == 0 {
self.on = false;
self.clock += cycles as u16;
if SCREEN_CYCLES < self.clock {
self.clock -= SCREEN_CYCLES;
self.render_flag = true;
}
return;
}
else if !self.on {
self.reset(bus);
self.on = true;
}
// DMA Transfer Loop
for _ in 0..cycles {
// DMA may terminate in the middle of this loop.
if !self.registers.dma_active {
break;
}
let dma_counter = self.registers.dma_counter as u16;
let data = bus.read_byte(((self.registers.dma_address as Address) << 8) | dma_counter);
self.OAM[dma_counter as usize] = data;
| self.registers.dma_counter += 1;
self.registers.dma_active = self.registers.dma_counter < DISPLAY_WIDTH;
}
// mod.rs
const OAM_CYCLES: u16 = 20;
const DRAW_CYCLES: u16 = 43;
const HBLANK_CYCLES: u16 = 51;
const VBLANK_LINE_CYCLES: u16 = 114;
const SCREEN_CYCLES: u16 = VBLANK_LINE_CYCLES * VIRTUAL_DISPLAY_HEIGHT as u16;
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
enum Mode {
HBlank = 0,
VBlank,
OAM,
Draw,
}
#[derive(Debug, Copy, Clone)]
struct Point {
x: u16,
y: u16,
}
#[derive(Debug, Copy, Clone)]
pub struct Registers {
LCDC: Byte,
STAT: Byte,
SCY: Byte,
SCX: Byte,
// LX // Fake register specifying X position of renderer
LX: Byte,
LY: Byte,
LYC: Byte,
WY: Byte,
WX: Byte,
BGP: Byte,
OBP0: Byte,
OBP1: Byte,
dma_active: bool,
dma_address: Byte,
dma_counter: Byte,
}
#[derive(Debug)]
pub struct PPU {
on: bool,
mode: Mode,
clock: u16,
pixel_buffer: [u32; PITCH],
palette_buffer: [u32; 4],
render_flag: bool,
VRAM: [Byte; 0x2000],
OAM: [Byte; 0x100],
registers: Registers,
bgfifo: BackgroundFifo,
spfifo: SpriteFifo,
}
impl PPU {
pub fn new() -> Self {
Self {
on: false,
mode: Mode::VBlank,
clock: 0,
pixel_buffer: [0x00; PITCH],
palette_buffer: [0xFFFFFF, 0xC0C0C0, 0x404040, 0x000000],
render_flag: true,
VRAM: [0; 0x2000],
OAM: [0; 0x100],
registers: Registers {
LCDC: 0,
STAT: Mode::VBlank as u8,
SCY: 0,
SCX: 0,
LX: 0,
LY: 0,
LYC: 0,
WY: 0,
WX: 0,
BGP: 0,
OBP0: 0,
OBP1: 0,
dma_active: false,
dma_counter: 0,
dma_address: 0,
},
bgfifo: BackgroundFifo::new(),
spfifo: SpriteFifo::new(),
}
}
pub fn update<'a>(&mut self, driver: &mut (dyn GraphicsDriver + 'a)) {
if !self.render_flag {
return;
}
let start = crate::graphics_driver::Point { x: 0, y: 0 };
let end = crate::graphics_driver::Point {
x: DISPLAY_WIDTH as u16,
y: DISPLAY_HEIGHT as u16,
};
if !self.on {
let screen: [u32; PITCH] = [0; PITCH];
driver.render(&screen);
}
else {
driver.render(&self.pixel_buffer);
}
self.render_flag = false;
}
fn set_mode(&mut self, bus: &mut Bus, mode: Mode) {
self.mode = mode;
// Clear previous mode flag
self.registers.STAT &= 0xFF ^ STAT_MODE_MASK;
// Set current mode flag
self.registers.STAT |= mode as u8;
const INTERRUPT_SOURCE_FLAGS: [u8; 3] = [
STAT_HBLANK_INTERRUPT,
STAT_VBLANK_INTERRUPT,
STAT_OAM_INTERRUPT
];
match mode {
// Draw does not have an associated interrupt.
Mode::Draw => return,
Mode::VBlank => interrupt(bus, InterruptType::VBlank),
_ => {},
}
if self.registers.STAT & INTERRUPT_SOURCE_FLAGS[mode as usize] != 0 {
interrupt(bus, InterruptType::LCDStat);
}
}
pub fn reset(&mut self, bus: &mut Bus) {
self.set_mode(bus, Mode::OAM);
self.registers.LY = 0;
self.clock = 0;
}
}
impl fmt::Display for PPU {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write! {f,
concat! {
"PPU | MODE {:6?}\n",
" | LCDC {:02X} STAT {:02X}\n",
" | SCY {:02X} SCX {:02X}\n",
" | LY {:02X} LYC {:02X}\n",
" | LY {:02X} LX {:02X}\n",
" | WY {:02X} WX {:02X}\n\n",
"BGF | MODE {:?}\n"
},
self.mode,
self.registers.LCDC, self.registers.STAT, self.registers.SCY, self.registers.SCX,
self.registers.LY, self.registers.LYC, self.registers.LY, self.registers.LX,
self.registers.WY, self.registers.WX,
self.bgfifo.state,
}
}
}
impl BusListener for PPU {
fn bus_attach(&mut self) -> Vec<Attach> {
vec![
Attach::BlockRange(0x80, 0x9F), // VRAM
Attach::Block(0xFE), // OAM Sprite Memory (Note that OAM is only up to 0xFE9F)
Attach::RegisterRange(0x40, 0x4B), // LCD Position / Palettes / DMA Transfer Start Address
// Attach::Register(0x4F), // VRAM Bank Selector
// Attach::RegisterRange(0x51, 0x55), // HDMA 1-5
// Attach::RegisterRange(0x68, 0x6B), // CGB Palletes
]
}
fn bus_read(&self, address: Address) -> Byte {
// TODO: Prevent access during OAM or Draw.
match address {
0x8000..=0x9FFF => self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => self.OAM[(address - 0xFE00) as usize],
0xFEA0..=0xFEFF => 0, // This range is unusable
0xFF40 => self.registers.LCDC,
0xFF41 => self.registers.STAT,
0xFF42 => self.registers.SCY,
0xFF43 => self.registers.SCX,
//0xFF44 => 0x90, //DEBUG//
0xFF44 => self.registers.LY,
0xFF45 => self.registers.LYC,
0xFF46 => self.registers.dma_address,
0xFF47 => self.registers.BGP,
0xFF48 => self.registers.OBP0,
0xFF49 => self.registers.OBP1,
0xFF4A => self.registers.WY,
0xFF4B => self.registers.WX,
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => 0x00, // TODO
_ => panic!("PPU Address ({:04X}) Not Implemented", address),
}
}
fn bus_write(&mut self, _bus: &mut Bus, address: Address, value: Byte) {
match address {
// 0xFF4F | 0xFF51..=0xFF55 | 0xFF68..=0xFF6B => return, // TODO
0xFEA0..=0xFEFF => return, // This range is unusable
0xFF41 => {
// Lower 3 bits of STAT are read-only mode indicators.
let stat = self.registers.STAT;
self.registers.STAT = (value & 0xF8) | (stat & 0x07);
return;
}
_ => {},
}
let ptr = match address {
0x8000..=0x9FFF => &mut self.VRAM[(address - 0x8000) as usize],
0xFE00..=0xFE9F => {
if self.mode == Mode::OAM || self.mode == Mode::Draw {
panic!("Illegal write to OAM table.");
}
&mut self.OAM[(address - 0xFE00) as usize]
},
0xFF40 => &mut self.registers.LCDC,
// addon.go
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons lists addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
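// Each addon subdirectory is fetched concurrently; failures are reported
// through errChan and checked once the WaitGroup completes.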
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
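// Dispatch one reader goroutine per recognised entry (readme, metadata,
// definitions, resources, template), gated by the ListOptions flags.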
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile reads a single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
}
// addon.go
// asyncReader helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon gets an addon's info from GitAddonSource; it can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons lists addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile reads a single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
| readResFile | identifier_name |
addon.go | Chan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
}
return &gitHelper{
Client: cli,
Meta: gitmeta,
}, nil
}
func (h *gitHelper) readRepo(path string) (*github.RepositoryContent, []*github.RepositoryContent, error) {
file, items, _, err := h.Client.Repositories.GetContents(context.Background(), h.Meta.Owner, h.Meta.Repo, path, nil)
if err != nil {
return nil, nil, WrapErrRateLimit(err)
}
return file, items, nil
}
func genAddonAPISchema(addonRes *types.Addon) error {
param, err := utils2.PrepareParameterCue(addonRes.Name, addonRes.Parameters)
if err != nil {
return err
}
var r cue.Runtime
cueInst, err := r.Compile("-", param)
if err != nil {
return err
}
data, err := common.GenOpenAPI(cueInst)
if err != nil {
return err
}
schema, err := utils2.ConvertOpenAPISchema2SwaggerObject(data)
if err != nil {
return err
}
utils2.FixOpenAPISchema("", schema)
addonRes.APISchema = schema
return nil
}
func cutPathUntil(path []string, end string) ([]string, error) {
for i, d := range path {
if d == end {
return path[i:], nil
}
}
return nil, errors.New("cut path fail, target directory name not found")
}
// RenderApplication render a K8s application
func RenderApplication(addon *types.Addon, args map[string]string) (*v1beta1.Application, []*unstructured.Unstructured, error) {
if args == nil {
args = map[string]string{}
}
app := addon.AppTemplate
if app == nil {
app = &v1beta1.Application{
TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "Application"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2AppName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
Labels: map[string]string{
oam.LabelAddonName: addon.Name,
},
},
Spec: v1beta1.ApplicationSpec{
Components: []common2.ApplicationComponent{},
},
}
}
app.Name = Convert2AppName(addon.Name)
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{}
}
for _, namespace := range addon.NeedNamespace {
comp := common2.ApplicationComponent{
Type: "raw",
Name: fmt.Sprintf("%s-namespace", namespace),
Properties: util.Object2RawExtension(renderNamespace(namespace)),
}
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
return nil, nil, ErrRenderCueTmpl
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
var defObjs []*unstructured.Unstructured
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, nil, err
}
defObjs = append(defObjs, obj)
}
if app.Spec.Workflow == nil {
app.Spec.Workflow = &v1beta1.Workflow{Steps: make([]v1beta1.WorkflowStep, 0)}
}
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps,
v1beta1.WorkflowStep{
Name: "deploy-control-plane",
Type: "apply-application",
},
v1beta1.WorkflowStep{
Name: "deploy-runtime",
Type: "deploy2runtime",
})
} else {
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
}
return app, defObjs, nil
}
func isDeployToRuntimeOnly(addon *types.Addon) bool {
if addon.DeployTo == nil {
return false
}
return addon.DeployTo.RuntimeCluster
}
func renderObject(elem types.AddonElementFile) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
_, _, err := dec.Decode([]byte(elem.Data), nil, obj)
if err != nil {
return nil, err
}
return obj, nil
}
func renderNamespace(namespace string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetAPIVersion("v1")
u.SetKind("Namespace")
u.SetName(namespace)
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem types.AddonElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
return &baseRawComponent, nil
}
// renderCUETemplate will return a component from cue template
func renderCUETemplate(elem types.AddonElementFile, parameters string, args map[string]string) (*common2.ApplicationComponent, error) {
bt, err := json.Marshal(args)
if err != nil {
return nil, err
}
var paramFile = cuemodel.ParameterFieldName + ": {}"
if string(bt) != "null" {
paramFile = fmt.Sprintf("%s: %s", cuemodel.ParameterFieldName, string(bt))
}
param := fmt.Sprintf("%s\n%s", paramFile, parameters)
v, err := value.NewValue(param, nil, "")
if err != nil {
return nil, err
}
out, err := v.LookupByScript(fmt.Sprintf("{%s}", elem.Data))
if err != nil {
return nil, err
}
compContent, err := out.LookupValue("output")
if err != nil {
return nil, err
}
b, err := cueyaml.Encode(compContent.CueValue())
if err != nil {
return nil, err
}
comp := common2.ApplicationComponent{
Name: strings.Join(append(elem.Path, elem.Name), "-"),
}
err = yaml.Unmarshal(b, &comp)
if err != nil {
return nil, err
}
return &comp, err
}
const addonAppPrefix = "addon-"
const addonSecPrefix = "addon-secret-"
// Convert2AppName -
func Convert2AppName(name string) string {
return addonAppPrefix + name
}
// Convert2AddonName -
func Convert2AddonName(name string) string {
return strings.TrimPrefix(name, addonAppPrefix)
}
// RenderArgsSecret TODO add desc
func RenderArgsSecret(addon *types.Addon, args map[string]string) *v1.Secret {
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: args,
Type: v1.SecretTypeOpaque,
}
return &sec
}
// Convert2SecName TODO add desc
func Convert2SecName(name string) string {
return addonSecPrefix + name
}
// CheckDependencies checks if addon's dependent addons is enabled
func CheckDependencies(ctx context.Context, clt client.Client, addon *types.Addon) bool {
var app v1beta1.Application
for _, dep := range addon.Dependencies | {
err := clt.Get(ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err != nil {
return false
}
} | conditional_block |
|
addon.go | helps async read files of addon
type asyncReader struct {
addon *types.Addon
h *gitHelper
item *github.RepositoryContent
errChan chan error
}
// SetReadContent set which file to read
func (r *asyncReader) SetReadContent(content *github.RepositoryContent) {
r.item = content
}
// GetAddon get a addon info from GitAddonSource, can be used for get or enable
func GetAddon(name string, git *GitAddonSource, opt ListOptions) (*types.Addon, error) {
addon, err := getSingleAddonFromGit(git.URL, git.Path, name, git.Token, opt)
if err != nil {
return nil, err
}
return addon, nil
}
// ListAddons list addons' info from GitAddonSource
func ListAddons(git *GitAddonSource, opt ListOptions) ([]*types.Addon, error) {
gitAddons, err := getAddonsFromGit(git.URL, git.Path, git.Token, opt)
if err != nil {
return nil, err
}
return gitAddons, nil
}
func getAddonsFromGit(baseURL, dir, token string, opt ListOptions) ([]*types.Addon, error) {
var addons []*types.Addon
var err error
var wg sync.WaitGroup
errChan := make(chan error, 1)
gith, err := createGitHelper(baseURL, dir, token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
for _, subItems := range items {
if subItems.GetType() != "dir" {
continue
}
wg.Add(1)
go func(item *github.RepositoryContent) {
defer wg.Done()
addonRes, err := getSingleAddonFromGit(baseURL, dir, item.GetName(), token, opt)
if err != nil {
errChan <- err
return
}
addons = append(addons, addonRes)
}(subItems)
}
wg.Wait()
if len(errChan) != 0 {
return nil, <-errChan
}
return addons, nil
}
func getSingleAddonFromGit(baseURL, dir, addonName, token string, opt ListOptions) (*types.Addon, error) {
var wg sync.WaitGroup
gith, err := createGitHelper(baseURL, path.Join(dir, addonName), token)
if err != nil {
return nil, err
}
_, items, err := gith.readRepo(gith.Meta.Path)
if err != nil {
return nil, err
}
reader := asyncReader{
addon: &types.Addon{},
h: gith,
errChan: make(chan error, 1),
}
for _, item := range items {
switch strings.ToLower(item.GetName()) {
case ReadmeFileName:
if !opt.GetDetail {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readReadme(&wg, reader)
case MetadataFileName:
reader.SetReadContent(item)
wg.Add(1)
go readMetadata(&wg, reader)
case DefinitionsDirName:
if !opt.GetDefinition {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(&wg, reader)
case ResourcesDirName:
if !opt.GetResource && !opt.GetParameter {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readResources(&wg, reader)
case TemplateFileName:
if !opt.GetTemplate {
break
}
reader.SetReadContent(item)
wg.Add(1)
go readTemplate(&wg, reader)
}
}
wg.Wait()
if opt.GetParameter && reader.addon.Parameters != "" {
err = genAddonAPISchema(reader.addon)
if err != nil {
return nil, err
}
}
return reader.addon, nil
}
func readTemplate(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
data, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
dec := k8syaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
reader.addon.AppTemplate = &v1beta1.Application{}
_, _, err = dec.Decode([]byte(data), nil, reader.addon.AppTemplate)
if err != nil {
reader.errChan <- err
return
}
}
func readResources(wg *sync.WaitGroup, reader asyncReader) { | dirPath, err := cutPathUntil(dirPath, ResourcesDirName)
if err != nil {
reader.errChan <- err
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readResFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readResources(wg, reader)
}
}
}
// readResFile read single resource file
func readResFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
if reader.item.GetName() == "parameter.cue" {
reader.addon.Parameters = b
return
}
switch filepath.Ext(reader.item.GetName()) {
case ".cue":
reader.addon.CUETemplates = append(reader.addon.CUETemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
default:
reader.addon.YAMLTemplates = append(reader.addon.YAMLTemplates, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
}
func readDefinitions(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/")
dirPath, err := cutPathUntil(dirPath, DefinitionsDirName)
if err != nil {
reader.errChan <- err
return
}
_, items, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
for _, item := range items {
switch item.GetType() {
case "file":
reader.SetReadContent(item)
wg.Add(1)
go readDefFile(wg, reader, dirPath)
case "dir":
reader.SetReadContent(item)
wg.Add(1)
go readDefinitions(wg, reader)
}
}
}
// readDefFile read single definition file
func readDefFile(wg *sync.WaitGroup, reader asyncReader, dirPath []string) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
reader.addon.Definitions = append(reader.addon.Definitions, types.AddonElementFile{Data: b, Name: reader.item.GetName(), Path: dirPath})
}
func readMetadata(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
b, err := content.GetContent()
if err != nil {
reader.errChan <- err
return
}
err = yaml.Unmarshal([]byte(b), &reader.addon.AddonMeta)
if err != nil {
reader.errChan <- err
return
}
}
func readReadme(wg *sync.WaitGroup, reader asyncReader) {
defer wg.Done()
content, _, err := reader.h.readRepo(*reader.item.Path)
if err != nil {
reader.errChan <- err
return
}
reader.addon.Detail, err = content.GetContent()
if err != nil {
reader.errChan <- err
return
}
}
func createGitHelper(baseURL, dir, token string) (*gitHelper, error) {
var ts oauth2.TokenSource
if token != "" {
ts = oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
}
tc := oauth2.NewClient(context.Background(), ts)
tc.Timeout = time.Second * 10
cli := github.NewClient(tc)
baseURL = strings.TrimSuffix(baseURL, ".git")
u, err := url.Parse(baseURL)
if err != nil {
return nil, errors.New("addon registry invalid")
}
u.Path = path.Join(u.Path, dir)
_, gitmeta, err := utils.Parse(u.String())
if err != nil {
return nil, errors.New("addon registry invalid")
| defer wg.Done()
dirPath := strings.Split(reader.item.GetPath(), "/") | random_line_split |
cryptologer.py | ,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
|
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None | from enc.bacon_enc import steak
ciphertext=steak(plaintext) | conditional_block |
cryptologer.py | #import operator
#import math
#import requests
#import json
#import binascii
#import os
#import time
'''
def whereami():
with open('/root/directory.txt','r') as f1:
location=f1.read()
return location
location=whereami()'''
#appending file paths
'''
import Encrypting as enc
import Decrypting as dec
import Algorithms as alg
import rsa_types as RSA'''
'''
sys.path.append('%s/Encrypting/' % location)
sys.path.append('%s/Decrypting/' % location)
sys.path.append('%s/Algorithms/' % location)
sys.path.append('%s/rsa_types/' % location)'''
#file imports for encryption
'''
from caeser import julius,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, with | #import re
import argparse
| random_line_split |
|
cryptologer.py | ,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def | ():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None | initializeParser | identifier_name |
cryptologer.py | ,bruteforce
from vigenere_enc import encrypt
from affine_enc import aff_enc
from bacon_enc import steak
from railfence_enc import rail
from atbash import atb
from polybius_enc import tsquaree
from substitution_enc import substitute
from rsa_encryptor import rivest
from rot import rotate,rotate_brute
from skip import skip
from rot47 import rot47'''
#algorithms import
from alg.Ext_Euclid import reversing, printing
#file imports for decryption
'''
from vigenere_dec import impossible, withkey#import impossible, withkey
from affine_dec import fine
from bacon_dec import pork
from polybius_dec import psquaree
from railfence_dec import fence
from substitution_dec import manual
from simplersa import init
from Weiner import attack
from small_e import smallie
from internal_attack import company
from hastad import broadcast
from multi_cipher import multi
from boneh_durfee import example'''
#parsing starts
def initializeParser():
parser=argparse.ArgumentParser(description="Decryptor for Caeser, Vigenere, types of RSA and more...")
parser.add_argument("--decrypt","--dec","-d",help="Performs Decryption",action="store_true")
parser.add_argument("--encrypt","--enc","-e",help="Performs Encryption",action="store_true")
parser.add_argument("--sourcefile","--sf","-f",help="Input file with ciphertext",type=str)
parser.add_argument("--cipher","--cip","-c",help="Input cipher as test",type=str)
parser.add_argument("--key","-k",help="If the key is known (text for vignere, shift for caeser,ROT)",type=str)
parser.add_argument("--len","-l",help="User-defined max probable key length",type=str)
parser.add_argument("--caeser","-C",help="If the cipher is caeser cipher",action="store_true")
parser.add_argument("--vignere","-V",help="If the cipher is vignere cipher",action="store_true")
parser.add_argument("--affine","-A",help="If the cipher is affine cipher",action="store_true")
parser.add_argument("--bacon","-B",help="If the cipher is bacon cipher",action="store_true")
parser.add_argument("--polybius","-P",help="If the cipher is encrypted by a simple 6x6 polybius square",action="store_true")
parser.add_argument("--railfence","-F",help="If railfence encryption is used",action="store_true")
parser.add_argument("--skip","-K",help="If skip cipher is used",action="store_true")
parser.add_argument("--atbash","-T",help="If atbash rotation is done on the plaintext",action="store_true")
parser.add_argument("--rot","-O",help="If the cipher is any rotation cipher",action="store_true")
parser.add_argument("--rot47","-47",help="If the cipher is rotated by ROT47",action="store_true")
parser.add_argument("--substitution","-S",help="If the plaintext in encrypted using simple substitution cipher",action="store_true")
parser.add_argument("--rsa","-R",help="If the cipher is RSA related",action="store_true") #contains simple and multi_rsa
#parser.add_argument("--factordb","--fb","-O",help="Using factordb to crack the rsa",action="store_true")
parser.add_argument("--weiner","-W",help="Cracking RSA using Weiner attack",action="store_true")
parser.add_argument("--smalle","-E",help="Cracking RSA provided e is very small",action="store_true")
parser.add_argument("--internal","-I",help="If an internal attack for RSA is being performed",action="store_true")
parser.add_argument("--multi","-M",help="If the message has loads of encrypted ciphers",action="store_true")
#parser.add_argument("--fermat","-M",help="Fermat's attack on the RSA encrypted text",action="store_true")
#parser.add_argument("--twin","-N",help="If the RSA public is a product of twin prime, use this",action="store_true")
parser.add_argument("--chinese","-H",help="Using the Chinese Remainder Theorem for cracking RSA from e packets having the same n",action="store_true")
parser.add_argument("--boneh","-D",help="Using the famous boneh_durfee to calculate d, provided d< N^0.292",action="store_true")
return parser
def readfile(filename):
with open(filename,"r") as f1:
return f1.read()
def read_rsa(filename):
|
def read_chinese(filename):
with open(filename,"r") as f3:
line=f3.readline()
while line:
symbol=line[:2].lower()
if symbol=='n1':
n1=int(line[3:])
elif symbol=='n2':
n2=int(line[3:])
elif symbol=='n3':
n3=int(line[3:])
elif symbol=='c1':
c1=int(line[3:])
elif symbol=='c2':
c2=int(line[3:])
elif symbol=='c3':
c3=int(line[3:])
else:
raise Exception("the contents of the file can't be read properly")
break
line=f3.readline()
return n1,n2,n3,c1,c2,c3
def main():
parser=initializeParser()
args=parser.parse_args()
if args.encrypt or args.decrypt:
if args.cipher!=None:
rawtext=args.cipher
else:
rawtext=readfile(args.sourcefile)
key=args.key
if args.encrypt:
plaintext=rawtext
if args.caeser:
from enc.caeser import julius
if args.key==None:
shift=int(input("enter shift:"))
else:
shift=int(key)
ciphertext=julius(plaintext,shift)
elif args.vignere:
from enc.vigenere_enc import encrypt
if args.key==None:
key=input("enter key:")
ciphertext=encrypt(plaintext,key)
elif args.affine:
from enc.affine_enc import aff_enc
ciphertext=aff_enc(plaintext)
elif args.bacon:
from enc.bacon_enc import steak
ciphertext=steak(plaintext)
elif args.railfence:
from enc.railfence_enc import rail
ciphertext=rail(plaintext)
elif args.rot:
from enc.rot import rotate
if args.key==None:
shift=input("enter shift:")
ciphertext=rotate(plaintext,shift)
elif args.rot47:
from enc.rot47 import rot47
ciphertext=rot47(plaintext)
elif args.skip:
from enc.skip import skip
loop=int(input("Enter skip:"))
ciphertext=skip(plaintext,loop)
elif args.atbash:
from enc.atbash import atb
ciphertext=atb(plaintext)
elif args.polybius:
from enc.polybius_enc import tsquaree
ciphertext=tsquaree(plaintext)
elif args.substitution:
from enc.substitution_enc import substitute
ciphertext=substitute(plaintext)
elif args.rsa:
from enc.rsa_encryptor import rivest
n,e,m=read_rsa(args.sourcefile)
ciphertext=rivest(n,e,m)
display='message:'
try:
print("ciphertext:",ciphertext,end='')
except UnboundLocalError:
print("NullError: no ciphering technique mentioned")
elif args.decrypt:
ciphertext=rawtext
display='plaintext:'
if args.caeser:
from enc.caeser import julius,bruteforce
if args.key!=None:
plaintext=julius(ciphertext,-int(key))
else:
display=''
plaintext=bruteforce(ciphertext)
elif args.vignere:
from dec.vigenere_dec import impossible, withkey
length=args.len
if key!=None:
plaintext=withkey(ciphertext,key)
else:
plaintext=impossible(ciphertext,length)
elif args.affine:
from dec.affine_dec import fine
plaintext=fine(ciphertext)
elif args.bacon:
from dec.bacon_dec import pork
if args.key!=None:
plaintext=pork(ciphertext,key)
else:
plaintext=pork(ciphertext,0)
elif args.railfence:
from dec.railfence_dec import fence
length=args.len
if args.len!=None:
plaintext=fence(ciphertext,int(length))
else:
plaintext=fence(ciphertext,None | with open(filename,"r") as f2:
line=f2.readline()
e=0
while line:
symbol=line[0].lower()
if symbol=='n':
n=int(line[2:])
elif symbol=='e':
e=int(line[2:])
elif symbol=='c':
c=line[2:]
elif symbol=='m':
c=line[2:]
else:
raise Exception("the contents of the file can't be read properly")
break
line=f2.readline()
return n,e,c | identifier_body |
metadata.rs | samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 10 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the length of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. Although such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
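// Illustrative sketch (not part of the original library): the "NAME=value"
// convention that `read_vorbis_comment_block` enforces above, written as a
// standalone helper over an in-memory byte slice. The function name and the
// choice of a `&[u8]` input are assumptions made for this example only.
#[allow(dead_code)]
fn split_vorbis_comment(comment: &[u8]) -> Option<(&str, &str)> {
    // Find the first '=' byte; everything before it is the field name.
    let sep_index = comment.iter().position(|&x| x == b'=')?;
    let (name_bytes, rest) = comment.split_at(sep_index);
    // The field name may only contain ASCII 0x20 through 0x7d, '=' excluded.
    if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d || x == b'=') {
        return None;
    }
    // The name is plain ASCII by the check above; the value must be valid UTF-8.
    let name = std::str::from_utf8(name_bytes).ok()?;
    let value = std::str::from_utf8(&rest[1..]).ok()?;
    Some((name, value))
}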
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an err, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
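// Note on the pattern above (an illustrative sketch, not part of the original
// library): `with_capacity` + `set_len` + `read_into` avoids zeroing the
// buffer before it is overwritten. A fully safe alternative zero-initializes
// the buffer up front, at the cost of one extra pass over the memory. The
// helper below shows that variant using only `std::io::Read`; its name is an
// assumption made for this example only.
#[allow(dead_code)]
fn read_exact_zeroed<R: std::io::Read>(input: &mut R, len: usize) -> std::io::Result<Vec<u8>> {
    // `vec![0u8; len]` allocates and zeroes the buffer, so no uninitialized
    // memory is ever exposed, even if `read_exact` fails halfway through.
    let mut data = vec![0u8; len];
    input.read_exact(&mut data)?;
    Ok(data)
}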
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read, is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(input: R) -> MetadataBlockReader<R> {
MetadataBlockReader {
input: input,
done: false,
}
}
#[inline]
fn read_next(&mut self) -> MetadataBlockResult {
let header = try!(read_metadata_block_header(&mut self.input));
let block = try!(read_metadata_block(&mut self.input, header.block_type, header.length));
self.done = header.is_last;
Ok(block)
}
}
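// Illustrative sketch (not part of the original library): consuming the
// iterator and stopping at the first error, matching the documented behaviour
// that no further blocks are yielded once an error occurs. The function name
// and the decision to merely count blocks are assumptions for this example.
#[allow(dead_code)]
fn count_metadata_blocks<R: ReadBytes>(input: R) -> Result<usize> {
    let mut count = 0;
    for block in MetadataBlockReader::new(input) {
        // Propagate the first error; the iterator yields nothing after it.
        try!(block);
        count += 1;
    }
    Ok(count)
}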
impl<R: ReadBytes> Iterator for MetadataBlockReader<R> {
type Item = MetadataBlockResult;
#[inline]
fn next(&mut self) -> Option<MetadataBlockResult> {
| if self.done {
None
} else {
let block = self.read_next();
// After a failure, no more attempts to read will be made,
// because we don't know where we are in the stream.
if !block.is_ok() {
self.done = true;
}
Some(block)
}
}
#[ | identifier_body |
|
metadata.rs | max_frame_size && max_frame_size != 0 {
return fmt_err("inconsistent bounds, min frame size > max frame size");
}
// A sample rate of 0 is invalid, and the maximum sample rate is limited by
// the structure of the frame headers to 655350 Hz.
if sample_rate == 0 || sample_rate > 655350 {
return fmt_err("invalid sample rate");
}
let stream_info = StreamInfo {
min_block_size: min_block_size,
max_block_size: max_block_size,
min_frame_size: if min_frame_size == 0 {
None
} else {
Some(min_frame_size)
},
max_frame_size: if max_frame_size == 0 {
None
} else {
Some(max_frame_size)
},
sample_rate: sample_rate,
channels: n_channels as u32,
bits_per_sample: bits_per_sample as u32,
samples: if n_samples == 0 {
None
} else {
Some(n_samples)
},
md5sum: md5sum,
};
Ok(stream_info)
}
fn read_vorbis_comment_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<VorbisComment> {
if length < 8 {
// We expect at a minimum a 32-bit vendor string length, and a 32-bit
// comment count.
return fmt_err("Vorbis comment block is too short")
}
// Fail if the length of the Vorbis comment block is larger than 10 MiB. This
// block is full of length-prefixed strings for which we allocate memory up
// front. If there were no limit on these, a maliciously crafted file could
// cause OOM by claiming to contain large strings. But at least the strings
// cannot be longer than the size of the Vorbis comment block, and by
// limiting the size of that block, we can mitigate such DoS attacks.
//
// The typical size of a Vorbis comment block is 1 KiB; on a corpus of
// real-world flac files, the 0.05 and 0.95 quantiles were 792 and 1257
// bytes respectively, with even the 0.99 quantile below 2 KiB. The only
// reason for having a large Vorbis comment block is when cover art is
// incorrectly embedded there, but the Vorbis comment block is not the right
// place for that anyway.
if length > 10 * 1024 * 1024 {
let msg = "Vorbis comment blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
// The Vorbis comment block starts with a length-prefixed "vendor string".
// It cannot be larger than the block length - 8, because there are the
// 32-bit vendor string length, and comment count.
let vendor_len = try!(input.read_le_u32());
if vendor_len > length - 8 { return fmt_err("vendor string too long") }
let mut vendor_bytes = Vec::with_capacity(vendor_len as usize);
// We can safely set the length of the vector here; the uninitialized memory
// is not exposed. If `read_into` succeeds, it will have overwritten all
// bytes. If not, an error is returned and the memory is never exposed.
unsafe { vendor_bytes.set_len(vendor_len as usize); }
try!(input.read_into(&mut vendor_bytes));
let vendor = try!(String::from_utf8(vendor_bytes));
// Next up is the number of comments. Because every comment is at least 4
// bytes to indicate its length, there cannot be more comments than the
// length of the block divided by 4. This is only an upper bound to ensure
// that we don't allocate a big vector, to protect against DoS attacks.
let mut comments_len = try!(input.read_le_u32());
if comments_len >= length / 4 {
return fmt_err("too many entries for Vorbis comment block")
}
let mut comments = Vec::with_capacity(comments_len as usize);
let mut bytes_left = length - 8 - vendor_len;
// For every comment, there is a length-prefixed string of the form
// "NAME=value".
while bytes_left >= 4 && comments.len() < comments_len as usize {
let comment_len = try!(input.read_le_u32());
bytes_left -= 4;
if comment_len > bytes_left {
return fmt_err("Vorbis comment too long for Vorbis comment block")
}
// Some older versions of libflac allowed writing zero-length Vorbis
// comments. Although such files are invalid, they do occur in the wild,
// so we skip over the empty comment.
if comment_len == 0 {
// Does not overflow because `comments_len > comments.len() >= 0`.
comments_len -= 1;
continue;
}
// For the same reason as above, setting the length is safe here.
let mut comment_bytes = Vec::with_capacity(comment_len as usize);
unsafe { comment_bytes.set_len(comment_len as usize); }
try!(input.read_into(&mut comment_bytes));
bytes_left -= comment_len;
if let Some(sep_index) = comment_bytes.iter().position(|&x| x == b'=') {
{
let name_bytes = &comment_bytes[..sep_index];
// According to the Vorbis spec, the field name may consist of ascii
// bytes 0x20 through 0x7d, 0x3d (`=`) excluded. Verifying this has
// the advantage that if the check passes, the result is valid
// UTF-8, so the conversion to string will not fail.
if name_bytes.iter().any(|&x| x < 0x20 || x > 0x7d) {
return fmt_err("Vorbis comment field name contains invalid byte")
}
}
let comment = try!(String::from_utf8(comment_bytes));
comments.push((comment, sep_index));
} else {
return fmt_err("Vorbis comment does not contain '='")
}
}
if bytes_left != 0 {
return fmt_err("Vorbis comment block has excess data")
}
if comments.len() != comments_len as usize {
return fmt_err("Vorbis comment block contains wrong number of entries")
}
let vorbis_comment = VorbisComment {
vendor: vendor,
comments: comments,
};
Ok(vorbis_comment)
}
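// Illustrative layout (hypothetical values, not quoted from the spec) of a
// minimal block as read above by `read_vorbis_comment_block`: a vendor string
// "abc" and a single comment "ARTIST=x" occupy 23 bytes in total:
//
//   03 00 00 00 61 62 63                   little-endian vendor length, "abc"
//   01 00 00 00                            little-endian comment count
//   08 00 00 00 41 52 54 49 53 54 3d 78    comment length, "ARTIST=x"
//
// For that block, `bytes_left` starts at 23 - 8 - 3 = 12 and is consumed
// exactly by the 4-byte length prefix plus the 8-byte comment.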
fn read_padding_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<()> {
// The specification dictates that all bits of the padding block must be 0.
// However, the reference implementation does not issue an error when this
// is not the case, and frankly, when you are going to skip over these
// bytes and do nothing with them whatsoever, why waste all those CPU
// cycles checking that the padding is valid?
Ok(try!(input.skip(length)))
}
fn read_application_block<R: ReadBytes>(input: &mut R, length: u32) -> Result<(u32, Vec<u8>)> {
if length < 4 {
return fmt_err("application block length must be at least 4 bytes")
}
// Reject large application blocks to avoid memory-based denial-
// of-service attacks. See also the more elaborate motivation in
// `read_vorbis_comment_block()`.
if length > 10 * 1024 * 1024 {
let msg = "application blocks larger than 10 MiB are not supported";
return Err(Error::Unsupported(msg))
}
let id = try!(input.read_be_u32());
// Four bytes of the block have been used for the ID, the rest is payload.
// Create a vector of uninitialized memory, and read the block into it. The
// uninitialized memory is never exposed: read_into will either fill the
// buffer completely, or return an error, in which case the memory is not
// exposed.
let mut data = Vec::with_capacity(length as usize - 4);
unsafe { data.set_len(length as usize - 4); }
try!(input.read_into(&mut data));
Ok((id, data))
}
/// Reads metadata blocks from a stream and exposes them as an iterator.
///
/// It is assumed that the next byte that the reader will read is the first
/// byte of a metadata block header. This means that the iterator will yield at
/// least a single value. If the iterator ever yields an error, then no more
/// data will be read thereafter, and the next value will be `None`.
pub struct MetadataBlockReader<R: ReadBytes> {
input: R,
done: bool,
}
/// Either a `MetadataBlock` or an `Error`.
pub type MetadataBlockResult = Result<MetadataBlock>;
impl<R: ReadBytes> MetadataBlockReader<R> {
/// Creates a metadata block reader that will yield at least one element.
pub fn new(inpu | t: | identifier_name |
|
metadata.rs | : u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
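// For example (illustrative values): a stored pair ("ARTIST=Queen", 6) yields
// ("ARTIST", "Queen") here, since index 6 is the '=' separator itself.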
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
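// Usage sketch (hypothetical names, assuming a `FlacReader` value `reader`):
// `reader.get_tag("artist")` would yield the value of every ARTIST=... comment,
// because the field name comparison above is ASCII case-insensitive.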
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
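// Worked example (illustrative) for the header decoded above: a header byte of
// 0x84 = 0b1000_0100 gives is_last = true and block_type = 4 (a Vorbis comment
// block); the 24-bit big-endian length of the block body follows in the next
// three bytes.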
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
| 2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = try!(input.read_u8());
// Stitch together these values, add 1 because # - 1 is stored.
let bits_per_sample = (bps_msb << 4 | (bps_lsb_n_samples >> 4)) + 1;
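// Worked example (illustrative): for a hypothetical 44.1 kHz, stereo, 16-bit
// stream, `sample_rate_msb` is 0x0ac4 and `sample_rate_lsb` is 0x42, so
// sample_rate = (0x0ac4 << 4) | 0x4 = 44100 and n_channels = 0b001 + 1 = 2;
// if the next byte starts with the nibble 0b1111, bits_per_sample = 16.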
// Number of samples in 36 bits, we have 4 already, | try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
| conditional_block |
metadata.rs | /// The number of padding bytes.
length: u32,
},
/// An application block with application-specific data.
Application {
/// The registered application ID.
id: u32,
/// The contents of the application block.
data: Vec<u8>,
},
/// A seek table block.
SeekTable(SeekTable),
/// A Vorbis comment block, also known as FLAC tags.
VorbisComment(VorbisComment),
/// A CUE sheet block.
CueSheet, // TODO
/// A picture block.
Picture, // TODO
/// A block with a reserved block type, not supported by this library.
Reserved,
}
/// Iterates over Vorbis comments (FLAC tags) in a FLAC stream.
///
/// See `FlacReader::tags()` for more details.
pub struct Tags<'a> {
/// The underlying iterator.
iter: slice::Iter<'a, (String, usize)>,
}
impl<'a> Tags<'a> {
/// Returns a new `Tags` iterator.
#[inline]
pub fn new(comments: &'a [(String, usize)]) -> Tags<'a> {
Tags {
iter: comments.iter(),
}
}
}
impl<'a> Iterator for Tags<'a> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
return self.iter.next().map(|&(ref comment, sep_idx)| {
(&comment[..sep_idx], &comment[sep_idx+1..])
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<'a> ExactSizeIterator for Tags<'a> {}
/// Iterates over Vorbis comments looking for a specific one; returns its values as `&str`.
///
/// See `FlacReader::get_tag()` for more details.
pub struct GetTag<'a> {
/// The Vorbis comments to search through.
vorbis_comments: &'a [(String, usize)],
/// The tag to look for.
needle: &'a str,
/// The index of the (name, value) pair that should be inspected next.
index: usize,
}
impl<'a> GetTag<'a> {
/// Returns a new `GetTag` iterator.
#[inline]
pub fn new(vorbis_comments: &'a [(String, usize)], needle: &'a str) -> GetTag<'a> {
GetTag {
vorbis_comments: vorbis_comments,
needle: needle,
index: 0,
}
}
}
impl<'a> Iterator for GetTag<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
// This import is actually required on Rust 1.13.
#[allow(unused_imports)]
use std::ascii::AsciiExt;
while self.index < self.vorbis_comments.len() {
let (ref comment, sep_idx) = self.vorbis_comments[self.index];
self.index += 1;
if comment[..sep_idx].eq_ignore_ascii_case(self.needle) {
return Some(&comment[sep_idx + 1..])
}
}
return None
}
}
#[inline]
fn read_metadata_block_header<R: ReadBytes>(input: &mut R) -> Result<MetadataBlockHeader> {
let byte = try!(input.read_u8());
// The first bit specifies whether this is the last block, the next 7 bits
// specify the type of the metadata block to follow.
let is_last = (byte >> 7) == 1;
let block_type = byte & 0b0111_1111;
// The length field is 24 bits, or 3 bytes.
let length = try!(input.read_be_u24());
let header = MetadataBlockHeader {
is_last: is_last,
block_type: block_type,
length: length,
};
Ok(header)
}
/// Read a single metadata block header and body from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the Ogg format embeds
/// metadata blocks including their header verbatim in packets. This function
/// can be used to decode that raw data.
#[inline]
pub fn read_metadata_block_with_header<R: ReadBytes>(input: &mut R)
-> Result<MetadataBlock> {
let header = try!(read_metadata_block_header(input));
read_metadata_block(input, header.block_type, header.length)
}
/// Read a single metadata block of the given type and length from the input.
///
/// When reading a regular flac stream, there is no need to use this function
/// directly; constructing a `FlacReader` will read the header and its metadata
/// blocks.
///
/// When a flac stream is embedded in a container format, this function can be
/// used to decode a single metadata block. For instance, the MP4 format sports
/// a “FLAC Specific Box” which contains the block type and the raw data. This
/// function can be used to decode that raw data.
#[inline]
pub fn read_metadata_block<R: ReadBytes>(input: &mut R,
block_type: u8,
length: u32)
-> Result<MetadataBlock> {
match block_type {
0 => {
// The streaminfo block has a fixed size of 34 bytes.
if length == 34 {
let streaminfo = try!(read_streaminfo_block(input));
Ok(MetadataBlock::StreamInfo(streaminfo))
} else {
fmt_err("invalid streaminfo metadata block length")
}
}
1 => {
try!(read_padding_block(input, length));
Ok(MetadataBlock::Padding { length: length })
}
2 => {
let (id, data) = try!(read_application_block(input, length));
Ok(MetadataBlock::Application {
id: id,
data: data,
})
}
3 => {
// TODO: implement seektable reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
4 => {
let vorbis_comment = try!(read_vorbis_comment_block(input, length));
Ok(MetadataBlock::VorbisComment(vorbis_comment))
}
5 => {
// TODO: implement CUE sheet reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
6 => {
// TODO: implement picture reading. For now, pretend it is padding.
try!(input.skip(length));
Ok(MetadataBlock::Padding { length: length })
}
127 => {
// This code is invalid to avoid confusion with a frame sync code.
fmt_err("invalid metadata block type")
}
_ => {
// Any other block type is 'reserved' at the moment of writing. The
// reference implementation reads it as an 'unknown' block. That is
// one way of handling it, but maybe there should be some kind of
// 'strict' mode (configurable at compile time?) so that this can
// be an error if desired.
try!(input.skip(length));
Ok(MetadataBlock::Reserved)
}
}
}
fn read_streaminfo_block<R: ReadBytes>(input: &mut R) -> Result<StreamInfo> {
let min_block_size = try!(input.read_be_u16());
let max_block_size = try!(input.read_be_u16());
// The frame size fields are 24 bits, or 3 bytes.
let min_frame_size = try!(input.read_be_u24());
let max_frame_size = try!(input.read_be_u24());
// Next up are 20 bits that determine the sample rate.
let sample_rate_msb = try!(input.read_be_u16());
let sample_rate_lsb = try!(input.read_u8());
// Stitch together the value from the first 16 bits,
// and then the 4 most significant bits of the next byte.
let sample_rate = (sample_rate_msb as u32) << 4 | (sample_rate_lsb as u32) >> 4;
// Next three bits are the number of channels - 1. Mask them out and add 1.
let n_channels_bps = sample_rate_lsb;
let n_channels = ((n_channels_bps >> 1) & 0b0000_0111) + 1;
// The final bit is the most significant of bits per sample - 1. Bits per
// sample - 1 is 5 bits in total.
let bps_msb = n_channels_bps & 1;
let bps_lsb_n_samples = | pub enum MetadataBlock {
/// A stream info block.
StreamInfo(StreamInfo),
/// A padding block (with no meaningful data).
Padding { | random_line_split |
|
test.py | L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum number of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
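# For example (illustrative path): a checkpoint saved as
# '<logdir>/model.ckpt-12000' resolves to a global step of 12000 here.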
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold,
pad = False,
path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
rand = False)
audio_batch = reader.dequeue(args.batch_size)
audio_batch_file = reader.dequeue_str(args.batch_size)
audio_batch = tf.squeeze(audio_batch)
one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"])
audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
'constant')
audio_batch = tf.expand_dims(audio_batch, 0)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = [WaveNetModel(
batch_size=args.batch_size,
dilations=one_params["dilations"],
filter_width=one_params["filter_width"],
residual_channels=one_params["residual_channels"],
dilation_channels=one_params["dilation_channels"],
skip_channels=one_params["skip_channels"],
quantization_channels=one_params["quantization_channels"],
use_biases=one_params["use_biases"],
scalar_input=one_params["scalar_input"],
initial_filter_width=one_params["initial_filter_width"],
histograms=args.histograms,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality,
namespace = str(one_params_i))
for one_params_i,one_params in enumerate(wavenet_params)]
post_par = []
for one_params_i, one_params in enumerate(wavenet_params):
with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, 64, 32])
current['postprocess2'] = create_variable(
'postprocess2',
[1, 32, 3])
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[32])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[3])
post_par.append(current)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
#compute
loss_list = []
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
optim_list = []
raw_output_list = []
audio_batch_list = []
loss_all_list = []
for one_params_i, _ in enumerate(wavenet_params):
with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
if one_params_i==0:
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
audio_batch_list.append(audio_batch)
else:
# Apply the compensation result from the previous stage to the input of the next stage
raw_output = tf.pad(raw_output, [[one_ | receptive_field-1, 0], [0, 0]])
raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
raw_output = tf.expand_dims(raw_output, 0)
audio_batch = audio_batch - raw_output
audio_batch_list.append(audio_batch)
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
# Perform (+) -> ReLU -> | conditional_block |
|
test.py | 0], uvd_pt2[2, 1], uvd_pt2[2, 2], s=10, c='r')
ax.plot([uvd_pt2[0, 0], uvd_pt2[1, 0]],
[uvd_pt2[0, 1], uvd_pt2[1, 1]],
[uvd_pt2[0, 2], uvd_pt2[1, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[1, 0], uvd_pt2[2, 0]],
[uvd_pt2[1, 1], uvd_pt2[2, 1]],
[uvd_pt2[1, 2], uvd_pt2[2, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[2, 0], uvd_pt2[0, 0]],
[uvd_pt2[2, 1], uvd_pt2[0, 1]],
[uvd_pt2[2, 2], uvd_pt2[0, 2]], color='r', linewidth=1)
plt.savefig(path+str(test_num).zfill(7)+".png")
def get_arguments():
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore '
'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
#parser.add_argument('--wavenet_params', type=list, default=WAVENET_PARAMS,
# help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum number of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def | (args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2 | validate_directories | identifier_name |
test.py | 0], uvd_pt2[2, 1], uvd_pt2[2, 2], s=10, c='r')
ax.plot([uvd_pt2[0, 0], uvd_pt2[1, 0]],
[uvd_pt2[0, 1], uvd_pt2[1, 1]],
[uvd_pt2[0, 2], uvd_pt2[1, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[1, 0], uvd_pt2[2, 0]],
[uvd_pt2[1, 1], uvd_pt2[2, 1]],
[uvd_pt2[1, 2], uvd_pt2[2, 2]], color='r', linewidth=1)
ax.plot([uvd_pt2[2, 0], uvd_pt2[0, 0]],
[uvd_pt2[2, 1], uvd_pt2[0, 1]],
[uvd_pt2[2, 2], uvd_pt2[0, 2]], color='r', linewidth=1)
plt.savefig(path+str(test_num).zfill(7)+".png")
def get_arguments():
| 'the state and will continue training. '
'Cannot use with --logdir_root and --restore_from.')
parser.add_argument('--logdir_root', type=str, default=None,
help='Root directory to place the logging '
'output and generated model. These are stored '
'under the dated subdirectory of --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--restore_from', type=str, default=None,
help='Directory in which to restore the model from. '
'This creates the new model under the dated directory '
'in --logdir_root. '
'Cannot use with --logdir.')
parser.add_argument('--checkpoint_every', type=int,
default=CHECKPOINT_EVERY,
help='How many steps to save each checkpoint after. Default: ' + str(CHECKPOINT_EVERY) + '.')
parser.add_argument('--num_steps', type=int, default=NUM_STEPS,
help='Number of training steps. Default: ' + str(NUM_STEPS) + '.')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate for training. Default: ' + str(LEARNING_RATE) + '.')
#parser.add_argument('--wavenet_params', type=list, default=WAVENET_PARAMS,
# help='JSON file with the network parameters. Default: ' + WAVENET_PARAMS + '.')
parser.add_argument('--sample_size', type=int, default=SAMPLE_SIZE,
help='Concatenate and cut audio samples to this many '
'samples. Default: ' + str(SAMPLE_SIZE) + '.')
parser.add_argument('--l2_regularization_strength', type=float,
default=L2_REGULARIZATION_STRENGTH,
help='Coefficient in the L2 regularization. '
'Default: False')
parser.add_argument('--silence_threshold', type=float,
default=SILENCE_THRESHOLD,
help='Volume threshold below which to trim the start '
'and the end from the training set samples. Default: ' + str(SILENCE_THRESHOLD) + '.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=optimizer_factory.keys(),
help='Select the optimizer specified by this option. Default: adam.')
parser.add_argument('--momentum', type=float,
default=MOMENTUM, help='Specify the momentum to be '
'used by sgd or rmsprop optimizer. Ignored by the '
'adam optimizer. Default: ' + str(MOMENTUM) + '.')
parser.add_argument('--histograms', type=_str_to_bool, default=False,
help='Whether to store histogram summaries. Default: False')
parser.add_argument('--gc_channels', type=int, default=None,
help='Number of global condition channels. Default: None. Expecting: Int')
parser.add_argument('--max_checkpoints', type=int, default=MAX_TO_KEEP,
help='Maximum number of checkpoints that will be kept alive. Default: '
+ str(MAX_TO_KEEP) + '.')
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
print('Storing checkpoint to {} ...'.format(logdir), end="")
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print(' Done.')
def load(saver, sess, logdir):
print("Trying to restore saved checkpoints from {} ...".format(logdir),
end="")
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
print(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
print(" Global step was: {}".format(global_step))
print(" Restoring...", end="")
saver.restore(sess, ckpt.model_checkpoint_path)
print(" Done.")
return global_step
else:
print(" No checkpoint found.")
return None
def get_default_logdir(logdir_root):
logdir = os.path.join(logdir_root, 'train', STARTED_DATESTRING)
return logdir
def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2 | def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser(description='WaveNet example network')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='How many wav files to process at once. Default: ' + str(BATCH_SIZE) + '.')
parser.add_argument('--data_dir', type=str, default=DATA_DIRECTORY,
help='The directory containing the VCTK corpus.')
parser.add_argument('--store_metadata', type=bool, default=METADATA,
help='Whether to store advanced debugging information '
'(execution time, memory consumption) for use with '
'TensorBoard. Default: ' + str(METADATA) + '.')
parser.add_argument('--logdir', type=str, default=None,
help='Directory in which to store the logging '
'information for TensorBoard. '
'If the model already exists, it will restore ' | identifier_body |
test.py | def validate_directories(args):
"""Validate and arrange directory related arguments."""
# Validation
if args.logdir and args.logdir_root:
raise ValueError("--logdir and --logdir_root cannot be "
"specified at the same time.")
if args.logdir and args.restore_from:
raise ValueError(
"--logdir and --restore_from cannot be specified at the same "
"time. This is to keep your previous model from unexpected "
"overwrites.\n"
"Use --logdir_root to specify the root of the directory which "
"will be automatically created with current date and time, or use "
"only --logdir to just continue the training from the last "
"checkpoint.")
# Arrangement
logdir_root = args.logdir_root
if logdir_root is None:
logdir_root = LOGDIR_ROOT
logdir = args.logdir
if logdir is None:
logdir = get_default_logdir(logdir_root)
print('Using default logdir: {}'.format(logdir))
restore_from = args.restore_from
if restore_from is None:
# args.logdir and args.restore_from are exclusive,
# so it is guaranteed the logdir here is newly created.
restore_from = logdir
return {
'logdir': logdir,
'logdir_root': args.logdir_root,
'restore_from': restore_from
}
istest = False
def main():
args = get_arguments()
try:
directories = validate_directories(args)
except ValueError as e:
print("Some arguments are wrong:")
print(str(e))
return
logdir = directories['logdir']
restore_from = directories['restore_from']
# Even if we restored the model, we will treat it as new training
# if the trained model is written into an arbitrary location.
is_overwritten_training = logdir != restore_from
wavenet_params = []
for file_name in WAVENET_PARAMS:
with open(file_name, 'r') as f:
wavenet_params.append(json.load(f))
# Create coordinator.
coord = tf.train.Coordinator()
# Load raw waveform from VCTK corpus.
with tf.name_scope('create_inputs'):
# Allow silence trimming to be skipped by specifying a threshold near
# zero.
silence_threshold = args.silence_threshold if args.silence_threshold > \
EPSILON else None
gc_enabled = args.gc_channels is not None
reader = AudioReader(
args.data_dir,
coord,
sample_rate=wavenet_params[2]['sample_rate'],
gc_enabled=gc_enabled,
receptive_field=WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"]),
sample_size=args.sample_size,
silence_threshold=silence_threshold,
pad = False,
path = "/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/pakinson/degree_dataset/3",
rand = False)
audio_batch = reader.dequeue(args.batch_size)
audio_batch_file = reader.dequeue_str(args.batch_size)
audio_batch = tf.squeeze(audio_batch)
one_receptive_field = WaveNetModel.calculate_receptive_field(wavenet_params[2]["filter_width"],
wavenet_params[2]["dilations"],
wavenet_params[2]["scalar_input"],
wavenet_params[2]["initial_filter_width"])
audio_batch = tf.pad(audio_batch, [[one_receptive_field, 0], [0, 0]],
'constant')
audio_batch = tf.expand_dims(audio_batch, 0)
if gc_enabled:
gc_id_batch = reader.dequeue_gc(args.batch_size)
else:
gc_id_batch = None
# Create network.
net = [WaveNetModel(
batch_size=args.batch_size,
dilations=one_params["dilations"],
filter_width=one_params["filter_width"],
residual_channels=one_params["residual_channels"],
dilation_channels=one_params["dilation_channels"],
skip_channels=one_params["skip_channels"],
quantization_channels=one_params["quantization_channels"],
use_biases=one_params["use_biases"],
scalar_input=one_params["scalar_input"],
initial_filter_width=one_params["initial_filter_width"],
histograms=args.histograms,
global_condition_channels=args.gc_channels,
global_condition_cardinality=reader.gc_category_cardinality,
namespace = str(one_params_i))
for one_params_i,one_params in enumerate(wavenet_params)]
post_par = []
for one_params_i, one_params in enumerate(wavenet_params):
with tf.variable_scope('postprocessing_'+'stage_id_'+str(one_params_i)):
current = dict()
current['postprocess1'] = create_variable(
'postprocess1',
[1, 64, 32])
current['postprocess2'] = create_variable(
'postprocess2',
[1, 32, 3])
current['postprocess1_bias'] = create_bias_variable(
'postprocess1_bias',
[32])
current['postprocess2_bias'] = create_bias_variable(
'postprocess2_bias',
[3])
post_par.append(current)
if args.l2_regularization_strength == 0:
args.l2_regularization_strength = None
#compute
loss_list = []
optimizer = optimizer_factory[args.optimizer](
learning_rate=args.learning_rate,
momentum=args.momentum)
optim_list = []
raw_output_list = []
audio_batch_list = []
loss_all_list = []
for one_params_i, _ in enumerate(wavenet_params):
with tf.name_scope('stage_' + str(one_params_i) + '_postcompute'):
if one_params_i==0:
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
audio_batch_list.append(audio_batch)
else:
# Apply the compensation result from the previous stage to the input of the next stage
raw_output = tf.pad(raw_output, [[one_receptive_field-1, 0], [0, 0]])
raw_output = tf.concat([raw_output, raw_output, raw_output, raw_output], axis=1)
raw_output = tf.pad(raw_output, [[0, 0], [0, 6]])
raw_output = tf.expand_dims(raw_output, 0)
audio_batch = audio_batch - raw_output
audio_batch_list.append(audio_batch)
raw_output, network_label = net[one_params_i].pre_loss(input_batch=audio_batch,
global_condition_batch=gc_id_batch,
l2_regularization_strength=args.l2_regularization_strength)
# Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
# postprocess the output.
w1 = post_par[one_params_i]['postprocess1']
w2 = post_par[one_params_i]['postprocess2']
b1 = post_par[one_params_i]['postprocess1_bias']
b2 = post_par[one_params_i]['postprocess2_bias']
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w1, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b1)
raw_output = tf.nn.relu(raw_output)
raw_output = tf.nn.conv1d(raw_output, w2, stride=1, padding="SAME")
raw_output = tf.add(raw_output, b2)
raw_output = tf.squeeze(raw_output)
raw_output_list.append(raw_output)
network_label = tf.squeeze(network_label)
# Set up logging for TensorBoard.
writer = tf.summary.FileWriter(logdir)
writer.add_graph(tf.get_default_graph())
run_metadata = tf.RunMetadata()
summaries = tf.summary.merge_all()
# Set up session
#config = tf.ConfigProto(log_device_placement=False)
#config.gpu_options.allow_growth = True
tf_config = tf.ConfigProto()
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=tf_config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=args.max_checkpoints)
try:
saved_global_step = load(saver, sess, restore_from)
if is_overwritten_training or saved_global_step is None:
# The first training step will be saved_global_step + 1,
# therefore we put -1 here for new or overwritten trainings.
saved_global_step = -1
except:
print("Something went wrong while restoring checkpoint. "
"We will terminate training to avoid accidentally overwriting "
"the previous model.")
raise
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
reader.start_threads(sess)
step = None
last_saved_step = saved_global_step
from tqdm import tqdm
try:
pass_loss = 0.0
for step in tqdm(range(90400)):
''' | if step < args.num_steps*0.0:
muti_step_id = 0 | random_line_split |
|
orgreminders.go | Test
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func | (w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
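// Illustrative example (hypothetical form values): remqty[] = ["1", "3"] and
// remtyp[] = ["d", "h"] would add the entries "1d" and "3h" here, assuming
// Reminders.Add accepts such quantity+unit strings.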
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
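// Go time layouts describe the format by example, using the reference time
// Jan 2, 2006 at 3:04 PM; this layout therefore parses values such as
// "07/04/2025 6:30pm" (illustrative) in the organization's time zone.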
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing == true {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && u.SuperUser == false {
ok = false
}
}
if ok {
uorgs := GetOrganizationsBy | LogoutHandler | identifier_name |
orgreminders.go | SuperUser bool
LoggedIn bool
UserEmail string
Orgs []string
Members map[string]Member
SavedEvent bool
SavedOrg bool
SavedMember bool
Member2Edit Member
Member2EditKey string
ScheduleHTML map[string][]string
}
func NewPage(u *User) (*Page, error) {
var result = Page{}
if u.Meta != nil {
result.LoggedIn = true
result.AllowNewOrg = true
result.UserEmail = u.Meta.Email
if u.SuperUser {
result.SuperUser = true
}
}
return &result, nil
}
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
err := Templates.ExecuteTemplate(w, tmpl+".html", p)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func init() {
templTest, err := template.ParseFiles(TemplateFiles...)
if err != nil {
log.Println("Some (or all) of the required templates are missing, exiting: ", err.Error())
return
}
Templates = templTest
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Org | AllowNewOrg bool | random_line_split |
|
orgreminders.go | Test
http.HandleFunc("/", DefaultHandler)
http.HandleFunc("/newevent", NewEventHandler)
http.HandleFunc("/neworg", NewOrgHandler)
http.HandleFunc("/events", EventsHandler)
http.HandleFunc("/organizations", OrgsHandler)
http.HandleFunc("/saveevent", EventSaveHandler)
http.HandleFunc("/saveorg", OrgSaveHandler)
http.HandleFunc("/editorg", OrgEditHandler)
http.HandleFunc("/editevent", EventEditHandler)
http.HandleFunc("/cron", CronHandler)
http.HandleFunc("/logout", LogoutHandler)
http.HandleFunc("/newmember", NewMemberHandler)
http.HandleFunc("/savemember", MemberSaveHandler)
http.HandleFunc("/members", MembersHandler)
http.HandleFunc("/editmember", MemberEditHandler)
}
func LogoutHandler(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
url, _ := user.LogoutURL(c, "/")
http.Redirect(w, r, url, http.StatusFound)
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "home"
renderTemplate(w, title, p)
}
func NewOrgHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-org"
renderTemplate(w, title, p)
}
func NewEventHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-event"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func EventSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
event := NewEvent()
event.Title = r.PostFormValue("title")
event.EmailMessage = template.HTML(r.PostFormValue("emailmessage"))
event.TextMessage = r.PostFormValue("textmessage")
event.Submitter = *u.Meta
event.Orgs = r.PostForm["orgs"]
if len(event.Orgs) < 1 {
p.Error = "You must choose an organization."
renderTemplate(w, "error", p)
return
}
if r.PostFormValue("sendemail") == "on" {
event.Email = true
}
if r.PostFormValue("sendtext") == "on" {
event.Text = true
}
// save reminder schedule
var remqtys = r.PostForm["remqty[]"]
var remtyps = r.PostForm["remtyp[]"]
for remkey, remval := range remqtys {
var entry = fmt.Sprintf("%s%s", remval, remtyps[remkey])
event.Reminders.Add(entry)
}
o, err := GetOrganizationByName(c, event.Orgs[0])
if err != nil {
c.Infof("Error: %s", err.Error())
p.Error = err.Error()
renderTemplate(w, "error", p)
return
}
location, _ := time.LoadLocation(o.TimeZone)
const longForm = "01/02/2006 3:04pm"
t, timeerr := time.ParseInLocation(longForm, r.PostFormValue("due"), location)
if timeerr != nil {
http.Error(w, "Invalid time string", http.StatusInternalServerError)
return
}
event.Due = t
event.Key = r.PostFormValue("key")
var subject = "Event Saved: "
if event.Key == "" {
_, event.Key = event.Save(c)
} else {
event.Update(c)
subject = "Event Updated: "
}
if r.PostFormValue("oncreate") == "on" {
event.Notify(c, true)
}
event.DueFormatted = event.Due.In(location).Format("01/02/2006 3:04pm")
AdminNotify(c, u.Meta.Email, subject+event.Title, "The following event was just saved: <br><br>"+event.GetHTMLView(c))
p.Event2Edit = event
p.SavedEvent = true
renderTemplate(w, "save", p)
}
func EventEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
ok, p.Event2Edit = GetEventByKey(c, r.FormValue("id"))
if ok {
org, _ := GetOrganizationByName(c, p.Event2Edit.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
p.Event2Edit.DueFormatted = p.Event2Edit.Due.In(location).Format("01/02/2006 3:04pm")
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Event2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
// Extract usable event reminder list
p.ScheduleHTML = p.Event2Edit.Reminders.HTML()
sort.Strings(p.Orgs)
renderTemplate(w, "editevent", p)
} else {
p.Error = "Event not found."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) |
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && !u.SuperUser {
ok = false
}
}
if ok {
uorgs := GetOrganizations | {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
} | identifier_body |
orgreminders.go | ."
renderTemplate(w, "error", p)
}
}
func OrgSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
org := NewOrganization()
org.Name = r.PostFormValue("name")
org.Description = r.PostFormValue("description")
org.Active = true
org.Expires = time.Now().UTC().Add(Duration_Week)
org.Administrator = strings.Split(r.PostFormValue("admin"), "\r\n")
org.TimeZone = r.PostFormValue("timezone")
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving org")
org.Save(c)
} else {
c.Infof("updating org")
org.Update(c, key)
}
p.SavedOrg = true
p.Org2Edit = org
p.Org2EditKey = key
renderTemplate(w, "save", p)
}
func OrgEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
p.Org2EditKey = r.FormValue("id")
p.Org2Edit = GetOrganizationByKey(c, p.Org2EditKey)
renderTemplate(w, "editorg", p)
}
func EventsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
events := org.GetEvents(c, true)
location, _ := time.LoadLocation(org.TimeZone)
for indx, event := range events {
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[indx] = event
}
}
renderTemplate(w, "events", p)
}
func OrgsHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
mapResults := make(map[string]Organization)
for indx, org := range u.Orgs {
org.Members = org.GetMembers(c)
mapResults[indx] = org
}
p.Organizations = mapResults
renderTemplate(w, "organizations", p)
}
func MembersHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Members = make(map[string]Member)
c := appengine.NewContext(r)
for _, org := range u.Orgs {
members := org.GetMembers(c)
for indx, member := range members {
p.Members[indx] = member
}
}
renderTemplate(w, "members", p)
}
func NewMemberHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
title := "new-member"
for _, org := range u.Orgs {
p.Orgs = append(p.Orgs, org.Name)
}
sort.Strings(p.Orgs)
renderTemplate(w, title, p)
}
func MemberEditHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
var ok bool
p.Member2EditKey = r.FormValue("id")
ok, p.Member2Edit = GetMemberByKey(c, p.Member2EditKey)
// Protect web users
if ok && p.Member2Edit.WebUser {
if u.Meta.Email != p.Member2Edit.Email && !u.SuperUser {
ok = false
}
}
if ok {
uorgs := GetOrganizationsByUser(c, u.Meta.Email)
for _, uorg := range uorgs {
missing := true
for _, porg := range p.Member2Edit.Orgs {
if uorg.Name == porg {
missing = false
break
}
}
if missing {
p.Orgs = append(p.Orgs, uorg.Name)
}
}
sort.Strings(p.Orgs)
renderTemplate(w, "editmember", p)
} else {
p.Error = "Member not found or access denied."
renderTemplate(w, "error", p)
}
}
func MemberSaveHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
c := appengine.NewContext(r)
r.ParseForm()
member := Member{}
member.Name = r.PostFormValue("name")
member.Email = r.PostFormValue("email")
member.Cell = r.PostFormValue("cell")
member.Carrier = r.PostFormValue("carrier")
member.TextAddr = GenTextAddr(member.Cell, member.Carrier)
member.Orgs = r.PostForm["orgs"]
if r.PostFormValue("emailon") == "on" {
member.EmailOn = true
}
if r.PostFormValue("texton") == "on" {
member.TextOn = true
}
if u.SuperUser && r.PostFormValue("webuser") == "on" {
member.WebUser = true
}
// Must have or don't save
if len(r.PostForm["orgs"]) <= 0 && member.WebUser == false {
p.Error = "Cannot save without an organization."
renderTemplate(w, "error", p)
return
}
key := r.PostFormValue("key")
if key == "" {
c.Infof("saving member")
_, key = member.Save(c)
} else {
c.Infof("updating member")
member.Update(c, key)
}
p.Member2Edit = member
p.Member2EditKey = key
p.SavedMember = true
renderTemplate(w, "save", p)
}
func AdminNotify(c appengine.Context, creator string, subject string, message string) {
var appid = appengine.AppID(c)
msg := &mail.Message{
Sender: "orgreminders@" + appid + ".appspotmail.com",
Subject: subject,
HTMLBody: message,
To: []string{creator},
}
c.Infof("notify (%s): %v", subject, creator)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
}
}
func SendOrgMessage(c appengine.Context, o Organization, e Event, t string) (result bool) {
var appid = appengine.AppID(c)
var senderUserName = strings.Replace(o.Name, " ", "_", -1)
var sender = fmt.Sprintf("%s Reminders <%s@%s.appspotmail.com>", o.Name, senderUserName, appid)
members := o.GetMembers(c)
recipients := []string{}
for _, m := range members {
if t == "email" && m.EmailOn {
recipients = append(recipients, m.Email)
} else if t == "text" && m.TextOn {
recipients = append(recipients, m.TextAddr)
}
}
if len(recipients) == 0 {
c.Infof("No recipients, not sending reminder (" + t + ")")
result = true
return
}
// get rid of duplicate recipients
recipients = removeDuplicates(recipients)
msg := &mail.Message{
Sender: sender,
Bcc: recipients,
Subject: e.Title,
Body: e.TextMessage,
HTMLBody: string(e.EmailMessage),
}
c.Infof("notify (%s): %v", e.Title, recipients)
if err := mail.Send(c, msg); err != nil {
c.Errorf("Couldn't send email: %v", err)
} else {
result = true
}
return
}
func CronHandler(w http.ResponseWriter, r *http.Request) {
u := UserLookup(w, r)
p, _ := NewPage(&u)
p.Events = make(map[string]Event)
c := appengine.NewContext(r)
events := GetAllEvents(c, true) // active only
//c.Infof("# events to check for cron: %v", len(events))
for key, event := range events {
//c.Infof("checking event: %s", event.Title)
res := event.Notify(c, false)
if res {
org, _ := GetOrganizationByName(c, event.Orgs[0])
location, _ := time.LoadLocation(org.TimeZone)
event.Due = event.Due.In(location)
event.DueFormatted = event.Due.Format("01/02/2006 3:04pm")
p.Events[key] = event
}
}
renderTemplate(w, "cron", p)
}
// from: https://groups.google.com/d/msg/golang-nuts/-pqkICuokio/KqJ0091EzVcJ
func removeDuplicates(a []string) []string {
result := []string{}
seen := map[string]string{}
for _, val := range a | {
if _, ok := seen[val]; !ok {
result = append(result, val)
seen[val] = val
}
} | conditional_block |
|
index.ts | }
export type PhotoBuffObj = {
label: string;
description: string;
formattedDate: string;
base64Str: any;
}
export type SignatureObj = {
uri: string;
timeStamp: number;
formattedDate: string;
s3Key: string;
}
export type Signatures = {
nonWaiverInsuredSignature: SignatureObj;
nonWaiverAdjusterSignature: SignatureObj;
advPaymentSignatureInsured: SignatureObj;
advPaymentSignatureAdjuster: SignatureObj;
handbookInsuredSignature: SignatureObj;
handbookAdjusterSignature: SignatureObj;
authorizedRepSignature: SignatureObj;
initialsConfirmAddress: SignatureObj;
initialsConfirmMortgagee: SignatureObj;
}
export type PriorLossObj = {
id: string;
timestamp: number;
formattedDate: string;
insuredAtLoss: boolean;
repairs: boolean;
lossAmount: string;
}
export type CertificationObj = {
state: string;
licenseNumber: string;
expDate: string
}
export type PDFObject = {
name: string;
uri: string;
s3Key: string;
docStatus: string;
}
export type UnderwritingState = {
"insuredFirstName": string;
"insuredLastName": string;
"mortgageeName": string;
"adjusterFullName": string;
"claimType": string;
"lossType": string;
"lossDate": number;
"occupancy": string;
"typeOfBuilding": string;
"hasBasement": boolean;
"buildingElevated": boolean;
"residencyType": string;
"floorCount": string;
"floorsOccupiedByInsured": string;
"lossStreet1": string;
"lossStreet2": string;
"lossCity": string;
"lossStateName": string;
"lossZip": string;
}
export type PDFState = {
underwritingReport: PDFObject;
floodLossQuestionnaire: PDFObject;
inspectionReport: PDFObject;
preliminaryReport: PDFObject;
prelimDiagrams: PDFObject;
prelimPhotos: PDFObject;
advancePaymentRequest: PDFObject;
handbookSignature: PDFObject;
surveyorRequest: PDFObject;
cpaRequest: PDFObject;
prelimDamageAssessment: PDFObject;
nonWaiverAgreement: PDFObject;
engineerRequest: PDFObject;
salvorRequest: PDFObject;
}
export type ClaimState = {
version: number;
sha1: string;
versionsList: number[];
claimid: string;
claimNumber: string;
policyNumber: string;
firmDate: number;
postFirm: number;
policyStartDate: number;
policyEndDate: number;
subcatNumber: string;
RNFSPathPrefix: string;
floodControlNumber: string;
adjusterId: string;
adjusterFullName: string;
adjusterPhoneMobile: string;
adjusterPhoneWork: string;
insuredFirstName: string;
insuredLastName: string;
insuredEmail: string;
insuredWorkPhone: string;
insuredPreferredPhone: string;
company: string;
lossType: string;
lossDate: number;
claimType: string;
mortgageVerified: boolean;
ercv: number;
acv: number;
carrier: string;
claimStatus: string;
constructionDate: number;
inspectionDate: number;
coverageA: number;
coverageB: number;
deductibleA: string;
deductibleB: string;
nonWaiver: boolean;
nonWaiverDescription: string;
nonWaiverDay: string;
nonWaiverMonth: string;
nonWaiverYear: string;
occupancy: string;
residencyType: string;
insuredNameCorrect: boolean;
updatedNameReason: string;
insuredPresent: boolean;
insuredIsRepresented: boolean;
insuredRepresentativeName: string;
insuredRepresentativeAddress: string;
insuredRepresentativePhone: string;
hasDetachedGarage: boolean;
hasAppurtenantStructure: boolean;
typeOfBuilding: string;
mobileHomeMake: string;
mobileHomeModel: string;
mobileHomeSerial: string;
foundationStructure: string;
foundationPilesType: string;
otherPilesMaterial: string;
foundationPiersType: string[];
otherPierMaterial: string;
foundationWallsType: string[];
otherWallMaterial: string;
exteriorWallStructure: string[];
otherWallStructure: string;
exteriorWallSurfaceTreatment: string[];
otherExteriorSurfaceTreatment: string;
isUnderConstruction: string;
foundationAreaEnclosure: string;
hasBasement: boolean;
basementType: string;
basementHeightInches: string;
determineElevationGrades: boolean;
buildingElevated: boolean;
priorConditionOfBuilding: string;
exteriorElevationPhotos: string[];
wasThereFlooding: boolean;
floodWaterType: string;
isFloodWaterTypeSewage: boolean;
causeOfLoss: string[];
floodCharacteristics: string[];
floodAssociatedWithFloodControl: boolean;
floodAssociatedDesc: string;
dateWaterEntered: number;
dateWaterReceded: number;
timeFlooded: {
days: number;
hours: number;
minutes: number;
};
otherThanNaturalCauseContribute: boolean;
otherThanNaturalDesc: string;
waterHeightMainBuildingExtInches: string;
waterHeightDetachedGarageExtInches: string;
waterHeightMainBuildingIntInches: string;
waterHeightDetachedGarageIntInches: string;
nearestBodyOfWater: string;
distanceFromBodyOfWaterFeet: string;
floorCount: string;
isSplitLevel: boolean;
floorsOccupiedByInsured: string;
basementFloodproofed: boolean;
priorConditionOfContents: string;
contentsClassification: string;
contentsLocated: string;
hasTitleVerified: boolean;
sourceOfVerification: string;
hasAuthorizedRepresentative: boolean;
authorizedRepName: string;
authorizedRepEmail: string;
insuredHiredMitigationContractor: boolean;
mitigationContactName: string;
mitigationContactAddress: string;
mitigationContactPhone: string;
hasOtherInvolvedParties: boolean;
otherInvolvedPartiesDesc: string;
GPTypeOfBuilding: string;
GPTypeOfBuildingOtherDesc: string;
GPMultipleBuildings: boolean;
GPVerifiedBuilding: boolean;
GPOtherInsurance: string[];
GPExcessPolicyLimits: string;
GPLiabilityPolicyLimits: string;
GPDamageDesc: string;
GPHasLease: boolean;
GPHasDocOfOwnership: boolean;
GPDocsListOwnership: boolean;
GPDocsTiedOwnership: boolean;
GPPersonalProperty: boolean;
GPStockHasBoxesOpen: boolean;
GPStockVerifiedDamage: boolean;
GPStockHasPhysicalDamage: boolean;
GPMerchHasBoxesOpen: boolean;
GPMerchVerifiedDamage: boolean;
GPMerchHasPhysicalDamage: boolean;
GPAuthorizedRep: string;
GPHasDocAuthorizingRep: boolean;
GPAccessToInvoices: string;
GPAccessToRepairReceipts: string;
GPAccessToStockInventoryRecords: string;
GPInspectedWith: string;
GPTenants: string; //maybe string[]
GPAccessRep: string;
GPOwnerOfBuilding: string;
GPBusinessOwner: string;
GPContentsOwner: string;
reservesBuildingReserve: number;
reservesContentsReserve: number;
coverageVerifiedFrom: string;
emergencyOrRegulaFloodProgram: string;
advPaymentRequest: string;
advPaymentRequestNoReason: string;
advPaymentRequestOtherReason: string;
advPaymentRequestBuildingValue: number;
advPaymentRequestContentsValue: number;
selectiveAPRContentsValue: number;
isOwnerOfProperty: boolean;
propertyOwnerName: string;
propertyOwnerAddress: string;
propertyOwnerDBA: string;
hasMajorImprovements: boolean;
improvementsDetails: string;
improvementsValue: string;
isCondoUnit: boolean;
ownerHasCondoDocs: boolean;
acknowledgeRecentAppraisal: boolean;
isCurrentAddress: boolean;
principleStreet1: string;
principleStreet2: string;
principleCity: string;
principleStateName: string;
principleZip: string;
lossStreet1: string;
lossStreet2: string;
lossCity: string;
lossStateName: string;
lossZip: string;
mortgagePaidOff: boolean;
mortgagePaidOffYear: string;
payOffLetter: string;
excessFloodCoverageForBuilding: boolean;
excessFloodCoverageForBuildingCarrier: string;
excessFloodCoverageForContents: boolean;
excessFloodCoverageForContentsCarrier: string;
hasPriorLoss: boolean;
priorLossArr: PriorLossObj[];
usingAGeneralContractor: boolean;
generalContractorName: string;
generalContractorAddress: string;
generalContractorLocation: string;
generalContractorPhone: string;
generalContractorLicense: string;
hasOtherInsurance: boolean;
nameOfOtherInsurance: string;
otherProvidesFloodCoverage: boolean;
acknowledgeOtherInsurance: boolean | random_line_split |
||
jsonast.go | }}
// oneOf:
// - External ARM resources
// oneOf:
// allOf:
// $ref: {{ base resource for ARM specific stuff like locks, deployments, etc }}
// oneOf:
// - ARM specific resources. I'm not 100% sure why...
//
// allOf acts like composition, combining each schema from the child oneOf with the base reference from allOf.
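//
// A hypothetical sketch of that nesting, for orientation only; it is not copied from any real ARM
// schema, and the definition names ("externalResource", "armResourceBase", "storageAccount") are invented:
//
//   {
//     "oneOf": [
//       { "$ref": "#/definitions/externalResource" },
//       {
//         "allOf": [
//           { "$ref": "#/definitions/armResourceBase" },
//           { "oneOf": [{ "$ref": "#/definitions/storageAccount" }] }
//         ]
//       }
//     ]
//   }
//
// Each allOf member contributes its fields to the composed type, while each oneOf member remains a
// mutually exclusive alternative.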
func (scanner *SchemaScanner) | (ctx context.Context, schema *gojsonschema.SubSchema, opts ...BuilderOption) ([]astmodel.TypeDefiner, error) {
ctx, span := tab.StartSpan(ctx, "GenerateDefinitions")
defer span.End()
for _, opt := range opts {
if err := opt(scanner); err != nil {
return nil, err
}
}
// get initial topic from ID and Title:
url := schema.ID.GetUrl()
if schema.Title == nil {
return nil, fmt.Errorf("Given schema has no Title")
}
rootName := *schema.Title
rootGroup, err := groupOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract group for schema: %w", err)
}
rootVersion, err := versionOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract version for schema: %w", err)
}
rootPackage := astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(rootGroup),
scanner.idFactory.CreatePackageNameFromVersion(rootVersion))
rootTypeName := astmodel.NewTypeName(*rootPackage, rootName)
_, err = generateDefinitionsFor(ctx, scanner, rootTypeName, false, url, schema)
if err != nil {
return nil, err
}
// produce the results
var defs []astmodel.TypeDefiner
for _, def := range scanner.definitions {
defs = append(defs, def)
}
return defs, nil
}
// DefaultTypeHandlers will create a default map of JSONType to AST transformers
func DefaultTypeHandlers() map[SchemaType]TypeHandler {
return map[SchemaType]TypeHandler{
Array: arrayHandler,
OneOf: oneOfHandler,
AnyOf: anyOfHandler,
AllOf: allOfHandler,
Ref: refHandler,
Object: objectHandler,
Enum: enumHandler,
String: fixedTypeHandler(astmodel.StringType, "string"),
Int: fixedTypeHandler(astmodel.IntType, "int"),
Number: fixedTypeHandler(astmodel.FloatType, "number"),
Bool: fixedTypeHandler(astmodel.BoolType, "bool"),
}
}
func enumHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, "enumHandler")
defer span.End()
// Default to a string base type
baseType := astmodel.StringType
for _, t := range []SchemaType{Bool, Int, Number, String} {
if schema.Types.Contains(string(t)) {
bt, err := getPrimitiveType(t)
if err != nil {
return nil, err
}
baseType = bt
}
}
var values []astmodel.EnumValue
for _, v := range schema.Enum {
id := scanner.idFactory.CreateIdentifier(v, astmodel.Exported)
values = append(values, astmodel.EnumValue{Identifier: id, Value: v})
}
enumType := astmodel.NewEnumType(baseType, values)
return enumType, nil
}
func fixedTypeHandler(typeToReturn astmodel.Type, handlerName string) TypeHandler {
return func(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, handlerName+"Handler")
defer span.End()
return typeToReturn, nil
}
}
func objectHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "objectHandler")
defer span.End()
fields, err := getFields(ctx, scanner, schema)
if err != nil {
return nil, err
}
// if we _only_ have an 'additionalProperties' field, then we are making
// a dictionary-like type, and we won't generate a struct; instead, we
// will just use the 'additionalProperties' type directly
if len(fields) == 1 && fields[0].FieldName() == "additionalProperties" {
return fields[0].FieldType(), nil
}
structDefinition := astmodel.NewStructType(fields...)
return structDefinition, nil
}
func generateFieldDefinition(ctx context.Context, scanner *SchemaScanner, prop *gojsonschema.SubSchema) (*astmodel.FieldDefinition, error) {
fieldName := scanner.idFactory.CreateFieldName(prop.Property, astmodel.Exported)
schemaType, err := getSubSchemaType(prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
propType, err := scanner.RunHandler(ctx, schemaType, prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
field := astmodel.NewFieldDefinition(fieldName, prop.Property, propType)
return field, nil
}
func getFields(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) ([]*astmodel.FieldDefinition, error) {
ctx, span := tab.StartSpan(ctx, "getFields")
defer span.End()
var fields []*astmodel.FieldDefinition
for _, prop := range schema.PropertiesChildren {
fieldDefinition, err := generateFieldDefinition(ctx, scanner, prop)
if err != nil {
return nil, err
}
// add documentation
fieldDefinition = fieldDefinition.WithDescription(prop.Description)
// add validations
isRequired := false
for _, required := range schema.Required {
if prop.Property == required {
isRequired = true
break
}
}
if isRequired {
fieldDefinition = fieldDefinition.MakeRequired()
} else {
fieldDefinition = fieldDefinition.MakeOptional()
}
fields = append(fields, fieldDefinition)
}
// see: https://json-schema.org/understanding-json-schema/reference/object.html#properties
if schema.AdditionalProperties == nil {
// if not specified, any additional properties are allowed (TODO: tell all Azure teams this fact and get them to update their API definitions)
// for now we aren't following the spec 100% as it pollutes the generated code
// only generate this field if there are no other fields:
if len(fields) == 0 {
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsField := astmodel.NewFieldDefinition("additionalProperties", "additionalProperties", astmodel.NewStringMapType(astmodel.AnyType))
fields = append(fields, additionalPropsField)
}
} else if schema.AdditionalProperties != false {
// otherwise, if not false then it is a type for all additional fields
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsType, err := scanner.RunHandlerForSchema(ctx, schema.AdditionalProperties.(*gojsonschema.SubSchema))
if err != nil {
return nil, err
}
additionalPropsField := astmodel.NewFieldDefinition(astmodel.FieldName("additionalProperties"), "additionalProperties", astmodel.NewStringMapType(additionalPropsType))
fields = append(fields, additionalPropsField)
}
return fields, nil
}
func refHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "refHandler")
defer span.End()
url := schema.Ref.GetUrl()
if url.Fragment == expressionFragment {
// skip expressions
return nil, nil
}
// make a new topic based on the ref URL
name, err := objectTypeOf(url)
if err != nil {
return nil, err
}
group, err := groupOf(url)
if err != nil {
return nil, err
}
version, err := versionOf(url)
if err != nil {
return nil, err
}
isResource := isResource(url)
// produce a usable name:
typeName := astmodel.NewTypeName(
*astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(group),
scanner.idFactory.CreatePackageNameFromVersion(version)),
scanner.idFactory.CreateIdentifier(name, astmodel.Exported))
return generateDefinitionsFor(ctx, scanner, typeName, isResource, url, schema.RefSchema)
}
func generateDefinitionsFor(ctx context.Context, scanner *SchemaScanner, typeName *astmodel.TypeName, isResource bool, url *url.URL, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
| GenerateDefinitions | identifier_name |
jsonast.go | .StartSpan(ctx, "refHandler")
defer span.End()
url := schema.Ref.GetUrl()
if url.Fragment == expressionFragment {
// skip expressions
return nil, nil
}
// make a new topic based on the ref URL
name, err := objectTypeOf(url)
if err != nil {
return nil, err
}
group, err := groupOf(url)
if err != nil {
return nil, err
}
version, err := versionOf(url)
if err != nil {
return nil, err
}
isResource := isResource(url)
// produce a usable name:
typeName := astmodel.NewTypeName(
*astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(group),
scanner.idFactory.CreatePackageNameFromVersion(version)),
scanner.idFactory.CreateIdentifier(name, astmodel.Exported))
return generateDefinitionsFor(ctx, scanner, typeName, isResource, url, schema.RefSchema)
}
func generateDefinitionsFor(ctx context.Context, scanner *SchemaScanner, typeName *astmodel.TypeName, isResource bool, url *url.URL, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
schemaType, err := getSubSchemaType(schema)
if err != nil {
return nil, err
}
// see if we already generated something for this ref
if _, ok := scanner.findTypeDefinition(typeName); ok {
return typeName, nil
}
// Add a placeholder to avoid recursive calls
// we will overwrite this later
scanner.addEmptyTypeDefinition(typeName)
result, err := scanner.RunHandler(ctx, schemaType, schema)
if err != nil {
scanner.removeTypeDefinition(typeName) // we weren't able to generate it, remove placeholder
return nil, err
}
// Give the type a name:
definer, otherDefs := result.CreateDefinitions(typeName, scanner.idFactory, isResource)
description := "Generated from: " + url.String()
definer = definer.WithDescription(&description)
// register all definitions
scanner.addTypeDefinition(definer)
for _, otherDef := range otherDefs {
scanner.addTypeDefinition(otherDef)
}
// return the name of the primary type
return definer.Name(), nil
}
func allOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "allOfHandler")
defer span.End()
var types []astmodel.Type
for _, all := range schema.AllOf {
d, err := scanner.RunHandlerForSchema(ctx, all)
if err != nil {
return nil, err
}
if d != nil {
types = appendIfUniqueType(types, d)
}
}
if len(types) == 1 {
return types[0], nil
}
var handleType func(fields []*astmodel.FieldDefinition, st astmodel.Type) ([]*astmodel.FieldDefinition, error)
handleType = func(fields []*astmodel.FieldDefinition, st astmodel.Type) ([]*astmodel.FieldDefinition, error) {
switch concreteType := st.(type) {
case *astmodel.StructType:
// if it's a struct type get all its fields:
fields = append(fields, concreteType.Fields()...)
case *astmodel.TypeName:
// TODO: need to check if this is a reference to a struct type or not
if def, ok := scanner.findTypeDefinition(concreteType); ok {
var err error
fields, err = handleType(fields, def.Type())
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("couldn't find definition for: %v", concreteType)
}
default:
klog.Errorf("Unhandled type in allOf: %#v\n", concreteType)
}
return fields, nil
}
// If there's more than one option, synthesize a type.
var fields []*astmodel.FieldDefinition
for _, d := range types {
// unpack the contents of what we got from subhandlers:
var err error
fields, err = handleType(fields, d)
if err != nil {
return nil, err
}
}
result := astmodel.NewStructType(fields...)
return result, nil
}
func oneOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "oneOfHandler")
defer span.End()
return generateOneOfUnionType(ctx, schema.OneOf, scanner)
}
func generateOneOfUnionType(ctx context.Context, subschemas []*gojsonschema.SubSchema, scanner *SchemaScanner) (astmodel.Type, error) {
// make sure we visit everything before bailing out,
// to get all types generated even if we can't use them
var results []astmodel.Type
for _, one := range subschemas {
result, err := scanner.RunHandlerForSchema(ctx, one)
if err != nil {
return nil, err
}
if result != nil {
results = appendIfUniqueType(results, result)
}
}
if len(results) == 1 {
return results[0], nil
}
// If there's more than one option, synthesize a type.
// Note that this is required because Kubernetes CRDs do not support OneOf the same way
// OpenAPI does, see https://github.com/Azure/k8s-infra/issues/71
var fields []*astmodel.FieldDefinition
fieldDescription := "mutually exclusive with all other properties"
for i, t := range results {
switch concreteType := t.(type) {
case *astmodel.TypeName:
// Just a sanity check that we've already scanned this definition
// TODO: Could remove this?
if _, ok := scanner.findTypeDefinition(concreteType); !ok {
return nil, fmt.Errorf("couldn't find struct for definition: %v", concreteType)
}
fieldName := scanner.idFactory.CreateFieldName(concreteType.Name(), astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(concreteType.Name(), astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.EnumType:
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("enum%v", i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.StructType:
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("object%v", i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
case *astmodel.PrimitiveType:
var primitiveTypeName string
if concreteType == astmodel.AnyType {
primitiveTypeName = "anything"
} else {
primitiveTypeName = concreteType.Name()
}
// TODO: This name sucks but what alternative do we have?
name := fmt.Sprintf("%v%v", primitiveTypeName, i)
fieldName := scanner.idFactory.CreateFieldName(name, astmodel.Exported)
// JSON name is unimportant here because we will implement the JSON marshaller anyway,
// but we still need it for controller-gen
jsonName := scanner.idFactory.CreateIdentifier(name, astmodel.NotExported)
field := astmodel.NewFieldDefinition(
fieldName, jsonName, concreteType).MakeOptional().WithDescription(&fieldDescription)
fields = append(fields, field)
default:
return nil, fmt.Errorf("unexpected oneOf member, type: %T", t)
}
}
structType := astmodel.NewStructType(fields...)
structType = structType.WithFunction(
"MarshalJSON",
astmodel.NewOneOfJSONMarshalFunction(structType, scanner.idFactory))
return structType, nil
}
func anyOfHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) | {
ctx, span := tab.StartSpan(ctx, "anyOfHandler")
defer span.End()
// See https://github.com/Azure/k8s-infra/issues/111 for details about why this is treated as oneOf
klog.Warningf("Handling anyOf type as if it were oneOf: %v\n", schema.Ref.GetUrl())
return generateOneOfUnionType(ctx, schema.AnyOf, scanner)
} | identifier_body |