code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
query = gql('''
query Models($entity: String, $project: String!) {
model(name: $project, entityName: $entity) {
id
name
repo
dockerImage
description
}
}
''')
return self.gql(query, variable_values={
'entity': entity, 'project': project})['model'] | def project(self, project, entity=None) | Retrieve project
Args:
project (str): The project to get details for
entity (str, optional): The entity to scope this project to.
Returns:
[{"id","name","repo","dockerImage","description"}] | 4.581565 | 3.371146 | 1.359053 |
query = gql('''
query Buckets($model: String!, $entity: String!) {
model(name: $model, entityName: $entity) {
buckets(first: 10) {
edges {
node {
id
name
description
}
}
}
}
}
''')
return self._flatten_edges(self.gql(query, variable_values={
'entity': entity or self.settings('entity'),
'model': project or self.settings('project')})['model']['buckets']) | def list_runs(self, project, entity=None) | Lists runs in W&B scoped by project.
Args:
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
Returns:
[{"id",name","description"}] | 3.795146 | 3.719423 | 1.020359 |
query = gql('''
mutation launchRun(
$entity: String
$model: String
$runId: String
$image: String
$command: String
$patch: String
$cwd: String
$datasets: [String]
) {
launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,
image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {
podName
status
runId
}
}
''')
patch = BytesIO()
if self.git.dirty:
self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
patch.seek(0)
cwd = "."
if self.git.enabled:
cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
return self.gql(query, variable_values={
'entity': entity or self.settings('entity'),
'model': project or self.settings('project'),
'command': command,
'runId': run_id,
'patch': patch.read().decode("utf8"),
'cwd': cwd
}) | def launch_run(self, command, project=None, entity=None, run_id=None) | Launch a run in the cloud.
Args:
command (str): The command to run
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
run_id (str, optional): The run_id to scope to
Returns:
[{"podName","status"}] | 3.205234 | 3.097093 | 1.034917 |
query = gql('''
query Model($name: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
config
commit
patch
files(names: ["wandb-metadata.json"]) {
edges {
node {
url
}
}
}
}
}
}
''')
response = self.gql(query, variable_values={
'name': project, 'run': run, 'entity': entity
})
if response['model'] is None:
raise ValueError("Run {}/{}/{} not found".format(entity, project, run))
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads(run['config'] or '{}')
if len(run['files']['edges']) > 0:
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata) | def run_config(self, project, run=None, entity=None) | Get the relevant configs for a run
Args:
project (str): The project to download (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to. | 2.860418 | 2.999764 | 0.953548 |
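Illustrative unpacking of the four-tuple returned above (the run id and config key are placeholders):

commit, config, patch, metadata = api.run_config(
    "my-project", run="abc123", entity="my-team")
print(commit)                   # git SHA the run was made from
print(config.get("lr"))         # hypothetical config key
print(metadata.get("program"))  # from wandb-metadata.json, if present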
query = gql('''
query Model($project: String!, $entity: String, $name: String!) {
model(name: $project, entityName: $entity) {
id
name
entity {
id
name
}
bucket(name: $name, missingOk: true) {
id
name
logLineCount
historyLineCount
eventsLineCount
historyTail
eventsTail
}
}
}
''')
response = self.gql(query, variable_values={
'entity': entity, 'project': project_name, 'name': name,
})
if 'model' not in response or 'bucket' not in response['model']:
return None
project = response['model']
self.set_setting('project', project_name)
if 'entity' in project:
self.set_setting('entity', project['entity']['name'])
return project['bucket'] | def run_resume_status(self, entity, project_name, name) | Check if a run exists and get resume information.
Args:
entity (str, optional): The entity to scope this project to.
project_name (str): The project to download (can include bucket)
name (str): The name of the run to check
mutation = gql('''
mutation UpsertModel($name: String!, $id: String, $entity: String!, $description: String, $repo: String) {
upsertModel(input: { id: $id, name: $name, entityName: $entity, description: $description, repo: $repo }) {
model {
name
description
}
}
}
''')
response = self.gql(mutation, variable_values={
'name': self.format_project(project), 'entity': entity or self.settings('entity'),
'description': description, 'repo': self.git.remote_url, 'id': id})
return response['upsertModel']['model'] | def upsert_project(self, project, id=None, description=None, entity=None) | Create a new project
Args:
project (str): The project to create
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to. | 3.088653 | 3.314883 | 0.931753 |
mutation = gql('''
mutation UpsertBucket(
$id: String, $name: String,
$project: String,
$entity: String!,
$groupName: String,
$description: String,
$commit: String,
$config: JSONString,
$host: String,
$debug: Boolean,
$program: String,
$repo: String,
$jobType: String,
$state: String,
$sweep: String,
$tags: [String!],
$summaryMetrics: JSONString,
) {
upsertBucket(input: {
id: $id,
name: $name,
groupName: $groupName,
modelName: $project,
entityName: $entity,
description: $description,
config: $config,
commit: $commit,
host: $host,
debug: $debug,
jobProgram: $program,
jobRepo: $repo,
jobType: $jobType,
state: $state,
sweep: $sweep,
tags: $tags,
summaryMetrics: $summaryMetrics,
}) {
bucket {
id
name
description
config
project {
id
name
entity {
id
name
}
}
}
}
}
''')
if config is not None:
config = json.dumps(config)
if not description:
description = None
kwargs = {}
if num_retries is not None:
kwargs['num_retries'] = num_retries
variable_values = {
'id': id, 'entity': entity or self.settings('entity'), 'name': name, 'project': project,
'groupName': group, 'tags': tags,
'description': description, 'config': config, 'commit': commit,
'host': host, 'debug': env.is_debug(), 'repo': repo, 'program': program_path, 'jobType': job_type,
'state': state, 'sweep': sweep_name, 'summaryMetrics': summary_metrics
}
response = self.gql(
mutation, variable_values=variable_values, **kwargs)
run = response['upsertBucket']['bucket']
project = run.get('project')
if project:
self.set_setting('project', project['name'])
entity = project.get('entity')
if entity:
self.set_setting('entity', entity['name'])
return response['upsertBucket']['bucket'] | def upsert_run(self, id=None, name=None, project=None, host=None,
group=None, tags=None,
config=None, description=None, entity=None, state=None,
repo=None, job_type=None, program_path=None, commit=None,
sweep_name=None, summary_metrics=None, num_retries=None) | Update a run
Args:
id (str, optional): The existing run to update
name (str, optional): The name of the run to create
group (str, optional): Name of the group this run is a part of
project (str, optional): The name of the project
config (dict, optional): The latest config params
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to.
repo (str, optional): Url of the program's repository.
state (str, optional): State of the program.
job_type (str, optional): Type of job, e.g. 'train'.
program_path (str, optional): Path to the program.
commit (str, optional): The Git SHA to associate the run with
summary_metrics (str, optional): The JSON summary metrics | 2.11062 | 2.14878 | 0.982241 |
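A sketch of the upsert path (every value below is a placeholder; `config` is JSON-serialized by the method itself):

run = api.upsert_run(
    name="abc123",
    project="my-project",
    entity="my-team",
    config={"lr": 0.01, "epochs": 10},
    job_type="train",
    tags=["baseline"],
)
# The mutation echoes back the project/entity, which the method persists as settings.
print(run["id"], run["project"]["name"])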
query = gql('''
query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
model(name: $name, entityName: $entity) {
bucket(name: $run, desc: $description) {
id
files(names: $files) {
edges {
node {
name
url(upload: true)
updatedAt
}
}
}
}
}
}
''')
run_id = run or self.settings('run')
entity = entity or self.settings('entity')
query_result = self.gql(query, variable_values={
'name': project, 'run': run_id,
'entity': entity,
'description': description,
'files': [file for file in files]
})
run = query_result['model']['bucket']
if run:
result = {file['name']: file for file in self._flatten_edges(run['files'])}
return run['id'], result
else:
raise CommError("Run does not exist {}/{}/{}.".format(entity, project, run_id)) | def upload_urls(self, project, files, run=None, entity=None, description=None) | Generate temporary resumeable upload urls
Args:
project (str): The project to download
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
} | 3.207458 | 2.943001 | 1.08986 |
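Sketch of fetching pre-signed upload urls for two files (all names are placeholders):

bucket_id, file_info = api.upload_urls(
    "my-project", files=["weights.h5", "model.json"], run="abc123")
put_url = file_info["weights.h5"]["url"]  # target for an HTTP PUT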
query = gql('''
query Model($name: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
files {
edges {
node {
name
url
md5
updatedAt
}
}
}
}
}
}
''')
query_result = self.gql(query, variable_values={
'name': project, 'run': run or self.settings('run'),
'entity': entity or self.settings('entity')})
files = self._flatten_edges(query_result['model']['bucket']['files'])
return {file['name']: file for file in files if file} | def download_urls(self, project, run=None, entity=None) | Generate download urls
Args:
project (str): The project to download
run (str, optional): The run to download from
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
A dict of extensions and urls
{
'weights.h5': { "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
'model.json': { "url": "https://model.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' }
} | 3.264751 | 3.01954 | 1.081208 |
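Illustrative listing of download urls, keyed by filename as described above (placeholders throughout):

urls = api.download_urls("my-project", run="abc123", entity="my-team")
for name, meta in urls.items():
    print(name, meta["md5"], meta["url"])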
query = gql('''
query Model($name: String!, $fileName: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
files(names: [$fileName]) {
edges {
node {
name
url
md5
updatedAt
}
}
}
}
}
}
''')
query_result = self.gql(query, variable_values={
'name': project, 'run': run or self.settings('run'), 'fileName': file_name,
'entity': entity or self.settings('entity')})
files = self._flatten_edges(query_result['model']['bucket']['files'])
return files[0] if len(files) > 0 and files[0].get('updatedAt') else None | def download_url(self, project, file_name, run=None, entity=None) | Generate download urls
Args:
project (str): The project to download
file_name (str): The name of the file to download
run (str, optional): The run to download from
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
A dict of extensions and urls
{ "url": "https://weights.url", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' } | 3.004167 | 2.812737 | 1.068058 |
response = requests.get(url, stream=True)
response.raise_for_status()
return (int(response.headers.get('content-length', 0)), response) | def download_file(self, url) | Initiate a streaming download
Args:
url (str): The url to download
Returns:
A tuple of the content length and the streaming response | 2.715919 | 2.563857 | 1.05931 |
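A hedged sketch that pairs `download_file` with a streaming write to disk (the url is a placeholder):

size, response = api.download_file("https://example.com/weights.h5")
with open("weights.h5", "wb") as f:
    for chunk in response.iter_content(chunk_size=1024):
        f.write(chunk)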
fileName = metadata['name']
path = os.path.join(out_dir or wandb_dir(), fileName)
if self.file_current(fileName, metadata['md5']):
return path, None
size, response = self.download_file(metadata['url'])
with open(path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
return path, response | def download_write_file(self, metadata, out_dir=None) | Download a file from a run and write it to wandb/
Args:
metadata (obj): The metadata object for the file to download. Comes from Api.download_urls().
Returns:
A tuple of the file's local path and the streaming response. The streaming response is None if the file already existed and was up to date. | 3.832367 | 3.100505 | 1.236046 |
extra_headers = extra_headers.copy()
response = None
if os.stat(file.name).st_size == 0:
raise CommError("%s is an empty file" % file.name)
try:
progress = Progress(file, callback=callback)
response = requests.put(
url, data=progress, headers=extra_headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
total = progress.len
status = self._status_request(url, total)
# TODO(adrian): there's probably even more stuff we should add here
# like if we're offline, we should retry then too
if status.status_code in (308, 408, 500, 502, 503, 504):
util.sentry_reraise(retry.TransientException(exc=e))
else:
util.sentry_reraise(e)
return response | def upload_file(self, url, file, callback=None, extra_headers={}) | Uploads a file to W&B with failure resumption
Args:
url (str): The url to upload to
file (str): The path to the file you want to upload
callback (:obj:`func`, optional): A callback which is passed the number of
bytes uploaded since the last time it was called, used to report progress
Returns:
The requests library response object | 4.416202 | 4.748238 | 0.930072 |
mutation = gql('''
mutation CreateAgent(
$host: String!
$projectName: String!,
$entityName: String!,
$sweep: String!
) {
createAgent(input: {
host: $host,
projectName: $projectName,
entityName: $entityName,
sweep: $sweep,
}) {
agent {
id
}
}
}
''')
if project_name is None:
project_name = self.settings('project')
# don't retry on validation errors
def no_retry_400(e):
if not isinstance(e, requests.HTTPError):
return True
if e.response.status_code != 400:
return True
body = json.loads(e.response.content)
raise UsageError(body['errors'][0]['message'])
response = self.gql(mutation, variable_values={
'host': host,
'entityName': self.settings("entity"),
'projectName': project_name,
'sweep': sweep_id}, check_retry_fn=no_retry_400)
return response['createAgent']['agent'] | def register_agent(self, host, sweep_id=None, project_name=None) | Register a new agent
Args:
host (str): hostname
sweep_id (str, optional): sweep id
project_name (str, optional): the project that contains the sweep
mutation = gql('''
mutation Heartbeat(
$id: ID!,
$metrics: JSONString,
$runState: JSONString
) {
agentHeartbeat(input: {
id: $id,
metrics: $metrics,
runState: $runState
}) {
agent {
id
}
commands
}
}
''')
try:
response = self.gql(mutation, variable_values={
'id': agent_id,
'metrics': json.dumps(metrics),
'runState': json.dumps(run_states)})
except Exception as e:
# GQL raises exceptions with stringified python dictionaries :/
message = ast.literal_eval(e.args[0])["message"]
logger.error('Error communicating with W&B: %s', message)
return []
else:
return json.loads(response['agentHeartbeat']['commands']) | def agent_heartbeat(self, agent_id, metrics, run_states) | Notify server about agent state, receive commands.
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute. | 3.092976 | 2.976754 | 1.039043 |
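One heartbeat cycle might look like this (ids, metrics, and states are placeholders):

commands = api.agent_heartbeat(
    "agent-123",
    metrics={"cpu": 0.4, "memory": 0.7},
    run_states={"abc123": "running"},
)
for command in commands:  # empty on communication errors, per the except branch
    print(command)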
mutation = gql('''
mutation UpsertSweep(
$config: String,
$description: String,
$entityName: String!,
$projectName: String!
) {
upsertSweep(input: {
config: $config,
description: $description,
entityName: $entityName,
projectName: $projectName
}) {
sweep {
name
}
}
}
''')
# don't retry on validation errors
# TODO(jhr): generalize error handling routines
def no_retry_400_or_404(e):
if not isinstance(e, requests.HTTPError):
return True
if e.response.status_code != 400 and e.response.status_code != 404:
return True
body = json.loads(e.response.content)
raise UsageError(body['errors'][0]['message'])
response = self.gql(mutation, variable_values={
'config': yaml.dump(config),
'description': config.get("description"),
'entityName': self.settings("entity"),
'projectName': self.settings("project")},
check_retry_fn=no_retry_400_or_404)
return response['upsertSweep']['sweep']['name'] | def upsert_sweep(self, config) | Upsert a sweep object.
Args:
config (dict): sweep config (will be converted to yaml) | 3.043361 | 3.036304 | 1.002324 |
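Sketch of registering a sweep; the dict below follows wandb's sweep YAML schema, with placeholder values:

sweep_name = api.upsert_sweep({
    "description": "lr search",
    "method": "grid",
    "parameters": {"lr": {"values": [0.01, 0.1]}},
})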
return os.path.isfile(fname) and util.md5_file(fname) == md5 | def file_current(self, fname, md5) | Checksum a file and compare the md5 with the known md5 | 4.08961 | 4.101719 | 0.997048 |
project, run = self.parse_slug(project, run=run)
urls = self.download_urls(project, run, entity)
responses = []
for fileName in urls:
_, response = self.download_write_file(urls[fileName])
if response:
responses.append(response)
return responses | def pull(self, project, run=None, entity=None) | Download files from W&B
Args:
project (str): The project to download
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
The requests library response object | 4.830217 | 5.389893 | 0.896162 |
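Illustrative pull of every file for a run (placeholders):

responses = api.pull("my-project", run="abc123", entity="my-team")
print("downloaded %d files" % len(responses))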
if project is None:
project = self.get_project()
if project is None:
raise CommError("No project configured.")
if run is None:
run = self.current_run_id
# TODO(adrian): we use a retriable version of self.upload_file() so
# will never retry self.upload_urls() here. Instead, maybe we should
# make push itself retriable.
run_id, result = self.upload_urls(
project, files, run, entity, description)
responses = []
for file_name, file_info in result.items():
try:
# To handle Windows paths
# TODO: this doesn't handle absolute paths...
normal_name = os.path.join(*file_name.split("/"))
open_file = files[normal_name] if isinstance(
files, dict) else open(normal_name, "rb")
except IOError:
print("%s does not exist" % file_name)
continue
if progress:
if hasattr(progress, '__call__'):
responses.append(self.upload_file_retry(
file_info['url'], open_file, progress))
else:
length = os.fstat(open_file.fileno()).st_size
with click.progressbar(file=progress, length=length, label='Uploading file: %s' % (file_name),
fill_char=click.style('&', fg='green')) as bar:
responses.append(self.upload_file_retry(
file_info['url'], open_file, lambda bites, _: bar.update(bites)))
else:
responses.append(self.upload_file_retry(file_info['url'], open_file))
open_file.close()
return responses | def push(self, files, run=None, entity=None, project=None, description=None, force=True, progress=False) | Uploads multiple files to W&B
Args:
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
project (str, optional): The name of the project to upload to. Defaults to the one in settings.
description (str, optional): The description of the changes
force (bool, optional): Whether to prevent push if git has uncommitted changes
progress (callable, or stream): If callable, will be called with (chunk_bytes,
total_bytes) as argument else if True, renders a progress bar to stream.
Returns:
The requests library response object | 3.683974 | 3.563096 | 1.033925 |
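A hedged upload counterpart; passing a dict of open file handles sidesteps the Windows path normalization noted in the code (all names are placeholders):

responses = api.push(
    {"weights.h5": open("weights.h5", "rb")},
    run="abc123", project="my-project", entity="my-team")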
if not self._file_stream_api:
if self._current_run_id is None:
raise UsageError(
'Must have a current run to use file stream API.')
self._file_stream_api = FileStreamApi(self, self._current_run_id)
return self._file_stream_api | def get_file_stream_api(self) | This creates a new file pusher thread. Call start to initiate the thread that talks to W&B | 3.132192 | 2.724233 | 1.149752 |
return requests.put(
url=url,
headers={'Content-Length': '0',
'Content-Range': 'bytes */%i' % length}
) | def _status_request(self, url, length) | Ask Google how much we've uploaded | 4.568961 | 3.803866 | 1.201136 |
# Take only completions that don't change the text before the cursor.
def doesnt_change_before_cursor(completion):
end = completion.text[:-completion.start_position]
return document.text_before_cursor.endswith(end)
completions2 = [c for c in completions if doesnt_change_before_cursor(c)]
# When there is at least one completion that changes the text before the
# cursor, don't return any common part.
if len(completions2) != len(completions):
return ''
# Return the common prefix.
def get_suffix(completion):
return completion.text[-completion.start_position:]
return _commonprefix([get_suffix(c) for c in completions2]) | def get_common_complete_suffix(document, completions) | Return the common prefix for all completions. | 3.704953 | 3.65213 | 1.014464 |
assert isinstance(position, int) and position - self.start_position >= 0
return Completion(
text=self.text[position - self.start_position:],
display=self.display,
display_meta=self._display_meta,
get_display_meta=self._get_display_meta) | def new_completion_from_position(self, position) | (Only for internal use!)
Get a new completion by splitting this one. Used by
`CommandLineInterface` when it needs to have a list of new completions
after inserting the common prefix. | 3.829539 | 3.750624 | 1.021041 |
" Width to report to the `Window`. "
# Take the width from the first line.
text = token_list_to_text(self.get_prompt_tokens(cli))
return get_cwidth(text) | def get_width(self, cli, ui_content) | Width to report to the `Window`. | 20.156954 | 11.780832 | 1.710996 |
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except wandb.Error as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback)
logger.error(''.join(lines))
click_exc = ClickWandbException(e)
click_exc.orig_type = exc_type
six.reraise(ClickWandbException, click_exc, sys.exc_info()[2])
return wrapper | def display_error(func) | Function decorator for catching common errors and re-raising as wandb.Error | 2.124898 | 1.976596 | 1.075029 |
result = ctx.invoke(projects, entity=entity, display=False)
try:
if len(result) == 0:
project = click.prompt("Enter a name for your first project")
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
else:
project_names = [project["name"] for project in result]
question = {
'type': 'list',
'name': 'project_name',
'message': "Which project should we use?",
'choices': project_names + ["Create New"]
}
result = whaaaaat.prompt([question])
if result:
project = result['project_name']
else:
project = "Create New"
# TODO: check with the server if the project exists
if project == "Create New":
project = click.prompt(
"Enter a name for your new project", value_proc=api.format_project)
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
except wandb.apis.CommError as e:
raise ClickException(str(e))
return project | def prompt_for_project(ctx, entity) | Ask the user for a project, creating one if necessary. | 3.617868 | 3.555406 | 1.017568 |
wandb.try_to_set_up_global_logging()
if ctx.invoked_subcommand is None:
click.echo(ctx.get_help()) | def cli(ctx) | Weights & Biases.
Run "wandb docs" for full documentation. | 5.943052 | 5.097263 | 1.16593 |
args = list(docker_run_args)
if len(args) > 0 and args[0] == "run":
args.pop(0)
if help or len(args) == 0:
wandb.termlog("This commands adds wandb env variables to your docker run calls")
subprocess.call(['docker', 'run'] + args + ['--help'])
exit()
#TODO: is this what we want?
if len([a for a in args if a.startswith("--runtime")]) == 0 and find_executable('nvidia-docker'):
args = ["--runtime", "nvidia"] + args
#TODO: image_from_docker_args uses heuristics to find the docker image arg, there are likely cases
#where this won't work
image = util.image_from_docker_args(args)
resolved_image = None
if image:
resolved_image = wandb.docker.image_id(image)
if resolved_image:
args = ['-e', 'WANDB_DOCKER=%s' % resolved_image] + args
else:
wandb.termlog("Couldn't detect image argument, running command without the WANDB_DOCKER env variable")
if api.api_key:
args = ['-e', 'WANDB_API_KEY=%s' % api.api_key] + args
else:
wandb.termlog("Not logged in, run `wandb login` from the host machine to enable result logging")
subprocess.call(['docker', 'run'] + args) | def docker_run(ctx, docker_run_args, help) | Simple docker wrapper that adds WANDB_API_KEY and WANDB_DOCKER to any docker run command.
This will also set the runtime to nvidia if the nvidia-docker executable is present on the system
and --runtime wasn't set. | 3.777434 | 3.463964 | 1.090495 |
path = self._config_path()
if path is not None and os.path.isfile(path):
self._load_file(path) | def _load_values(self) | Load config.yaml from the run directory if available. | 4.439068 | 3.02422 | 1.467839 |
for key in json:
if key == "wandb_version":
continue
self._items[key] = json[key].get('value')
self._descriptions[key] = json[key].get('desc') | def load_json(self, json) | Loads existing config from JSON | 5.089104 | 5.056859 | 1.006376 |
# In dryrun mode, without wandb run, we don't
# save config on initial load, because the run directory
# may not be created yet (because we don't know if we're
# being used in a run context, or as an API).
# TODO: Defer saving somehow, maybe via an events system
path = self._config_path()
if path is None:
return
with open(path, "w") as conf_file:
conf_file.write(str(self)) | def persist(self) | Stores the current configuration for pushing to W&B | 11.101476 | 10.120296 | 1.096952 |
possible_relatives = []
try:
if not self.repo:
return None
try:
active_branch = self.repo.active_branch
except (TypeError, ValueError):
logger.debug("git is in a detached head state")
return None # detached head
else:
tracking_branch = active_branch.tracking_branch()
if tracking_branch:
possible_relatives.append(tracking_branch.commit)
if not possible_relatives:
for branch in self.repo.branches:
tracking_branch = branch.tracking_branch()
if tracking_branch is not None:
possible_relatives.append(tracking_branch.commit)
head = self.repo.head
most_recent_ancestor = None
for possible_relative in possible_relatives:
# at most one:
for ancestor in self.repo.merge_base(head, possible_relative):
if most_recent_ancestor is None:
most_recent_ancestor = ancestor
elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
most_recent_ancestor = ancestor
return most_recent_ancestor
except exc.GitCommandError as e:
logger.debug("git remote upstream fork point could not be found")
logger.debug(e.message)
return None | def get_upstream_fork_point(self) | Get the most recent ancestor of HEAD that occurs on an upstream
branch.
First looks at the current branch's tracking branch, if applicable. If
that doesn't work, looks at every other branch to find the most recent
ancestor of HEAD that occurs on a tracking branch.
Returns:
git.Commit object or None | 2.639169 | 2.573261 | 1.025613 |
# Map to ensure that we return the objects that were passed in originally.
# Whether they are a fd integer or an object that has a fileno().
# (The 'poll' implementation for instance, returns always integers.)
fd_map = dict((fd_to_int(fd), fd) for fd in read_fds)
# Wait, using selector.
sel = selector()
try:
for fd in read_fds:
sel.register(fd)
result = sel.select(timeout)
if result is not None:
return [fd_map[fd_to_int(fd)] for fd in result]
finally:
sel.close() | def select_fds(read_fds, timeout, selector=AutoSelector) | Wait for a list of file descriptors (`read_fds`) to become ready for
reading. This chooses the most appropriate select-tool for use in
prompt-toolkit. | 6.132438 | 5.95566 | 1.029682 |
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def text_object_decorator(*keys, **kw):
filter = kw.pop('filter', Always())
no_move_handler = kw.pop('no_move_handler', False)
no_selection_handler = kw.pop('no_selection_handler', False)
eager = kw.pop('eager', False)
assert not kw
def decorator(text_object_func):
assert callable(text_object_func)
@registry.add_binding(*keys, filter=operator_given & filter, eager=eager)
def _(event):
# Arguments are multiplied.
vi_state = event.cli.vi_state
event._arg = (vi_state.operator_arg or 1) * (event.arg or 1)
# Call the text object handler.
text_obj = text_object_func(event)
if text_obj is not None:
assert isinstance(text_obj, TextObject)
# Call the operator function with the text object.
vi_state.operator_func(event, text_obj)
# Clear operator.
event.cli.vi_state.operator_func = None
event.cli.vi_state.operator_arg = None
# Register a move operation. (Doesn't need an operator.)
if not no_move_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
" Move handler for navigation mode. "
text_object = text_object_func(event)
event.current_buffer.cursor_position += text_object.start
# Register a move selection operation.
if not no_selection_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
" Move handler for selection mode. "
text_object = text_object_func(event)
buff = event.current_buffer
# When the text object has both a start and end position, like 'i(' or 'iw',
# Turn this into a selection, otherwise the cursor.
if text_object.end:
# Take selection positions from text object.
start, end = text_object.operator_range(buff.document)
start += buff.cursor_position
end += buff.cursor_position
buff.selection_state.original_cursor_position = start
buff.cursor_position = end
# Take selection type from text object.
if text_object.type == TextObjectType.LINEWISE:
buff.selection_state.type = SelectionType.LINES
else:
buff.selection_state.type = SelectionType.CHARACTERS
else:
event.current_buffer.cursor_position += text_object.start
# Make it possible to chain @text_object decorators.
return text_object_func
return decorator
return text_object_decorator | def create_text_object_decorator(registry) | Create a decorator that can be used to register Vi text object implementations. | 3.57315 | 3.508795 | 1.018341 |
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def operator_decorator(*keys, **kw):
filter = kw.pop('filter', Always())
eager = kw.pop('eager', False)
assert not kw
def decorator(operator_func):
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
# When this key binding is matched, only set the operator
# function in the ViState. We should execute it after a text
# object has been received.
event.cli.vi_state.operator_func = operator_func
event.cli.vi_state.operator_arg = event.arg
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
buff = event.current_buffer
selection_state = buff.selection_state
# Create text object from selection.
if selection_state.type == SelectionType.LINES:
text_obj_type = TextObjectType.LINEWISE
elif selection_state.type == SelectionType.BLOCK:
text_obj_type = TextObjectType.BLOCK
else:
text_obj_type = TextObjectType.INCLUSIVE
text_object = TextObject(
selection_state.original_cursor_position - buff.cursor_position,
type=text_obj_type)
# Execute operator.
operator_func(event, text_object)
# Quit selection mode.
buff.selection_state = None
return operator_func
return decorator
return operator_decorator | def create_operator_decorator(registry) | Create a decorator that can be used for registering Vi operators. | 4.331109 | 4.238286 | 1.021901 |
registry = Registry()
navigation_mode = ViNavigationMode()
registry.add_binding('v', filter=navigation_mode)(
get_by_name('edit-and-execute-command'))
return registry | def load_vi_open_in_editor_bindings() | Pressing 'v' in navigation mode will open the buffer in an external editor. | 16.818085 | 12.867383 | 1.307032 |
registry = ConditionalRegistry(Registry(), ViMode())
handle = registry.add_binding
handle(Keys.ControlF)(scroll_forward)
handle(Keys.ControlB)(scroll_backward)
handle(Keys.ControlD)(scroll_half_page_down)
handle(Keys.ControlU)(scroll_half_page_up)
handle(Keys.ControlE)(scroll_one_line_down)
handle(Keys.ControlY)(scroll_one_line_up)
handle(Keys.PageDown)(scroll_page_down)
handle(Keys.PageUp)(scroll_page_up)
return registry | def load_extra_vi_page_navigation_bindings() | Key bindings, for scrolling up and down through pages.
These are separate bindings, because GNU readline doesn't have them. | 2.860945 | 2.814846 | 1.016377 |
if self.start < self.end:
return self.start, self.end
else:
return self.end, self.start | def sorted(self) | Return a (start, end) tuple where start <= end. | 2.95715 | 2.106377 | 1.403904 |
start, end = self.sorted()
doc = document
if (self.type == TextObjectType.EXCLUSIVE and
doc.translate_index_to_position(end + doc.cursor_position)[1] == 0):
# If the motion is exclusive and the end of motion is on the first
# column, the end position becomes end of previous line.
end -= 1
if self.type == TextObjectType.INCLUSIVE:
end += 1
if self.type == TextObjectType.LINEWISE:
# Select whole lines
row, col = doc.translate_index_to_position(start + doc.cursor_position)
start = doc.translate_row_col_to_index(row, 0) - doc.cursor_position
row, col = doc.translate_index_to_position(end + doc.cursor_position)
end = doc.translate_row_col_to_index(row, len(doc.lines[row])) - doc.cursor_position
return start, end | def operator_range(self, document) | Return a (start, end) tuple with start <= end that indicates the range
operators should operate on.
`buffer` is used to get start and end of line positions. | 3.840037 | 3.615628 | 1.062066 |
# Get absolute cursor positions from the text object.
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
# Take the start of the lines.
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
return from_, to | def get_line_numbers(self, buffer) | Return a (start_line, end_line) pair. | 4.897135 | 4.462611 | 1.09737 |
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
to -= 1 # SelectionState does not include the end position, `operator_range` does.
document = Document(buffer.text, to, SelectionState(
original_cursor_position=from_, type=self.selection_type))
new_document, clipboard_data = document.cut_selection()
return new_document, clipboard_data | def cut(self, buffer) | Turn text object into `ClipboardData` instance. | 8.776269 | 7.515104 | 1.167817 |
" Scan backwards, and find a possible position to start. "
pattern = self._compiled_pattern
lines = document.lines
# Scan upwards, until we find a point where we can start the syntax
# synchronisation.
for i in range(lineno, max(-1, lineno - self.MAX_BACKWARDS), -1):
match = pattern.match(lines[i])
if match:
return i, match.start()
# No synchronisation point found. If we aren't that far from the
# beginning, start at the very beginning, otherwise, just try to start
# at the current line.
if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
return 0, 0
else:
return lineno, 0 | def get_sync_start_position(self, document, lineno) | Scan backwards, and find a possible position to start. | 5.799985 | 4.862469 | 1.192807 |
patterns = {
# For Python, start highlighting at any class/def block.
'Python': r'^\s*(class|def)\s+',
'Python 3': r'^\s*(class|def)\s+',
# For HTML, start at any open/close tag definition.
'HTML': r'<[/a-zA-Z]',
# For javascript, start at a function.
'JavaScript': r'\bfunction\b'
# TODO: Add definitions for other languages.
# By default, we start at every possible line.
}
p = patterns.get(lexer_cls.name, '^')
return cls(p) | def from_pygments_lexer_cls(cls, lexer_cls) | Create a :class:`.RegexSync` instance for this Pygments lexer class. | 6.823475 | 6.730292 | 1.013845 |
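Illustrative construction from a Pygments lexer class (the prompt_toolkit 1.x import path is an assumption):

from pygments.lexers import PythonLexer
from prompt_toolkit.layout.lexers import RegexSync  # path assumed

sync = RegexSync.from_pygments_lexer_cls(PythonLexer)
# Lexers without an entry in the table fall back to '^', i.e. syncing at any line.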
# Inline imports: the Pygments dependency is optional!
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
try:
pygments_lexer = get_lexer_for_filename(filename)
except ClassNotFound:
return SimpleLexer()
else:
return cls(pygments_lexer.__class__, sync_from_start=sync_from_start) | def from_filename(cls, filename, sync_from_start=True) | Create a `Lexer` from a filename. | 3.917817 | 3.589659 | 1.091417 |
# Cache of already lexed lines.
cache = {}
# Pygments generators that are currently lexing.
line_generators = {} # Map lexer generator to the line number.
def get_syntax_sync():
" The Syntax synchronisation objcet that we currently use. "
if self.sync_from_start(cli):
return SyncFromStart()
else:
return self.syntax_sync
def find_closest_generator(i):
" Return a generator close to line 'i', or None if none was fonud. "
for generator, lineno in line_generators.items():
if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE:
return generator
def create_line_generator(start_lineno, column=0):
def get_tokens():
text = '\n'.join(document.lines[start_lineno:])[column:]
# We call `get_tokens_unprocessed`, because `get_tokens` will
# still replace \r\n and \r by \n. (We don't want that,
# Pygments should return exactly the same amount of text, as we
# have given as input.)
for _, t, v in self.pygments_lexer.get_tokens_unprocessed(text):
yield t, v
return enumerate(split_lines(get_tokens()), start_lineno)
def get_generator(i):
# Find closest line generator.
generator = find_closest_generator(i)
if generator:
return generator
# No generator found. Determine starting point for the syntax
# synchronisation first.
# Go at least x lines back. (Make scrolling upwards more
# efficient.)
i = max(0, i - self.MIN_LINES_BACKWARDS)
if i == 0:
row = 0
column = 0
else:
row, column = get_syntax_sync().get_sync_start_position(document, i)
# Find generator close to this point, or otherwise create a new one.
generator = find_closest_generator(i)
if generator:
return generator
else:
generator = create_line_generator(row, column)
# If the column is not 0, ignore the first line. (Which is
# incomplete. This happens when the synchronisation algorithm tells
# us to start parsing in the middle of a line.)
if column:
next(generator)
row += 1
line_generators[generator] = row
return generator
def get_line(i):
" Return the tokens for a given line number. "
try:
return cache[i]
except KeyError:
generator = get_generator(i)
# Exhaust the generator, until we find the requested line.
for num, line in generator:
cache[num] = line
if num == i:
line_generators[generator] = i
# Remove the next item from the cache.
# (It could happen that it's already there, because of
# another generator that started filling these lines,
# but we want to synchronise these lines with the
# current lexer's state.)
if num + 1 in cache:
del cache[num + 1]
return cache[num]
return []
return get_line | def lex_document(self, cli, document) | Create a lexer function that takes a line number and returns the list
of (Token, text) tuples as the Pygments lexer returns for that line. | 5.50414 | 5.404008 | 1.018529 |
lbound = 0
ubound = len(table) - 1
if ucs < table[0][0] or ucs > table[ubound][1]:
return 0
while ubound >= lbound:
mid = (lbound + ubound) // 2
if ucs > table[mid][1]:
lbound = mid + 1
elif ucs < table[mid][0]:
ubound = mid - 1
else:
return 1
return 0 | def _bisearch(ucs, table) | Auxiliary function for binary search in interval table.
:arg int ucs: Ordinal value of unicode character.
:arg list table: List of starting and ending ranges of ordinal values,
in form of ``[(start, end), ...]``.
:rtype: int
:returns: 1 if ordinal value ucs is found within lookup table, else 0. | 1.868571 | 1.795844 | 1.040498 |
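A tiny check of the interval search, with the function above in scope; the sample table must be sorted and non-overlapping:

table = [(0x0300, 0x036F), (0x20D0, 0x20FF)]  # two combining-mark ranges
assert _bisearch(0x0301, table) == 1  # inside the first range
assert _bisearch(0x0041, table) == 0  # 'A' falls outside both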
# pylint: disable=C0103
# Invalid argument name "wc"
ucs = ord(wc)
# NOTE: created by hand, there isn't anything identifiable other than
# general Cf category code to identify these, and some characters in Cf
# category code are of non-zero width.
# pylint: disable=too-many-boolean-expressions
# Too many boolean expressions in if statement (7/5)
if (ucs == 0 or
ucs == 0x034F or
0x200B <= ucs <= 0x200F or
ucs == 0x2028 or
ucs == 0x2029 or
0x202A <= ucs <= 0x202E or
0x2060 <= ucs <= 0x2063):
return 0
# C0/C1 control characters
if ucs < 32 or 0x07F <= ucs < 0x0A0:
return -1
# combining characters with zero width
if _bisearch(ucs, ZERO_WIDTH):
return 0
return 1 + _bisearch(ucs, WIDE_EASTASIAN) | def wcwidth(wc) | Given one unicode character, return its printable length on a terminal.
The wcwidth() function returns 0 if the wc argument has no printable effect
on a terminal (such as NUL '\0'), -1 if wc is not printable, or has an
indeterminate effect on the terminal, such as a control character.
Otherwise, the number of column positions the character occupies on a
graphic terminal (1 or 2) is returned.
The following have a column width of -1:
- C0 control characters (U+001 through U+01F).
- C1 control characters and DEL (U+07F through U+0A0).
The following have a column width of 0:
- Non-spacing and enclosing combining characters (general
category code Mn or Me in the Unicode database).
- NULL (U+0000, 0).
- COMBINING GRAPHEME JOINER (U+034F).
- ZERO WIDTH SPACE (U+200B) through
RIGHT-TO-LEFT MARK (U+200F).
- LINE SEPARATOR (U+2028) and
PARAGRAPH SEPARATOR (U+2029).
- LEFT-TO-RIGHT EMBEDDING (U+202A) through
RIGHT-TO-LEFT OVERRIDE (U+202E).
- WORD JOINER (U+2060) through
INVISIBLE SEPARATOR (U+2063).
The following have a column width of 1:
- SOFT HYPHEN (U+00AD) has a column width of 1.
- All remaining characters (including all printable
ISO 8859-1 and WGL4 characters, Unicode control characters,
etc.) have a column width of 1.
The following have a column width of 2:
- Spacing characters in the East Asian Wide (W) or East Asian
Full-width (F) category as defined in Unicode Technical
Report #11 have a column width of 2. | 4.539029 | 4.19597 | 1.081759 |
# pylint: disable=C0103
# Invalid argument name "n"
end = len(pwcs) if n is None else n
idx = slice(0, end)
width = 0
for char in pwcs[idx]:
wcw = wcwidth(char)
if wcw < 0:
return -1
else:
width += wcw
return width | def wcswidth(pwcs, n=None) | Given a unicode string, return its printable length on a terminal.
Return the width, in cells, necessary to display the first ``n``
characters of the unicode string ``pwcs``. When ``n`` is None (default),
return the length of the entire string.
Returns ``-1`` if a non-printable character is encountered. | 3.653822 | 3.76614 | 0.970177 |
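Quick sanity checks of both width functions defined above:

assert wcwidth(u'a') == 1         # ordinary printable: one cell
assert wcwidth(u'\u4e2d') == 2    # East Asian wide: two cells
assert wcwidth(u'\x07') == -1     # C0 control: non-printable
assert wcswidth(u'\u4e2dab') == 4
assert wcswidth(u'a\x07b') == -1  # one non-printable poisons the total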
file_event_handler = PatternMatchingEventHandler()
file_event_handler.on_created = self._on_file_created
file_event_handler.on_modified = self._on_file_modified
file_event_handler.on_moved = self._on_file_moved
file_event_handler._patterns = [
os.path.join(self._watch_dir, os.path.normpath('*'))]
# Ignore hidden files/folders and output.log because we stream it specially
file_event_handler._ignore_patterns = [
'*/.*',
'*.tmp',
os.path.join(self._run.dir, OUTPUT_FNAME)
]
for glob in self._api.settings("ignore_globs"):
file_event_handler._ignore_patterns.append(
os.path.join(self._run.dir, glob))
return file_event_handler | def _per_file_event_handler(self) | Create a Watchdog file event handler that does different things for every file | 3.43283 | 3.331043 | 1.030557 |
# TODO: there was a case where _file_event_handlers was getting modified in the loop.
for handler in list(self._file_event_handlers.values()):
handler.finish()
self._file_pusher.finish()
self._api.get_file_stream_api().finish(exitcode)
# In Jupyter notebooks, wandb.init can be called multiple times in the same
# process, creating new runs each time. This ensures we get a new file stream
# thread
self._api._file_stream_api = None | def _end_file_syncing(self, exitcode) | Stops file syncing/streaming but doesn't actually wait for everything to
finish. We print progress info later. | 9.286333 | 8.902004 | 1.043173 |
self._file_pusher.update_file(save_name, file_path) # track upload progress
if save_name not in self._file_event_handlers:
if save_name == 'wandb-history.jsonl':
self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
# overwrite the tensorboard but not every reload -- just
# frequently enough to resemble realtime
self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
file_path, save_name, self._api, self._file_pusher)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# BinaryFilePolicy())
# self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._file_event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
self._file_event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/'):
# Save media files immediately
self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
else:
Handler = FileEventHandlerOverwriteDeferred
for policy, globs in six.iteritems(self._user_file_policies):
if policy == "end":
continue
for g in globs:
if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
if policy == "live":
Handler = FileEventHandlerThrottledOverwriteMinWait
self._file_event_handlers[save_name] = Handler(
file_path, save_name, self._api, self._file_pusher)
return self._file_event_handlers[save_name] | def _get_file_event_handler(self, file_path, save_name) | Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory) | 3.9811 | 3.966103 | 1.003781 |
# TODO: Ideally we could start collecting logs without pushing
fs_api = self._api.get_file_stream_api()
io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True))
io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR')) | def mirror_stdout_stderr(self) | Simple STDOUT and STDERR mirroring used by _init_jupyter | 7.796471 | 7.142776 | 1.091518 |
if six.PY2 or not hasattr(sys.stdout, "buffer"):
if hasattr(sys.stdout, "fileno") and sys.stdout.isatty():
try:
stdout = os.fdopen(sys.stdout.fileno(), "w+", 0)
stderr = os.fdopen(sys.stderr.fileno(), "w+", 0)
# OSError [Errno 22] Invalid argument wandb
except OSError:
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = sys.stdout
stderr = sys.stderr
else: # we write binary so grab the raw I/O objects in python 3
try:
stdout = sys.stdout.buffer.raw
stderr = sys.stderr.buffer.raw
except AttributeError:
# The testing environment and potentially others may have screwed with their
# io so we fallback to raw stdout / err
stdout = sys.stdout.buffer
stderr = sys.stderr.buffer
output_log_path = os.path.join(self._run.dir, OUTPUT_FNAME)
self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))
stdout_streams = [stdout, self._output_log]
stderr_streams = [stderr, self._output_log]
if self._cloud:
# Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
fs_api = self._api.get_file_stream_api()
self._stdout_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True)
self._stderr_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, line_prepend='ERROR',
prepend_timestamp=True)
stdout_streams.append(self._stdout_stream)
stderr_streams.append(self._stderr_stream)
return stdout_streams, stderr_streams | def _get_stdout_stderr_streams(self) | Sets up STDOUT and STDERR streams. Only call this once. | 3.584313 | 3.56638 | 1.005029 |
# we don't have tee_file's in headless mode
if self._stdout_tee.tee_file is not None:
self._stdout_tee.tee_file.close()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
# TODO(adrian): we should close these even in headless mode
# but in python 2 the read thread doesn't stop on its own
# for some reason
self._stdout_tee.close_join()
self._stderr_tee.close_join()
if self._cloud:
# not set in dry run mode
self._stdout_stream.close()
self._stderr_stream.close()
self._output_log.f.close()
self._output_log = None | def _close_stdout_stderr_streams(self) | Close output-capturing stuff. This also flushes anything left in
the buffers. | 4.821187 | 4.836948 | 0.996741 |
io_wrap.init_sigwinch_handler()
self._check_update_available(__version__)
if self._output:
wandb.termlog("Local directory: %s" % os.path.relpath(self._run.dir))
self._system_stats.start()
self._meta.start()
logger.info("system metrics and metadata threads started")
new_step = None
if self._cloud:
storage_id = None
if self._run.resume != 'never':
# DNS can hang for 60 seconds, we check for resume status in a thread
# TODO: Ideally this thread would continue retrying in case of failure.
# Currently we assume we're not resuming in the case of resume = auto,
# and we throw an error in the case of resume = must.
logger.info("checking resume status, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
async_resume_status = util.async_call(self._api.run_resume_status, InternalApi.HTTP_TIMEOUT)
resume_status, thread = async_resume_status(self._api.settings("entity"), self._project, self._run.id)
if resume_status is None and self._run.resume == 'must':
if thread.isAlive():
raise LaunchError(
"resume='must' but we were unable to connect to the W&B service after %i seconds" % InternalApi.HTTP_TIMEOUT)
else:
raise LaunchError(
"resume='must' but run (%s) doesn't exist" % self._run.id)
if resume_status:
storage_id = resume_status['id']
logger.info("resuming run from id: %s" % storage_id)
self._project = self._resolve_project_name(self._project)
self._setup_resume(resume_status)
try:
history = json.loads(json.loads(resume_status['historyTail'])[-1])
except (IndexError, ValueError):
history = {}
new_step = history.get("_step", 0)
else:
new_step = 0
# DNS lookups can hang for up to 60 seconds; we wait for HTTP_TIMEOUT (10s)
logger.info("upserting run before process can begin, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
async_upsert = util.async_call(self._upsert_run, timeout=InternalApi.HTTP_TIMEOUT)
_, self._upsert_run_thread = async_upsert(True, storage_id, env)
if self._upsert_run_thread.isAlive():
logger.error("Failed to connect to W&B servers after %i seconds.\
Letting user process proceed while attempting to reconnect." % InternalApi.HTTP_TIMEOUT)
return new_step | def init_run(self, env=None) | Ensure we create a Run (Bucket) object
We either create it now or, if the API call fails for some reason (eg.
the network is down), we do it from a thread that we start. We hold
off file syncing and streaming until it succeeds.
Returns the initial step of the run, or None if we didn't create a run | 5.748455 | 5.698616 | 1.008746 |
if retry:
num_retries = None
else:
num_retries = 0 # no retries because we want to let the user process run even if the backend is down
try:
upsert_result = self._run.save(
id=storage_id, num_retries=num_retries, api=self._api)
except wandb.apis.CommError as e:
logger.exception("communication error with wandb %s" % e.exc)
# TODO: Get rid of str contains check
if self._run.resume == 'never' and 'exists' in str(e):
raise LaunchError(
"resume='never' but run (%s) exists" % self._run.id)
else:
# Detect bad request code -- this is usually trying to
# create a run that has been already deleted
if (isinstance(e.exc, requests.exceptions.HTTPError) and
e.exc.response.status_code == 400):
raise LaunchError(
'Failed to connect to W&B. See {} for details.'.format(
util.get_log_file_path()))
if isinstance(e.exc, (requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError)):
wandb.termerror(
'Failed to connect to W&B. Retrying in the background.')
return False
launch_error_s = 'Launch exception: {}, see {} for details. To disable wandb set WANDB_MODE=dryrun'.format(e, util.get_log_file_path())
if 'Permission denied' in str(e):
launch_error_s += '\nRun "wandb login", or provide your API key with the WANDB_API_KEY environment variable.'
raise LaunchError(launch_error_s)
if self._output:
url = self._run.get_url(self._api)
wandb.termlog("Syncing to %s" % url)
wandb.termlog("Run `wandb off` to turn off syncing.")
self._run.set_environment(environment=env)
logger.info("saving patches")
self._api.save_patches(self._watch_dir)
logger.info("saving pip packages")
self._api.save_pip(self._watch_dir)
logger.info("initializing streaming files api")
self._api.get_file_stream_api().set_file_policy(
OUTPUT_FNAME, CRDedupeFilePolicy())
self._api.get_file_stream_api().start()
self._project = self._api.settings("project")
# unblock file syncing and console streaming, which need the Run to have a .storage_id
logger.info("unblocking file change observer, beginning sync with W&B servers")
self._unblock_file_observer()
return True | def _upsert_run(self, retry, storage_id, env) | Upsert the Run (ie. for the first time with all its attributes)
Arguments:
retry: (bool) Whether to retry if the connection fails (ie. if the backend is down).
False is useful so we can start running the user process even when the W&B backend
is down, and let syncing finish later.
Returns:
True if the upsert succeeded, False if it failed because the backend is down.
Throws:
LaunchError on other failures | 5.524308 | 5.327266 | 1.036987 |
logger.info("shutting down system stats and metadata service")
self._system_stats.shutdown()
self._meta.shutdown()
if self._cloud:
logger.info("stopping streaming files and file change observer")
self._stop_file_observer()
self._end_file_syncing(exitcode)
self._run.history.close() | def shutdown(self, exitcode=0) | Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor | 10.987575 | 8.16978 | 1.344905 |
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
if sys.platform == "win32":
# PTYs don't work in windows so we use pipes.
self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
# Seems like the following actually isn't necessary on Windows
# TODO(adrian): we may need to do the following if we use pipes instead of PTYs
# because Python on Unix doesn't like writing UTF-8 to files
# tell child python interpreters we accept utf-8
# env['PYTHONIOENCODING'] = 'UTF-8'
else:
self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)
command = [program] + list(args)
runner = util.find_runner(program)
if runner:
command = runner + command
command = ' '.join(six.moves.shlex_quote(arg) for arg in command)
self._stdout_stream.write_string(command + "\n\n")
try:
self.proc = subprocess.Popen(
command,
env=env,
stdout=self._stdout_tee.tee_file,
stderr=self._stderr_tee.tee_file,
shell=True,
)
self._run.pid = self.proc.pid
except (OSError, IOError):
raise Exception('Could not find program: %s' % command)
self._sync_etc() | def run_user_process(self, program, args, env) | Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc. | 4.297207 | 4.293925 | 1.000764 |
stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)
self.proc = Process(pid)
self._run.pid = pid
logger.info("wrapping existing process %i" % pid)
try:
self.init_run()
except LaunchError as e:
logger.exception("catastrophic launch error")
wandb.termerror(str(e))
util.sentry_exc(e)
self._socket.launch_error()
return
if io_wrap.SIGWINCH_HANDLER is not None:
# SIGWINCH_HANDLER (maybe) gets set in self.init_run()
io_wrap.SIGWINCH_HANDLER.add_fd(stdout_read_fd)
io_wrap.SIGWINCH_HANDLER.add_fd(stderr_read_fd)
# Signal the main process that we're all hooked up
logger.info("informing user process we are ready to proceed")
self._socket.ready()
self._sync_etc(headless=True) | def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None) | Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc. | 4.210299 | 4.227098 | 0.996026 |
ZeroWidthEscape = Token.ZeroWidthEscape
return sum(len(item[1]) for item in tokenlist if item[0] != ZeroWidthEscape) | def token_list_len(tokenlist) | Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples. | 6.873692 | 7.899019 | 0.870196 |
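A minimal standalone sketch of the counting rule above, with a plain string standing in for the real `Token.ZeroWidthEscape` member:
ZERO_WIDTH_ESCAPE = 'ZeroWidthEscape'  # stand-in for Token.ZeroWidthEscape

def sketch_token_list_len(tokenlist):
    # Sum the text lengths, skipping zero-width escape sequences.
    return sum(len(item[1]) for item in tokenlist if item[0] != ZERO_WIDTH_ESCAPE)

tokens = [('Text', 'hello '), (ZERO_WIDTH_ESCAPE, '\x1b[1m'), ('Text', 'world')]
print(sketch_token_list_len(tokens))  # 11 -- the escape sequence contributes nothing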
ZeroWidthEscape = Token.ZeroWidthEscape
return sum(get_cwidth(c) for item in tokenlist for c in item[1] if item[0] != ZeroWidthEscape) | def token_list_width(tokenlist) | Return the character width of this token list.
(Take double width characters into account.)
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples. | 7.5911 | 9.308548 | 0.815498 |
ZeroWidthEscape = Token.ZeroWidthEscape
return ''.join(item[1] for item in tokenlist if item[0] != ZeroWidthEscape) | def token_list_to_text(tokenlist) | Concatenate all the text parts again. | 6.549732 | 5.941524 | 1.102366 |
line = []
for token, c in explode_tokens(tokenlist):
line.append((token, c))
if c == '\n':
yield line
line = []
yield line | def iter_token_lines(tokenlist) | Iterator that yields tokenlists for each line. | 4.385359 | 4.144465 | 1.058124 |
line = []
for item in tokenlist:
# For (token, text) tuples.
if len(item) == 2:
token, string = item
parts = string.split('\n')
for part in parts[:-1]:
if part:
line.append((token, part))
yield line
line = []
line.append((token, parts[-1]))
# Note that parts[-1] can be empty, and that's fine. It happens
# in the case of [(Token.SetCursorPosition, '')].
# For (token, text, mouse_handler) tuples.
# I know, partly copy/paste, but understandable and more efficient
# than many tests.
else:
token, string, mouse_handler = item
parts = string.split('\n')
for part in parts[:-1]:
if part:
line.append((token, part, mouse_handler))
yield line
line = []
line.append((token, parts[-1], mouse_handler))
# Always yield the last line, even when this is an empty line. This ensures
# that when `tokenlist` ends with a newline character, an additional empty
# line is yielded. (Otherwise, there's no way to differentiate between the
# cases where `tokenlist` does and doesn't end with a newline.)
yield line | def split_lines(tokenlist) | Take a single list of (Token, text) tuples and yield one such list for each
line. Just like str.split, this will yield at least one item.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples. | 4.491429 | 3.918302 | 1.146269 |
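Assuming the `split_lines` generator above is in scope, a short usage sketch (the token names are made up):
tokens = [('A', 'line one\nline '), ('B', 'two\n')]
for line in split_lines(tokens):
    print(line)
# [('A', 'line one')]
# [('A', 'line '), ('B', 'two')]
# [('B', '')]   <- the trailing newline yields one final empty-text line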
# When the tokenlist is already exploded, don't explode again.
if getattr(tokenlist, 'exploded', False):
return tokenlist
result = []
for token, string in tokenlist:
for c in string:
result.append((token, c))
return _ExplodedList(result) | def explode_tokens(tokenlist) | Turn a list of (token, text) tuples into another list where each string is
exactly one character.
It should be fine to call this function several times. Calling this on a
list that is already exploded, is a null operation.
:param tokenlist: List of (token, text) tuples. | 4.278244 | 4.362813 | 0.980616 |
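`_ExplodedList` is not shown above; a minimal sketch that treats it as a `list` subclass carrying the `exploded` flag, which is all this function relies on:
class ExplodedList(list):
    exploded = True  # marker checked on re-entry

def sketch_explode_tokens(tokenlist):
    if getattr(tokenlist, 'exploded', False):
        return tokenlist  # already exploded: null operation
    return ExplodedList((token, c) for token, string in tokenlist for c in string)

out = sketch_explode_tokens([('Text', 'ab'), ('Keyword', 'c')])
print(out)  # [('Text', 'a'), ('Text', 'b'), ('Keyword', 'c')]
print(sketch_explode_tokens(out) is out)  # True: second call is a no-op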
from prompt_toolkit.interface import CommandLineInterface
assert isinstance(cli, CommandLineInterface)
from .containers import Window
from .controls import BufferControl
for l in cli.layout.walk(cli):
if isinstance(l, Window) and isinstance(l.content, BufferControl):
if l.content.buffer_name == buffer_name:
return l | def find_window_for_buffer_name(cli, buffer_name) | Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
for the given buffer and return it. If no such Window is found, return None. | 4.131888 | 3.147969 | 1.312557 |
keys = tuple(k.key for k in key_presses)
cli = self._cli_ref()
# Try match, with mode flag
return [b for b in self._registry.get_bindings_for_keys(keys) if b.filter(cli)] | def _get_matches(self, key_presses) | For a list of :class:`KeyPress` instances, return the matching handlers
that would handle them. | 10.302949 | 9.233513 | 1.115821 |
keys = tuple(k.key for k in key_presses)
cli = self._cli_ref()
# Get the filters for all the key bindings that have a longer match.
# Note that we transform it into a `set`, because we don't care about
# the actual bindings and executing it more than once doesn't make
# sense. (Many key bindings share the same filter.)
filters = set(b.filter for b in self._registry.get_bindings_starting_with_keys(keys))
# When any key binding is active, return True.
return any(f(cli) for f in filters) | def _is_prefix_of_longer_match(self, key_presses) | For a list of :class:`KeyPress` instances, return True if any handler
is bound to a longer key sequence that starts with these keys. | 8.00103 | 7.485163 | 1.068919 |
buffer = self.key_buffer
retry = False
while True:
if retry:
retry = False
else:
buffer.append((yield))
# If we have some key presses, check for matches.
if buffer:
is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
matches = self._get_matches(buffer)
# When eager matches were found, give priority to them and also
# ignore all the longer matches.
eager_matches = [m for m in matches if m.eager(self._cli_ref())]
if eager_matches:
matches = eager_matches
is_prefix_of_longer_match = False
# Exact matches found, call handler.
if not is_prefix_of_longer_match and matches:
self._call_handler(matches[-1], key_sequence=buffer[:])
del buffer[:] # Keep reference.
# No match found.
elif not is_prefix_of_longer_match and not matches:
retry = True
found = False
# Loop over the input, try longest match first and shift.
for i in range(len(buffer), 0, -1):
matches = self._get_matches(buffer[:i])
if matches:
self._call_handler(matches[-1], key_sequence=buffer[:i])
del buffer[:i]
found = True
break
if not found:
del buffer[:1] | def _process(self) | Coroutine implementing the key match algorithm. Key strokes are sent
into this generator, and it calls the appropriate handlers. | 4.11939 | 3.845937 | 1.071102 |
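The fallback branch above shifts through the buffer, trying the longest prefix first. A toy illustration of that lookup, with hypothetical string bindings instead of real `KeyPress` objects and filters:
bindings = {('g', 'g'): 'go-top', ('g', '~'): 'swap-case', ('x',): 'delete'}

def resolve(buffer):
    # Try the longest prefix of the buffer first, then shorter ones.
    for i in range(len(buffer), 0, -1):
        prefix = tuple(buffer[:i])
        if prefix in bindings:
            return bindings[prefix], buffer[i:]
    return None, buffer[1:]  # no match at all: drop one key and move on

print(resolve(['g', 'g', 'x']))  # ('go-top', ['x'])
print(resolve(['q', 'x']))       # (None, ['x'])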
assert isinstance(key_press, KeyPress)
self.input_queue.append(key_press) | def feed(self, key_press) | Add a new :class:`KeyPress` to the input queue.
(Don't forget to call `process_keys` in order to process the queue.) | 4.201684 | 2.836139 | 1.48148 |
while self.input_queue:
key_press = self.input_queue.popleft()
if key_press.key != Keys.CPRResponse:
self.beforeKeyPress.fire()
self._process_coroutine.send(key_press)
if key_press.key != Keys.CPRResponse:
self.afterKeyPress.fire()
# Invalidate user interface.
cli = self._cli_ref()
if cli:
cli.invalidate() | def process_keys(self) | Process all the keys in the `input_queue`.
(To be called after `feed`.)
Note: because of the `feed`/`process_keys` separation, it is
possible to call `feed` from inside a key binding.
This function keeps looping until the queue is empty. | 6.393485 | 5.742589 | 1.113345 |
cli = self._cli_ref()
if cli:
buff = cli.current_buffer
preferred_column = buff.preferred_column
if (ViNavigationMode()(event.cli) and
buff.document.is_cursor_at_the_end_of_line and
len(buff.document.current_line) > 0):
buff.cursor_position -= 1
# Set the preferred_column for arrow up/down again.
# (This was cleared after changing the cursor position.)
buff.preferred_column = preferred_column | def _fix_vi_cursor_position(self, event) | After every command, make sure that if we are in Vi navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.) | 7.072863 | 6.050201 | 1.169029 |
if self._arg == '-':
return -1
result = int(self._arg or 1)
# Don't exceed a million.
if result >= 1000000:
result = 1
return result | def arg(self) | Repetition argument. | 6.813803 | 5.828009 | 1.169148 |
assert data in '-0123456789'
current = self._arg
if data == '-':
assert current is None or current == '-'
result = data
elif current is None:
result = data
else:
result = "%s%s" % (current, data)
self.input_processor.arg = result | def append_to_arg_count(self, data) | Add digit to the input argument.
:param data: the typed digit as string | 4.439879 | 4.487628 | 0.98936 |
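The two members above cooperate: typed digits accumulate into a string, which `arg` then interprets. A hedged mirror of that logic for plain values (hypothetical helper, not the library API):
def interpret_arg(raw):
    # Mirrors the `arg` property: '-' means -1, None defaults to 1,
    # and absurdly large counts reset to 1.
    if raw == '-':
        return -1
    result = int(raw or 1)
    return 1 if result >= 1000000 else result

raw = None
for digit in '42':  # the user types "4", then "2"
    raw = digit if raw is None else raw + digit
print(raw, interpret_arg(raw))   # 42 42
print(interpret_arg(None))       # 1
print(interpret_arg('-'))        # -1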
max_count = 2048 # Max events to read at the same time.
read = DWORD(0)
arrtype = INPUT_RECORD * max_count
input_records = arrtype()
# Get next batch of input event.
windll.kernel32.ReadConsoleInputW(
self.handle, pointer(input_records), max_count, pointer(read))
# First, get all the keys from the input buffer, in order to determine
# whether we should consider this a paste event or not.
all_keys = list(self._get_keys(read, input_records))
if self.recognize_paste and self._is_paste(all_keys):
gen = iter(all_keys)
for k in gen:
# Pasting: if the current key consists of text or \n, turn it
# into a BracketedPaste.
data = []
while k and (isinstance(k.key, six.text_type) or
k.key == Keys.ControlJ):
data.append(k.data)
try:
k = next(gen)
except StopIteration:
k = None
if data:
yield KeyPress(Keys.BracketedPaste, ''.join(data))
if k is not None:
yield k
else:
for k in all_keys:
yield k | def read(self) | Return a list of `KeyPress` instances. It won't return anything when
there was nothing to read. (This function doesn't block.)
http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx | 5.282822 | 4.746474 | 1.112999 |
for i in range(read.value):
ir = input_records[i]
# Get the right EventType from the EVENT_RECORD.
# (For some reason the Windows console application 'cmder'
# [http://gooseberrycreative.com/cmder/] can return '0' for
# ir.EventType. -- Just ignore that.)
if ir.EventType in EventTypes:
ev = getattr(ir.Event, EventTypes[ir.EventType])
# Process if this is a key event. (We also have mouse, menu and
# focus events.)
if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
for key_press in self._event_to_key_presses(ev):
yield key_press
elif type(ev) == MOUSE_EVENT_RECORD:
for key_press in self._handle_mouse(ev):
yield key_press | def _get_keys(self, read, input_records) | Generator that yields `KeyPress` objects from the input records. | 8.060422 | 7.254147 | 1.111147 |
# Consider paste when it contains at least one newline and at least one
# other character.
text_count = 0
newline_count = 0
for k in keys:
if isinstance(k.key, six.text_type):
text_count += 1
if k.key == Keys.ControlJ:
newline_count += 1
return newline_count >= 1 and text_count > 1 | def _is_paste(keys) | Return `True` when we should consider this list of keys as a paste
event. Pasted text on windows will be turned into a
`Keys.BracketedPaste` event. (It's not 100% correct, but it is probably
the best possible way to detect pasting of text and handle that
correctly.) | 4.271563 | 3.528548 | 1.210572 |
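A quick demo of the paste heuristic with stand-ins for `KeyPress` and `Keys.ControlJ` (the real key object is not a plain string, which the sentinel below approximates):
from collections import namedtuple

KeyPress = namedtuple('KeyPress', ['key', 'data'])  # stand-in for the real class
CONTROL_J = object()                                # stand-in for Keys.ControlJ

def sketch_is_paste(keys):
    text_count = sum(1 for k in keys if isinstance(k.key, str))
    newline_count = sum(1 for k in keys if k.key is CONTROL_J)
    return newline_count >= 1 and text_count > 1

typed = [KeyPress('a', 'a')]
pasted = [KeyPress('d', 'd'), KeyPress('e', 'e'), KeyPress(CONTROL_J, '\n')]
print(sketch_is_paste(typed), sketch_is_paste(pasted))  # False True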
assert type(ev) == KEY_EVENT_RECORD and ev.KeyDown
result = None
u_char = ev.uChar.UnicodeChar
ascii_char = u_char.encode('utf-8')
# NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be latin-1
# encoded. See also:
# https://github.com/ipython/ipython/issues/10004
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/389
if u_char == '\x00':
if ev.VirtualKeyCode in self.keycodes:
result = KeyPress(self.keycodes[ev.VirtualKeyCode], '')
else:
if ascii_char in self.mappings:
if self.mappings[ascii_char] == Keys.ControlJ:
u_char = '\n' # Windows sends \r; turn it into \n for unix compatibility.
result = KeyPress(self.mappings[ascii_char], u_char)
else:
result = KeyPress(u_char, u_char)
# Correctly handle Control-Arrow keys.
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result:
if result.key == Keys.Left:
result.key = Keys.ControlLeft
if result.key == Keys.Right:
result.key = Keys.ControlRight
if result.key == Keys.Up:
result.key = Keys.ControlUp
if result.key == Keys.Down:
result.key = Keys.ControlDown
# Turn 'Tab' into 'BackTab' when shift was pressed.
if ev.ControlKeyState & self.SHIFT_PRESSED and result:
if result.key == Keys.Tab:
result.key = Keys.BackTab
# Turn 'Space' into 'ControlSpace' when control was pressed.
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and result.data == ' ':
result = KeyPress(Keys.ControlSpace, ' ')
# Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot
# detect this combination. But it's really practical on Windows.)
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and \
result.key == Keys.ControlJ:
return [KeyPress(Keys.Escape, ''), result]
# Return result. If alt was pressed, prefix the result with an
# 'Escape' key, just like unix VT100 terminals do.
# NOTE: Only replace the left alt with escape. The right alt key often
# acts as altgr and is used in many non US keyboard layouts for
# typing some special characters, like a backslash. We don't want
# all backslashes to be prefixed with escape. (Esc-\ has a
# meaning in E-macs, for instance.)
if result:
meta_pressed = ev.ControlKeyState & self.LEFT_ALT_PRESSED
if meta_pressed:
return [KeyPress(Keys.Escape, ''), result]
else:
return [result]
else:
return [] | def _event_to_key_presses(self, ev) | For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances. | 4.009305 | 3.897771 | 1.028615 |
FROM_LEFT_1ST_BUTTON_PRESSED = 0x1
result = []
# Check event type.
if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:
# On a key press, generate both the mouse down and up event.
for event_type in [MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP]:
data = ';'.join([
event_type,
str(ev.MousePosition.X),
str(ev.MousePosition.Y)
])
result.append(KeyPress(Keys.WindowsMouseEvent, data))
return result | def _handle_mouse(self, ev) | Handle mouse events. Return a list of KeyPress instances. | 6.405595 | 5.43341 | 1.178927 |
# Regular expression for tokenizing other regular expressions.
p = re.compile(r'''^(
\(\?P\<[a-zA-Z0-9_-]+\> | # Start of named group.
\(\?#[^)]*\) | # Comment
\(\?= | # Start of lookahead assertion
\(\?! | # Start of negative lookahead assertion
\(\?<= | # If preceded by.
\(\?< | # If not preceded by.
\(\?: | # Start of group. (non capturing.)
\( | # Start of group.
\(\?[iLmsux] | # Flags.
\(\?P=[a-zA-Z]+\) | # Back reference to named group
\) | # End of group.
\{[^{}]*\} | # Repetition
\*\? | \+\? | \?\? | # Non greedy repetition.
\* | \+ | \? | # Repetition
\#.*\n | # Comment
\\. |
# Character group.
\[
( [^\]\\] | \\.)*
\] |
[^(){}] |
.
)''', re.VERBOSE)
tokens = []
while input:
m = p.match(input)
if m:
token, input = input[:m.end()], input[m.end():]
if not token.isspace():
tokens.append(token)
else:
raise Exception('Could not tokenize input regex.')
return tokens | def tokenize_regex(input) | Takes a string, representing a regular expression as input, and tokenizes
it.
:param input: string, representing a regular expression.
:returns: List of tokens. | 5.274118 | 5.27577 | 0.999687 |
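Assuming `tokenize_regex` above is importable, a quick illustration; note that bare whitespace tokens are dropped by the `isspace` check:
print(tokenize_regex(r'(?P<cmd>[a-z]+)(-[0-9]+)?'))
# ['(?P<cmd>', '[a-z]', '+', ')', '(', '-', '[0-9]', '+', ')', '?']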
# We add a closing brace because that represents the final pop of the stack.
tokens = [')'] + regex_tokens[::-1]
def wrap(lst):
if len(lst) == 1:
return lst[0]
else:
return Sequence(lst)
def _parse():
or_list = []
result = []
def wrapped_result():
if or_list == []:
return wrap(result)
else:
or_list.append(result)
return Any([wrap(i) for i in or_list])
while tokens:
t = tokens.pop()
if t.startswith('(?P<'):
variable = Variable(_parse(), varname=t[4:-1])
result.append(variable)
elif t in ('*', '*?'):
greedy = (t == '*')
result[-1] = Repeat(result[-1], greedy=greedy)
elif t in ('+', '+?'):
greedy = (t == '+')
result[-1] = Repeat(result[-1], min_repeat=1, greedy=greedy)
elif t in ('?', '??'):
if result == []:
raise Exception('Nothing to repeat.' + repr(tokens))
else:
greedy = (t == '?')
result[-1] = Repeat(result[-1], min_repeat=0, max_repeat=1, greedy=greedy)
elif t == '|':
or_list.append(result)
result = []
elif t in ('(', '(?:'):
result.append(_parse())
elif t == '(?!':
result.append(Lookahead(_parse(), negative=True))
elif t == '(?=':
result.append(Lookahead(_parse(), negative=False))
elif t == ')':
return wrapped_result()
elif t.startswith('#'):
pass
elif t.startswith('{'):
# TODO: implement!
raise Exception('%s-style repetition not yet supported' % t)
elif t.startswith('(?'):
raise Exception('%r not supported' % t)
elif t.isspace():
pass
else:
result.append(Regex(t))
raise Exception("Expecting ')' token")
result = _parse()
if len(tokens) != 0:
raise Exception("Unmatched parantheses.")
else:
return result | def parse_regex(regex_tokens) | Takes a list of tokens from the tokenizer, and returns a parse tree. | 3.118237 | 3.057618 | 1.019826 |
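Chaining the two functions, assuming the node classes (`Variable`, `Repeat`, `Regex`, ...) from the same module are in scope; the comment sketches the expected shape of the tree:
tree = parse_regex(tokenize_regex(r'(?P<cmd>[a-z]+)'))
# -> Variable(Repeat(Regex('[a-z]'), min_repeat=1), varname='cmd')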
assert isinstance(items, list)
assert isinstance(weights, list)
assert all(isinstance(i, int) for i in weights)
assert len(items) == len(weights)
assert len(items) > 0
already_taken = [0 for i in items]
item_count = len(items)
max_weight = max(weights)
i = 0
while True:
# Each iteration of this loop raises every item's quota by (weight / max_weight).
adding = True
while adding:
adding = False
for item_i, item, weight in zip(range(item_count), items, weights):
if already_taken[item_i] < i * weight / float(max_weight):
yield item
already_taken[item_i] += 1
adding = True
i += 1 | def take_using_weights(items, weights) | Generator that keeps yielding items from the items list, in proportion to
their weight. For instance::
# Getting the first 70 items from this generator yields 10 times A,
# 20 times B and 40 times C, evenly interleaved.
take_using_weights(['A', 'B', 'C'], [5, 10, 20])
:param items: List of items to take from.
:param weights: Integers representing the weight. (Numbers have to be
integers, not floats.) | 3.241044 | 3.169855 | 1.022458 |
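A runnable check of the docstring's own example, assuming `take_using_weights` is in scope:
from collections import Counter
from itertools import islice

counts = Counter(islice(take_using_weights(['A', 'B', 'C'], [5, 10, 20]), 70))
print(counts)  # roughly Counter({'C': 40, 'B': 20, 'A': 10}), i.e. the 1:2:4 ratio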
sizes = self._divide_heigths(cli, write_position)
if self.report_dimensions_callback:
self.report_dimensions_callback(cli, sizes)
if sizes is None:
self.window_too_small.write_to_screen(
cli, screen, mouse_handlers, write_position)
else:
# Draw child panes.
ypos = write_position.ypos
xpos = write_position.xpos
width = write_position.width
for s, c in zip(sizes, self.children):
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
ypos += s | def write_to_screen(self, cli, screen, mouse_handlers, write_position) | Render the prompt to a `Screen` instance.
:param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
to which the output has to be written. | 3.702988 | 3.936105 | 0.940775 |
if not self.children:
return []
# Calculate heights.
given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None
def get_dimension_for_child(c, index):
if given_dimensions and given_dimensions[index] is not None:
return given_dimensions[index]
else:
return c.preferred_height(cli, write_position.width, write_position.extended_height)
dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for both.
# Don't do anything.
if sum_dimensions.min > write_position.extended_height:
return
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))),
weights=[d.weight for d in dimensions])
i = next(child_generator)
while sum(sizes) < min(write_position.extended_height, sum_dimensions.preferred):
# Increase until we meet at least the 'preferred' size.
if sizes[i] < dimensions[i].preferred:
sizes[i] += 1
i = next(child_generator)
if not any([cli.is_returning, cli.is_exiting, cli.is_aborting]):
while sum(sizes) < min(write_position.height, sum_dimensions.max):
# Increase until we use all the available space. (or until "max")
if sizes[i] < dimensions[i].max:
sizes[i] += 1
i = next(child_generator)
return sizes | def _divide_heigths(self, cli, write_position) | Return the heights for all rows.
Or None when there is not enough space. | 3.871595 | 3.735192 | 1.036518 |
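A simplified standalone version of the grow-by-weight loop above (the same loop reappears in `_divide_widths` below), using plain dicts instead of `LayoutDimension` objects and reusing `take_using_weights` from earlier; all values are hypothetical:
def sketch_divide(dimensions, available):
    sizes = [d['min'] for d in dimensions]
    if sum(sizes) > available:
        return None  # not even the minimum fits
    order = take_using_weights(list(range(len(dimensions))),
                               [d['weight'] for d in dimensions])
    i = next(order)
    target = min(available, sum(d['preferred'] for d in dimensions))
    while sum(sizes) < target:
        if sizes[i] < dimensions[i]['preferred']:
            sizes[i] += 1  # grow this child one cell, in weighted round-robin order
        i = next(order)
    return sizes

dims = [{'min': 1, 'preferred': 8, 'weight': 1},
        {'min': 1, 'preferred': 8, 'weight': 3}]
print(sketch_divide(dims, 10))  # [3, 7]: the weight-3 child grows three times as fast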
yield self
for c in self.children:
for i in c.walk(cli):
yield i | def walk(self, cli) | Walk through children. | 4.710085 | 3.426615 | 1.374559 |
if not self.children:
return []
# Calculate widths.
given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None
def get_dimension_for_child(c, index):
if given_dimensions and given_dimensions[index] is not None:
return given_dimensions[index]
else:
return c.preferred_width(cli, width)
dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for even the minimum sizes,
# give up (the caller then renders the "window too small" content).
if sum_dimensions.min > width:
return
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))),
weights=[d.weight for d in dimensions])
i = next(child_generator)
while sum(sizes) < min(width, sum_dimensions.preferred):
# Increase until we meet at least the 'preferred' size.
if sizes[i] < dimensions[i].preferred:
sizes[i] += 1
i = next(child_generator)
while sum(sizes) < min(width, sum_dimensions.max):
# Increase until we use all the available space.
if sizes[i] < dimensions[i].max:
sizes[i] += 1
i = next(child_generator)
return sizes | def _divide_widths(self, cli, width) | Return the widths for all columns.
Or None when there is not enough space. | 3.544057 | 3.429598 | 1.033374 |
if not self.children:
return
sizes = self._divide_widths(cli, write_position.width)
if self.report_dimensions_callback:
self.report_dimensions_callback(cli, sizes)
# If there is not enough space.
if sizes is None:
self.window_too_small.write_to_screen(
cli, screen, mouse_handlers, write_position)
return
# Calculate heights, take the largest possible, but not larger than write_position.extended_height.
heights = [child.preferred_height(cli, width, write_position.extended_height).preferred
for width, child in zip(sizes, self.children)]
height = max(write_position.height, min(write_position.extended_height, max(heights)))
# Draw child panes.
ypos = write_position.ypos
xpos = write_position.xpos
for s, c in zip(sizes, self.children):
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, s, height))
xpos += s | def write_to_screen(self, cli, screen, mouse_handlers, write_position) | Render the prompt to a `Screen` instance.
:param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
to which the output has to be written. | 3.834072 | 3.91923 | 0.978272 |
return self.content.preferred_height(cli, width, max_available_height) | def preferred_height(self, cli, width, max_available_height) | Return the preferred height of the float container.
(We don't care about the height of the floats, they should always fit
into the dimensions provided by the container.) | 4.275869 | 3.17057 | 1.348612 |
wp = write_position
Transparent = Token.Transparent
for y in range(wp.ypos, wp.ypos + wp.height):
if y in screen.data_buffer:
row = screen.data_buffer[y]
for x in range(wp.xpos, wp.xpos + wp.width):
c = row[x]
if c.char != ' ' or c.token != Transparent:
return False
return True | def _area_is_empty(self, screen, write_position) | Return True when the area below the write position is still empty.
(For floats that should not hide content underneath.) | 3.778948 | 3.607246 | 1.047599 |
yield self
for i in self.content.walk(cli):
yield i
for f in self.floats:
for i in f.content.walk(cli):
yield i | def walk(self, cli) | Walk through children. | 5.247584 | 4.619984 | 1.135845 |
cpos = self.ui_content.cursor_position
y, x = self._rowcol_to_yx[cpos.y, cpos.x]
return Point(x=x - self._x_offset, y=y - self._y_offset) | def cursor_position(self) | Return the cursor position coordinates, relative to the left/top corner
of the rendered screen. | 5.0339 | 4.677788 | 1.076128 |
if self.displayed_lines[0] == 0:
top = 0
else:
# Get row where the cursor is displayed.
y = self.input_line_to_visible_line[self.ui_content.cursor_position.y]
top = min(y, self.configured_scroll_offsets.top)
return ScrollOffsets(
top=top,
bottom=min(self.ui_content.line_count - self.displayed_lines[-1] - 1,
self.configured_scroll_offsets.bottom),
# For left/right, it probably doesn't make sense to return something.
# (We would have to calculate the widths of all the lines and keep
# double width characters in mind.)
left=0, right=0) | def applied_scroll_offsets(self) | Return a :class:`.ScrollOffsets` instance that indicates the actual
offset. This can be less than or equal to what's configured. E.g, when
the cursor is completely at the top, the top offset will be zero rather
than what's configured. | 5.571109 | 4.924162 | 1.131382 |
result = {}
for k, v in self.visible_line_to_input_line.items():
if v in result:
result[v] = min(result[v], k)
else:
result[v] = k
return result | def input_line_to_visible_line(self) | Return the dictionary mapping the line numbers of the input buffer to
the lines of the screen. When a line spans several rows at the screen,
the first row appears in the dictionary. | 2.745841 | 2.255114 | 1.217606 |
if after_scroll_offset:
return self.displayed_lines[self.applied_scroll_offsets.top]
else:
return self.displayed_lines[0] | def first_visible_line(self, after_scroll_offset=False) | Return the line number (0 based) of the input document that corresponds
with the first visible line. | 4.334514 | 3.991215 | 1.086014 |
if before_scroll_offset:
return self.displayed_lines[-1 - self.applied_scroll_offsets.bottom]
else:
return self.displayed_lines[-1] | def last_visible_line(self, before_scroll_offset=False) | Like `first_visible_line`, but for the last visible line. | 4.85085 | 4.303362 | 1.127223 |
return (self.first_visible_line(after_scroll_offset) +
(self.last_visible_line(before_scroll_offset) -
self.first_visible_line(after_scroll_offset)) // 2
) | def center_visible_line(self, before_scroll_offset=False,
after_scroll_offset=False) | Like `first_visible_line`, but for the center visible line. | 2.825039 | 2.223119 | 1.270755 |
if self.wrap_lines:
return self.ui_content.get_height_for_line(lineno, self.window_width)
else:
return 1 | def get_height_for_line(self, lineno) | Return the height of the given line.
(The height that it would take, if this line became visible.) | 5.411123 | 5.102141 | 1.060559 |
# Margin.get_width needs to have a UIContent instance.
def get_ui_content():
return self._get_ui_content(cli, width=0, height=0)
def get_width():
return margin.get_width(cli, get_ui_content)
key = (margin, cli.render_counter)
return self._margin_width_cache.get(key, get_width) | def _get_margin_width(self, cli, margin) | Return the width for this margin.
(Calculate only once per render time.) | 6.247931 | 5.098926 | 1.225343 |
dimension = dimension or LayoutDimension()
# When a preferred dimension was explicitly given to the Window,
# ignore the UIControl.
if dimension.preferred_specified:
preferred = dimension.preferred
# When a 'preferred' dimension is given by the UIControl, make sure
# that it stays within the bounds of the Window.
if preferred is not None:
if dimension.max:
preferred = min(preferred, dimension.max)
if dimension.min:
preferred = max(preferred, dimension.min)
# When a `dont_extend` flag has been given, use the preferred dimension
# also as the max dimension.
if dont_extend and preferred is not None:
max_ = min(dimension.max, preferred)
else:
max_ = dimension.max
return LayoutDimension(
min=dimension.min, max=max_,
preferred=preferred, weight=dimension.weight) | def _merge_dimensions(dimension, preferred=None, dont_extend=False) | Take the LayoutDimension from this `Window` class and the received
preferred size from the `UIControl` and return a `LayoutDimension` to
report to the parent container. | 3.850726 | 3.342227 | 1.152144 |
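A numeric mirror of the clamping rules for plain integers rather than `LayoutDimension` objects (hypothetical helper):
def sketch_merge(min_, max_, preferred, dont_extend=False):
    preferred = max(min_, min(preferred, max_))  # clamp preferred into [min, max]
    if dont_extend:
        max_ = min(max_, preferred)              # freeze growth at 'preferred'
    return (min_, preferred, max_)

print(sketch_merge(5, 20, preferred=50))                    # (5, 20, 20)
print(sketch_merge(5, 20, preferred=10, dont_extend=True))  # (5, 10, 10)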
def get_content():
return self.content.create_content(cli, width=width, height=height)
key = (cli.render_counter, width, height)
return self._ui_content_cache.get(key, get_content) | def _get_ui_content(self, cli, width, height) | Create a `UIContent` instance. | 5.008636 | 4.683606 | 1.069397 |
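Both of the last two helpers lean on a small keyed cache with a compute-on-miss `get(key, getter)` interface. A rough sketch of how such a cache behaves; the eviction policy of the real class is assumed here:
class SketchCache:
    def __init__(self, maxsize=8):
        self._data = {}
        self.maxsize = maxsize

    def get(self, key, getter):
        # Return the cached value, computing it on the first miss.
        if key not in self._data:
            if len(self._data) >= self.maxsize:
                self._data.pop(next(iter(self._data)))  # evict an old entry
            self._data[key] = getter()
        return self._data[key]

cache = SketchCache()
print(cache.get(('render', 1), lambda: 'computed'))  # computed (miss)
print(cache.get(('render', 1), lambda: 'cached'))    # computed (hit, getter skipped)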