code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
def helper_fn(self, arg):
    '''Parse `arg` with docopt against the wrapped command's usage docstring.'''
    try:
        # helper_fn.__doc__ is rebound below to func.__doc__, so docopt
        # validates against the decorated command's own usage block.
        opt = docopt(helper_fn.__doc__, arg)
    except DocoptExit as exception:
        # The DocoptExit is thrown when the args do not match.
        # We print a message to the user and the usage block.
        print 'Invalid Command!'
        print exception
        return
    except SystemExit:
        # The SystemExit exception prints the usage for --help
        # We do not need to do the print here.
        return
    # `func` is the decorated command, captured from the enclosing scope.
    return func(self, opt)
# Make the wrapper look like the wrapped command (as functools.wraps would).
helper_fn.__name__ = func.__name__
helper_fn.__doc__ = func.__doc__
helper_fn.__dict__.update(func.__dict__)
return helper_fn | def docopt_cmd(func) | This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action. | 2.956726 | 2.740784 | 1.078788 |
''' Collect incorrect and mistyped commands '''
# Known top-level shell commands, used for fuzzy matching below.
choices = ["help", "mode", "psiturk_status", "server", "shortcuts",
           "worker", "db", "edit", "open", "config", "show",
           "debug", "setup_example", "status", "tunnel", "amt_balance",
           "download_datafiles", "exit", "hit", "load", "quit", "save",
           "shell", "version"]
print "%s is not a psiTurk command. See 'help'." %(cmd)
# Suggest the closest known command (fuzzy match via process.extractOne).
print "Did you mean this?\n %s" %(process.extractOne(cmd,
                                                     choices)[0]) | def default(self, cmd) | Collect incorrect and mistyped commands | 13.770998 | 11.810789 | 1.165968 |
''' Check offline configuration file'''
quit_on_start = False
database_url = self.config.get('Database Parameters', 'database_url')
host = self.config.get('Server Parameters', 'host', 'localhost')
if database_url[:6] != 'sqlite':
print("*** Error: config.txt option 'database_url' set to use "
"mysql://. Please change this sqllite:// while in cabin mode.")
quit_on_start = True
if host != 'localhost':
print("*** Error: config option 'host' is not set to localhost. "
"Please change this to localhost while in cabin mode.")
quit_on_start = True
if quit_on_start:
exit() | def check_offline_configuration(self) | Check offline configuration file | 5.664639 | 5.494212 | 1.031019 |
''' Print cabin mode message '''
# Static banner text shipped with psiTurk, followed by the version line.
# NOTE(review): file handle is never closed — presumably acceptable for a
# one-shot read at startup; confirm.
sys_status = open(self.help_path + 'cabin.txt', 'r')
server_msg = sys_status.read()
return server_msg + colorize('psiTurk version ' + version_number +
                             '\nType "help" for more information.',
                             'green', False) | def get_intro_prompt(self) | Print cabin mode message | 13.018118 | 9.601506 | 1.355841 |
''' Construct psiTurk shell prompt '''
prompt = '[' + colorize('psiTurk', 'bold')
server_string = ''
server_status = self.server.is_server_running()
if server_status == 'yes':
server_string = colorize('on', 'green')
elif server_status == 'no':
server_string = colorize('off', 'red')
elif server_status == 'maybe':
server_string = colorize('unknown', 'yellow')
elif server_status == 'blocked':
server_string = colorize('blocked', 'red')
prompt += ' server:' + server_string
prompt += ' mode:' + colorize('cabin', 'bold')
prompt += ']$ '
self.prompt = prompt | def color_prompt(self) | Construct psiTurk shell prompt | 3.680535 | 3.044029 | 1.2091 |
''' Keep persistent command history. '''
if not self.already_prelooped:
self.already_prelooped = True
open('.psiturk_history', 'a').close() # create file if it doesn't exist
readline.read_history_file('.psiturk_history')
for i in range(readline.get_current_history_length()):
if readline.get_history_item(i) is not None:
self.history.append(readline.get_history_item(i)) | def preloop(self) | Keep persistent command history. | 3.357774 | 2.938153 | 1.142818 |
''' Trigger hooks after command. '''
# Empty input is routed to emptyline() instead of cmd2's hook chain.
if not line:
    return self.emptyline()
return Cmd.onecmd_plus_hooks(self, line) | def onecmd_plus_hooks(self, line) | Trigger hooks after command. | 6.383165 | 5.868701 | 1.087662 |
''' Refresh the colorized prompt after every command, then defer to Cmd. '''
self.color_prompt()
return Cmd.postcmd(self, stop, line) | def postcmd(self, stop, line) | Exit cmd cleanly. | 10.344095 | 6.945323 | 1.489361 |
''' List hits. '''
# Pick the subset of HITs to show; `active` takes precedence over
# `reviewable`, otherwise all HITs are listed.
if active_hits:
    hits_data = self.amt_services_wrapper.get_active_hits(all_studies)
elif reviewable_hits:
    hits_data = self.amt_services_wrapper.get_reviewable_hits(all_studies)
else:
    hits_data = self.amt_services_wrapper.get_all_hits(all_studies)
if not hits_data:
    print '*** no hits retrieved'
else:
    for hit in hits_data:
        print hit | def hit_list(self, active_hits, reviewable_hits, all_studies) | List hits. | 2.797575 | 2.739335 | 1.021261 |
''' Returns tuple describing expenses:
amount paid to workers
amount paid to amazon'''
# fee structure changed 07.22.15:
# 20% for HITS with < 10 assignments
# 40% for HITS with >= 10 assignments
commission = 0.2
if float(num_workers) >= 10:
commission = 0.4
work = float(num_workers) * float(reward)
fee = work * commission
return (work, fee, work+fee) | def _estimate_expenses(self, num_workers, reward) | Returns tuple describing expenses:
amount paid to workers
amount paid to amazon | 7.508395 | 4.952858 | 1.515972 |
''' Prompts for a 'yes' or 'no' to given prompt. '''
response = raw_input(prompt).strip().lower()
valid = {'y': True, 'ye': True, 'yes': True, 'n': False, 'no': False}
while True:
try:
return valid[response]
except:
response = raw_input("Please respond 'y' or 'n': ").strip().lower() | def _confirm_dialog(self, prompt) | Prompts for a 'yes' or 'no' to given prompt. | 2.82731 | 2.399971 | 1.17806 |
''' Stop experiment server '''
# Shut down when the server is (or might be) up; 'maybe' covers an
# inconclusive status probe.
if (self.server.is_server_running() == 'yes' or
        self.server.is_server_running() == 'maybe'):
    self.server.shutdown()
    print 'Please wait. This could take a few seconds.'
    time.sleep(0.5) | def server_off(self) | Stop experiment server | 6.066391 | 5.567269 | 1.089653 |
''' Launch log '''
logfilename = self.config.get('Server Parameters', 'logfile')
if sys.platform == "darwin":
    # macOS: open the log in Console.app.
    args = ["open", "-a", "Console.app", logfilename]
else:
    # Elsewhere: tail the log in a new xterm window.
    args = ["xterm", "-e", "'tail -f %s'" % logfilename]
subprocess.Popen(args, close_fds=True)
print "Log program launching..." | def server_log(self) | Launch log | 5.253855 | 5.092721 | 1.03164 |
# Dispatch docopt-parsed `config` subcommands.
if arg['print']:
    self.print_config(arg)
elif arg['reload']:
    self.reload_config(arg)
else:
    # NOTE(review): falls back to help_server(), not a config-specific
    # help — looks like a copy-paste from do_server; confirm intended.
    self.help_server() | def do_config(self, arg) | Usage:
config print
config reload
config help | 4.541141 | 3.590086 | 1.264912 |
''' Tab-complete config command '''
# Offer config subcommands whose names start with the typed prefix.
return [i for i in PsiturkShell.config_commands if i.startswith(text)] | def complete_config(self, text, line, begidx, endidx) | Tab-complete config command | 13.823449 | 12.537205 | 1.102594 |
''' Print configuration. '''
for section in self.config.sections():
print '[%s]' % section
items = dict(self.config.items(section))
for k in items:
print "%(a)s=%(b)s" % {'a': k, 'b': items[k]}
print '' | def print_config(self, _) | Print configuration. | 3.569193 | 3.476947 | 1.026531 |
''' Reload config. '''
restart_server = False
# A (possibly) running server must restart for new config to apply;
# give the user a chance to abort first.
if (self.server.is_server_running() == 'yes' or
        self.server.is_server_running() == 'maybe'):
    user_input = raw_input("Reloading configuration requires the server "
                           "to restart. Really reload? y or n: ")
    if user_input != 'y':
        return
    restart_server = True
self.config.load_config()
if restart_server:
    self.server_restart() | def reload_config(self, _) | Reload config. | 4.123357 | 4.026898 | 1.023954 |
''' Notify user of server status. '''
server_status = self.server.is_server_running()
if server_status == 'yes':
print 'Server: ' + colorize('currently online', 'green')
elif server_status == 'no':
print 'Server: ' + colorize('currently offline', 'red')
elif server_status == 'maybe':
print 'Server: ' + colorize('status unknown', 'yellow')
elif server_status == 'blocked':
print 'Server: ' + colorize('blocked', 'red') | def do_status(self, _) | Notify user of server status. | 3.021025 | 2.78452 | 1.084935 |
''' Use local file for DB. '''
# interactive = False # Never used
if filename is None:
    # interactive = True # Never used
    # NOTE(review): "SQLLite" in the prompt is a typo for "SQLite".
    filename = raw_input('Enter the filename of the local SQLLite '
                         'database you would like to use '
                         '[default=participants.db]: ')
    if filename == '':
        filename = 'participants.db'
base_url = "sqlite:///" + filename
self.config.set("Database Parameters", "database_url", base_url)
print "Updated database setting (database_url): \n\t", \
    self.config.get("Database Parameters", "database_url")
# Restart so the running server picks up the new database URL.
if self.server.is_server_running() == 'yes':
    self.server_restart() | def db_use_local_file(self, arg, filename=None) | Use local file for DB. | 5.470059 | 5.48439 | 0.997387 |
''' Download datafiles. '''
contents = {"trialdata": lambda p: p.get_trial_data(), "eventdata": \
lambda p: p.get_event_data(), "questiondata": lambda p: \
p.get_question_data()}
query = Participant.query.all()
for k in contents:
ret = "".join([contents[k](p) for p in query])
temp_file = open(k + '.csv', 'w')
temp_file.write(ret)
temp_file.close() | def do_download_datafiles(self, _) | Download datafiles. | 4.518005 | 4.663167 | 0.96887 |
# Dispatch docopt-parsed `server` subcommands; unknown input shows help.
if arg['on']:
    self.server_on()
elif arg['off']:
    self.server_off()
elif arg['restart']:
    self.server_restart()
elif arg['log']:
    self.server_log()
else:
    self.help_server() | def do_server(self, arg) | Usage:
server on
server off
server restart
server log
server help | 2.398866 | 1.902383 | 1.26098 |
''' Tab-complete server command '''
# Offer server subcommands whose names start with the typed prefix.
return [i for i in PsiturkShell.server_commands if i.startswith(text)] | def complete_server(self, text, line, begidx, endidx) | Tab-complete server command | 14.270716 | 12.647883 | 1.128309 |
'''Override do_quit for network clean up.'''
# Returning True tells cmd to stop the loop; False keeps the shell alive.
if (self.server.is_server_running() == 'yes' or
        self.server.is_server_running() == 'maybe'):
    user_input = raw_input("Quitting shell will shut down experiment "
                           "server. Really quit? y or n: ")
    if user_input == 'y':
        self.server_off()
    else:
        # User declined: abort the quit.
        return False
return True | def do_quit(self, _) | Override do_quit for network clean up. | 6.792595 | 5.112958 | 1.328506 |
''' Clean up child and orphaned processes. '''
# Only the tunnel needs explicit teardown here.
if self.tunnel.is_open:
    print 'Closing tunnel...'
    self.tunnel.close()
    print 'Done.'
else:
    # No tunnel open: nothing to clean up.
    pass | def clean_up(self) | Clean up child and orphaned processes. | 9.334738 | 6.44609 | 1.448124 |
''' Overloads intro prompt with network-aware version if you can reach
psiTurk.org, request system status message'''
# Server-provided status banner, then the local version line.
server_msg = self.web_services.get_system_status()
return server_msg + colorize('psiTurk version ' + version_number +
                             '\nType "help" for more information.',
                             'green', False) | def get_intro_prompt(self) | Overloads intro prompt with network-aware version if you can reach
psiTurk.org, request system status message | 23.887627 | 5.771617 | 4.13881 |
''' Tally hits '''
# Skip the network round-trip in quiet mode.
if not self.quiet:
    num_hits = self.amt_services_wrapper.tally_hits()
    # Sandbox and live counts are tracked separately.
    if self.sandbox:
        self.sandbox_hits = num_hits
    else:
        self.live_hits = num_hits | def update_hit_tally(self) | Tally hits | 8.978553 | 8.324564 | 1.078561 |
# Dispatch docopt-parsed `db` subcommands; unknown input shows help.
if arg['get_config']:
    self.db_get_config()
elif arg['use_local_file']:
    self.db_use_local_file(arg, filename=arg['<filename>'])
elif arg['use_aws_instance']:
    self.db_use_aws_instance(arg['<instance_id>'], arg)
elif arg['aws_list_regions']:
    self.db_aws_list_regions()
elif arg['aws_get_region']:
    self.db_aws_get_region()
elif arg['aws_set_region']:
    self.db_aws_set_region(arg['<region_name>'])
elif arg['aws_list_instances']:
    self.db_aws_list_instances()
elif arg['aws_create_instance']:
    self.db_create_aws_db_instance(arg['<instance_id>'], arg['<size>'],
                                   arg['<username>'],
                                   arg['<password>'], arg['<dbname>'])
elif arg['aws_delete_instance']:
    self.db_aws_delete_instance(arg['<instance_id>'])
else:
    self.help_db() | def do_db(self, arg) | Usage:
db get_config
db use_local_file [<filename>]
db use_aws_instance [<instance_id>]
db aws_list_regions
db aws_get_region
db aws_set_region [<region_name>]
db aws_list_instances
db aws_create_instance [<instance_id> <size> <username> <password>
<dbname>]
db aws_delete_instance [<instance_id>]
db help | 2.15943 | 1.42544 | 1.514922 |
''' Tab-complete db command '''
# Offer db subcommands whose names start with the typed prefix.
return [i for i in PsiturkNetworkShell.db_commands if \
        i.startswith(text)] | def complete_db(self, text, line, begidx, endidx) | Tab-complete db command | 20.66885 | 18.190624 | 1.136236 |
# Switch between live and sandbox AMT modes; with no argument, toggle.
restart_server = False
# A (possibly) running server must restart for the mode change to apply.
if self.server.is_server_running() == 'yes' or self.server.is_server_running() == 'maybe':
    r = raw_input("Switching modes requires the server to restart. Really "
                  "switch modes? y or n: ")
    if r != 'y':
        return
    restart_server = True
if arg['<which>'] is None:
    # No explicit target: flip to the other mode.
    if self.sandbox:
        arg['<which>'] = 'live'
    else:
        arg['<which>'] = 'sandbox'
if arg['<which>'] == 'live':
    self.set_sandbox(False)
    self.update_hit_tally()
    print 'Entered %s mode' % colorize('live', 'bold')
else:
    self.set_sandbox(True)
    self.update_hit_tally()
    print 'Entered %s mode' % colorize('sandbox', 'bold')
if restart_server:
    self.server_restart() | def do_mode(self, arg) | Usage: mode
mode <which> | 3.444537 | 3.343149 | 1.030327 |
# Dispatch docopt-parsed `tunnel` subcommands.
# NOTE(review): unlike the sibling do_* dispatchers there is no final
# else/help fallback — unmatched input is silently ignored; confirm.
if arg['open']:
    self.tunnel_open()
elif arg['change']:
    self.tunnel_change()
elif arg['status']:
    self.tunnel_status() | def do_tunnel(self, arg) | Usage: tunnel open
tunnel change
tunnel status | 4.024999 | 2.973762 | 1.353504 |
# Dispatch docopt-parsed `hit` subcommands; tallies are refreshed after
# any operation that changes the number of HITs.
if arg['create']:
    self.hit_create(arg['<numWorkers>'], arg['<reward>'],
                    arg['<duration>'])
    self.update_hit_tally()
elif arg['extend']:
    self.amt_services_wrapper.hit_extend(arg['<HITid>'], arg['<number>'], arg['<minutes>'])
elif arg['expire']:
    self.amt_services_wrapper.hit_expire(arg['--all'], arg['<HITid>'])
    self.update_hit_tally()
elif arg['delete'] or arg['dispose']:
    # 'dispose' is kept as an alias for 'delete'.
    self.amt_services_wrapper.hit_delete(arg['--all'], arg['<HITid>'])
    self.update_hit_tally()
elif arg['list']:
    self.hit_list(arg['--active'], arg['--reviewable'], arg['--all-studies'])
else:
    self.help_hit() | def do_hit(self, arg) | Usage:
hit create [<numWorkers> <reward> <duration>]
hit extend <HITid> [(--assignments <number>)] [(--expiration <minutes>)]
hit expire (--all | <HITid> ...)
hit dispose (--all | <HITid> ...)
hit delete (--all | <HITid> ...)
hit list [--active | --reviewable] [--all-studies]
hit help | 3.772763 | 2.080418 | 1.813464 |
''' Tab-complete hit command. '''
# Offer hit subcommands whose names start with the typed prefix.
return [i for i in PsiturkNetworkShell.hit_commands if \
        i.startswith(text)] | def complete_hit(self, text, line, begidx, endidx) | Tab-complete hit command. | 24.880116 | 21.197563 | 1.173725 |
# Dispatch docopt-parsed `worker` subcommands.
if arg['approve']:
    self.worker_approve(arg['--all'], arg['<hit_id>'], arg['<assignment_id>'], arg['--all-studies'], arg['--force'])
elif arg['reject']:
    self.amt_services_wrapper.worker_reject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['unreject']:
    self.amt_services_wrapper.worker_unreject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['list']:
    self.worker_list(arg['--submitted'], arg['--approved'], arg['--rejected'], arg['<hit_id>'], arg['--all-studies'])
elif arg['bonus']:
    # The empty string is the (unused here) bonus reason.
    self.amt_services_wrapper.worker_bonus(arg['<hit_id>'], arg['--auto'], arg['<amount>'], '',
                                           arg['<assignment_id>'])
else:
    self.help_worker() | def do_worker(self, arg) | Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
worker help | 3.01143 | 1.922262 | 1.566608 |
''' Tab-complete worker command. '''
# Offer worker subcommands whose names start with the typed prefix.
return [i for i in PsiturkNetworkShell.worker_commands if \
        i.startswith(text)] | def complete_worker(self, text, line, begidx, endidx) | Tab-complete worker command. | 22.438581 | 17.85146 | 1.256961 |
revproxy_url = False
if self.config.has_option('Server Parameters','adserver_revproxy_host'):
if self.config.has_option( 'Server Parameters', 'adserver_revproxy_port'):
port = self.config.get( 'Server Parameters', 'adserver_revproxy_port')
else:
port = 80
revproxy_url = "http://{}:{}/ad".format(self.config.get('Server Parameters',
'adserver_revproxy_host'),
port)
if revproxy_url:
base_url = revproxy_url
elif 'OPENSHIFT_SECRET_TOKEN' in os.environ:
base_url = "http://" + self.config.get('Server Parameters', 'host') + "/ad"
else:
if arg['--print-only']:
my_ip = get_my_ip()
base_url = "http://" + my_ip + ":" + \
self.config.get('Server Parameters', 'port') + "/ad"
else:
base_url = "http://" + self.config.get('Server Parameters',
'host') + \
":" + self.config.get('Server Parameters', 'port') + "/ad"
launch_url = base_url + "?assignmentId=debug" + \
str(self.random_id_generator()) \
+ "&hitId=debug" + str(self.random_id_generator()) \
+ "&workerId=debug" + str(self.random_id_generator() \
+ "&mode=debug")
if arg['--print-only']:
print("Here's your randomized debug link, feel free to request " \
"another:\n\t" + launch_url)
else:
print("Launching browser pointed at your randomized debug link, " \
"feel free to request another.\n\t" + launch_url)
webbrowser.open(launch_url, new=1, autoraise=True) | def do_debug(self, arg) | Usage: debug [options]
-p, --print-only just provides the URL, doesn't attempt to
launch browser | 3.065446 | 2.976244 | 1.029971 |
''' Open tunnel '''
# The tunnel forwards to the experiment server, so that must be up first.
if (self.server.is_server_running() == 'no' or
        self.server.is_server_running() == 'maybe'):
    print("Error: Sorry, you need to have the server running to open a "
          "tunnel. Try 'server on' first.")
else:
    self.tunnel.open() | def tunnel_open(self) | Open tunnel | 5.883352 | 6.413752 | 0.917303 |
''' Store the AWS key id / secret for subsequent API calls. '''
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key | def update_credentials(self, aws_access_key_id, aws_secret_access_key) | Update credentials | 2.244337 | 2.627113 | 0.854298 |
'''Verify AWS login '''
if ((self.aws_access_key_id == 'YourAccessKeyId') or
(self.aws_secret_access_key == 'YourSecretAccessKey')):
return False
else:
try:
self.rdsc.describe_db_instances()
except botocore.exceptions.ClientError as exception:
print exception
return False
except AttributeError as e:
print e
print "*** Unable to establish connection to AWS region %s "\
"using your access key/secret key", self.region
return False
except boto.exception.BotoServerError as e:
print "***********************************************************"
print "WARNING"
print "Unable to establish connection to AWS RDS (Amazon relational database services)."
print "See relevant psiturk docs here:"
print "\thttp://psiturk.readthedocs.io/en/latest/configure_databases.html#obtaining-a-low-cost-or-free-mysql-database-on-amazon-s-web-services-cloud"
print "***********************************************************"
return False
else:
return True | def verify_aws_login(self) | Verify AWS login | 5.536893 | 5.627222 | 0.983948 |
''' Get DB instance info '''
if not self.connect_to_aws_rds():
return False
try:
instances = self.rdsc.describe_db_instances(dbid).get('DBInstances')
except:
return False
else:
myinstance = instances[0]
return myinstance | def get_db_instance_info(self, dbid) | Get DB instance info | 5.236185 | 5.904088 | 0.886874 |
''' Allow access to instance. '''
if not self.connect_to_aws_rds():
return False
try:
conn = boto.ec2.connect_to_region(
self.region,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key
)
sgs = conn.get_all_security_groups('default')
default_sg = sgs[0]
default_sg.authorize(ip_protocol='tcp', from_port=3306,
to_port=3306, cidr_ip=str(ip_address)+'/32')
except EC2ResponseError, exception:
if exception.error_code == "InvalidPermission.Duplicate":
return True # ok it already exists
else:
return False
else:
return True | def allow_access_to_instance(self, _, ip_address) | Allow access to instance. | 2.532068 | 2.549996 | 0.992969 |
''' DB instance '''
if not self.connect_to_aws_rds():
return False
try:
instances = self.rdsc.describe_db_instances().get('DBInstances')
except:
return False
else:
return instances | def get_db_instances(self) | DB instance | 5.419201 | 5.047837 | 1.073569 |
''' Delete DB '''
if not self.connect_to_aws_rds():
return False
try:
database = self.rdsc.delete_dbinstance(dbid,
skip_final_snapshot=True)
print database
except:
return False
else:
return True | def delete_db_instance(self, dbid) | Delete DB | 5.926774 | 6.803065 | 0.871192 |
''' Validate instance ID '''
# 1-63 alphanumeric characters, first must be a letter.
# NOTE(review): the pattern [\w-]+ also admits '_' and '-', broader than
# "alphanumeric" — presumably intentional for RDS identifiers; confirm
# against the AWS naming rules.
if re.match('[\w-]+$', instid) is not None:
    if len(instid) <= 63 and len(instid) >= 1:
        if instid[0].isalpha():
            return True
return "*** Error: Instance ids must be 1-63 alphanumeric characters, \
first is a letter." | def validate_instance_id(self, instid) | Validate instance ID | 4.851305 | 5.18602 | 0.935458 |
''' integer between 5-1024 (inclusive) '''
try:
int(size)
except ValueError:
return '*** Error: size must be a whole number between 5 and 1024.'
if int(size) < 5 or int(size) > 1024:
return '*** Error: size must be between 5-1024 GB.'
return True | def validate_instance_size(self, size) | integer between 5-1024 (inclusive) | 3.379879 | 2.885516 | 1.171326 |
''' Validate instance username '''
# 1-16 alphanumeric characters - first character must be a letter -
# cannot be a reserved MySQL word
if re.match('[\w-]+$', username) is not None:
if len(username) <= 16 and len(username) >= 1:
if username[0].isalpha():
if username not in MYSQL_RESERVED_WORDS:
return True
return '*** Error: Usernames must be 1-16 alphanumeric chracters, \
first a letter, cannot be reserved MySQL word.' | def validate_instance_username(self, username) | Validate instance username | 6.079933 | 6.412167 | 0.948187 |
''' Validate instance passwords '''
# 8-41 word characters. (The earlier comment about "first character must
# be a letter / cannot be a reserved MySQL word" was copy-pasted from the
# username validator; this check enforces neither.)
if re.match('[\w-]+$', password) is not None:
    if len(password) <= 41 and len(password) >= 8:
        return True
return '*** Error: Passwords must be 8-41 alphanumeric characters' | def validate_instance_password(self, password) | Validate instance passwords | 7.919509 | 7.95683 | 0.99531 |
''' Validate instance database name '''
# 1-64 alphanumeric characters, cannot be a reserved MySQL word
if re.match('[\w-]+$', dbname) is not None:
if len(dbname) <= 41 and len(dbname) >= 1:
if dbname.lower() not in MYSQL_RESERVED_WORDS:
return True
return '*** Error: Database names must be 1-64 alphanumeric characters,\
cannot be a reserved MySQL word.' | def validate_instance_dbname(self, dbname) | Validate instance database name | 5.476243 | 5.769964 | 0.949095 |
''' Create db instance '''
if not self.connect_to_aws_rds():
return False
try:
database = self.rdsc.create_dbinstance(
id=params['id'],
allocated_storage=params['size'],
instance_class='db.t1.micro',
engine='MySQL',
master_username=params['username'],
master_password=params['password'],
db_name=params['dbname'],
multi_az=False
)
except:
return False
else:
return True | def create_db_instance(self, params) | Create db instance | 3.395584 | 3.806964 | 0.89194 |
''' Get all HITs '''
if not self.connect_to_turk():
    return False
try:
    # Follow MTurk's pagination to collect every HIT.
    hits = []
    paginator = self.mtc.get_paginator('list_hits')
    for page in paginator.paginate():
        hits.extend(page['HITs'])
except Exception as e:
    print e
    return False
hits_data = self._hit_xml_to_object(hits)
return hits_data | def get_all_hits(self) | Get all HITs | 3.925346 | 3.889042 | 1.009335 |
''' Get workers '''
if not self.connect_to_turk():
    return False
try:
    # Restrict to the caller's HITs if given, otherwise all of ours.
    if chosen_hits:
        hit_ids = chosen_hits
    else:
        hits = self.get_all_hits()
        hit_ids = [hit.options['hitid'] for hit in hits]
    assignments = []
    for hit_id in hit_ids:
        # Paginate per HIT, optionally filtering by assignment status.
        paginator = self.mtc.get_paginator('list_assignments_for_hit')
        args = dict(
            HITId=hit_id
        )
        if assignment_status:
            args['AssignmentStatuses'] = [assignment_status]
        for page in paginator.paginate(**args):
            assignments.extend(page['Assignments'])
except Exception as e:
    print e
    return False
# Flatten the raw assignment records into simple dicts.
workers = [{
    'hitId': assignment['HITId'],
    'assignmentId': assignment['AssignmentId'],
    'workerId': assignment['WorkerId'],
    'submit_time': assignment['SubmitTime'],
    'accept_time': assignment['AcceptTime'],
    'status': assignment['AssignmentStatus'],
} for assignment in assignments]
return workers | def get_workers(self, assignment_status=None, chosen_hits=None) | Get workers | 2.428649 | 2.443617 | 0.993874 |
''' Bonus worker '''
if not self.connect_to_turk():
    return False
try:
    # Resolve the worker id from the assignment before sending the bonus.
    assignment = self.mtc.get_assignment(AssignmentId=assignment_id)['Assignment']
    worker_id = assignment['WorkerId']
    self.mtc.send_bonus(WorkerId=worker_id, AssignmentId=assignment_id, BonusAmount=str(amount), Reason=reason)
    return True
except Exception as exception:
    print exception
    return False | def bonus_worker(self, assignment_id, amount, reason="") | Bonus worker | 3.161377 | 3.142821 | 1.005904 |
''' Approve worker '''
if not self.connect_to_turk():
    return False
try:
    # OverrideRejection lets a previously rejected assignment be approved.
    self.mtc.approve_assignment(AssignmentId=assignment_id,
                                OverrideRejection=override_rejection)
    return True
except Exception as e:
    # Failure is reported only via the return value.
    return False | def approve_worker(self, assignment_id, override_rejection = False) | Approve worker | 4.745584 | 5.10709 | 0.929215 |
''' Reject worker '''
if not self.connect_to_turk():
    return False
try:
    # Empty RequesterFeedback: no explanation is sent to the worker.
    self.mtc.reject_assignment(AssignmentId=assignment_id, RequesterFeedback='')
    return True
except Exception as e:
    print e
    return False | def reject_worker(self, assignment_id) | Reject worker | 5.451107 | 5.522795 | 0.98702 |
''' Connect to turk '''
# Refuse to run with the template placeholder credentials.
# NOTE(review): `assert False` is stripped under `python -O`; a raised
# exception would be more robust — left unchanged since callers may
# catch AssertionError.
if ((self.aws_access_key_id == 'YourAccessKeyId') or
        (self.aws_secret_access_key == 'YourSecretAccessKey')):
    print "AWS access key not set in ~/.psiturkconfig; please enter a valid access key."
    assert False
# Sandbox and live AMT use different endpoints in the same region.
if self.is_sandbox:
    endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
    endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
self.mtc = boto3.client('mturk',
                        region_name='us-east-1',
                        aws_access_key_id=self.aws_access_key_id,
                        aws_secret_access_key=self.aws_secret_access_key,
                        endpoint_url=endpoint_url)
return True | def setup_mturk_connection(self) | Connect to turk | 2.325068 | 2.321519 | 1.001529 |
''' Verify AWS login '''
if not self.mtc:
    return False
try:
    # Cheap authenticated call used purely as a credentials probe.
    self.mtc.get_account_balance()
except Exception as exception:
    print exception
    return False
else:
    return True | def verify_aws_login(self) | Verify AWS login | 6.0529 | 6.397758 | 0.946097 |
''' Configure HIT: build qualifications, a HIT type, notification
settings, and the parameter dict later used by create_hit. '''
# Qualification:
quals = []
quals.append(dict(
    QualificationTypeId=PERCENT_ASSIGNMENTS_APPROVED_QUAL_ID,
    Comparator='GreaterThanOrEqualTo',
    IntegerValues=[int(hit_config['approve_requirement'])]
))
quals.append(dict(
    QualificationTypeId=NUMBER_HITS_APPROVED_QUAL_ID,
    Comparator='GreaterThanOrEqualTo',
    IntegerValues=[int(hit_config['number_hits_approved'])]
))
if hit_config['require_master_workers']:
    # Masters qualification ids differ between sandbox and live.
    master_qualId = MASTERS_SANDBOX_QUAL_ID if self.is_sandbox else MASTERS_QUAL_ID
    quals.append(dict(
        QualificationTypeId=master_qualId,
        Comparator='Exists'
    ))
if hit_config['us_only']:
    quals.append(dict(
        QualificationTypeId=LOCALE_QUAL_ID,
        Comparator='EqualTo',
        LocaleValues=[{'Country': 'US'}]
    ))
# Create a HIT type for this HIT.
hit_type = self.mtc.create_hit_type(
    Title=hit_config['title'],
    Description=hit_config['description'],
    Reward=str(hit_config['reward']),
    AssignmentDurationInSeconds=int(hit_config['duration'].total_seconds()),
    Keywords=hit_config['keywords'],
    QualificationRequirements=quals)
# Check the config file to see if notifications are wanted.
config = PsiturkConfig()
config.load_config()
try:
    url = config.get('Server Parameters', 'notification_url')
    all_event_types = [
        "AssignmentAccepted",
        "AssignmentAbandoned",
        "AssignmentReturned",
        "AssignmentSubmitted",
        "HITReviewable",
        "HITExpired",
    ]
    # TODO: not sure if this works. Can't find documentation in PsiTurk or MTurk
    self.mtc.update_notification_settings(
        HitTypeId=hit_type['HITTypeId'],
        Notification=dict(
            Destination=url,
            Transport='REST',
            Version=NOTIFICATION_VERSION,
            EventTypes=all_event_types,
        ),
    )
except Exception as e:
    # Best-effort: missing notification_url or API failure is ignored.
    pass
# Wrap the ad URL in MTurk's ExternalQuestion XML envelope.
schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
template = '<ExternalQuestion xmlns="%(schema_url)s"><ExternalURL>%%(external_url)s</ExternalURL><FrameHeight>%%(frame_height)s</FrameHeight></ExternalQuestion>' % vars()
question = template % dict(
    external_url=hit_config['ad_location'],
    frame_height=600,
)
# Specify all the HIT parameters
self.param_dict = dict(
    HITTypeId=hit_type['HITTypeId'],
    Question=question,
    LifetimeInSeconds=int(hit_config['lifetime'].total_seconds()),
    MaxAssignments=hit_config['max_assignments'],
    # TODO
    # ResponseGroups=[
    #     'Minimal',
    #     'HITDetail',
    #     'HITQuestion',
    #     'HITAssignmentSummary'
    # ]
) | def configure_hit(self, hit_config) | Configure HIT | 3.191601 | 3.222896 | 0.99029 |
''' Create HIT '''
try:
    if not self.connect_to_turk():
        return False
    # configure_hit fills self.param_dict used below.
    self.configure_hit(hit_config)
    myhit = self.mtc.create_hit_with_hit_type(**self.param_dict)['HIT']
    self.hitid = myhit['HITId']
except Exception as e:
    print e
    return False
else:
    return self.hitid | def create_hit(self, hit_config) | Create HIT | 4.962247 | 5.040333 | 0.984508 |
''' Expire HIT '''
if not self.connect_to_turk():
    return False
try:
    # Setting the expiration to "now" expires the HIT immediately.
    self.mtc.update_expiration_for_hit(
        HITId=hitid,
        ExpireAt=datetime.datetime.now()
    )
    return True
except Exception as e:
    print "Failed to expire HIT. Please check the ID and try again."
    print e
    return False | def expire_hit(self, hitid) | Expire HIT | 3.848558 | 4.21032 | 0.914077 |
''' Delete HIT '''
if not self.connect_to_turk():
return False
try:
self.mtc.delete_hit(HITId=hitid)
except Exception, e:
print "Failed to delete of HIT %s. Make sure there are no "\
"assignments remaining to be reviewed." % hitid | def delete_hit(self, hitid) | Delete HIT | 7.134512 | 7.593113 | 0.939603 |
''' Get HIT '''
if not self.connect_to_turk():
    return False
try:
    hitdata = self.mtc.get_hit(HITId=hitid)
except Exception as e:
    print e
    return False
# Unwrap the API response to the HIT record itself.
return hitdata['HIT'] | def get_hit(self, hitid) | Get HIT | 4.876286 | 5.335738 | 0.913892 |
''' Get HIT status '''
# Returns False when the HIT cannot be fetched at all.
hitdata = self.get_hit(hitid)
if not hitdata:
    return False
return hitdata['HITStatus'] | def get_hit_status(self, hitid) | Get HIT status | 4.420488 | 4.838456 | 0.913615 |
''' Get summary '''
try:
    # Account balance wrapped into a JSON response object.
    balance = self.check_balance()
    summary = jsonify(balance=str(balance))
    return summary
except MTurkRequestError as exception:
    print exception.error_message
    return False | def get_summary(self) | Get summary | 10.483199 | 11.005132 | 0.952574 |
''' Check credentials '''
req = requests.get(self.api_server + '/api/ad',
auth=(self.access_key, self.secret_key))
# Not sure 500 server error should be included here
if req.status_code in [401, 403, 500]:
return False
else:
return True | def check_credentials(self) | Check credentials | 5.165247 | 5.37325 | 0.961289 |
''' Store the psiturk.org API key/secret for subsequent requests. '''
self.access_key = key
self.secret_key = secret | def update_credentials(self, key, secret) | Update credentials | 5.533505 | 7.995864 | 0.692046 |
# Fetch the psiturk.org status banner; fall back to an offline notice.
try:
    api_server_status_link = self.api_server + '/status_msg?version=' +\
        version_number
    # Short timeout so shell startup is not blocked by a slow network.
    response = urllib2.urlopen(api_server_status_link, timeout=1)
    status_msg = json.load(response)['status']
except urllib2.HTTPError:
    status_msg = "Sorry, can't connect to psiturk.org, please check\
 your internet connection.\nYou will not be able to create new\
 hits, but testing locally should work.\n"
return status_msg | def get_system_status(self) | get_system_status: | 5.961061 | 5.820768 | 1.024102 |
''' POST a new `name` record to the psiturk.org API; returns the response. '''
#headers = {'key': username, 'secret': password}
req = requests.post(self.api_server + '/api/' + name,
                    data=json.dumps(content), auth=(username, password))
return req | def create_record(self, name, content, username, password) | Create record | 5.205234 | 5.968638 | 0.872097 |
''' PUT updated `content` to record `recordid`; returns the response. '''
# headers = {'key': username, 'secret': password}
req = requests.put(self.api_server + '/api/' + name + '/' +
                   str(recordid), data=json.dumps(content),
                   auth=(username, password))
return req | def update_record(self, name, recordid, content, username, password) | Update record | 4.557896 | 5.018067 | 0.908297 |
''' DELETE record `recordid`; returns the response. '''
#headers = {'key': username, 'secret': password}
req = requests.delete(self.api_server + '/api/' + name + '/' +
                      str(recordid), auth=(username, password))
return req | def delete_record(self, name, recordid, username, password) | Delete record | 5.376999 | 6.312646 | 0.851782 |
''' GET `name` records matching `query`; returns the response. '''
#headers = {'key': username, 'secret': password}
req = requests.get(self.api_server + '/api/' + name + "/" + query,
                   auth=(username, password))
return req | def query_records(self, name, username, password, query='') | Query records | 5.986479 | 6.757356 | 0.88592 |
def get_ad_url(self, ad_id, sandbox):
    """Return the public view URL for an ad, on the sandbox or live ad server."""
    server = self.sandbox_ad_server if sandbox else self.ad_server
    return server + '/view/' + str(ad_id)
returns the view URL for the given ad id on the live or sandbox ad server
def set_ad_hitid(self, ad_id, hit_id, sandbox):
    """Attach an AMT HIT id to an existing ad record.

    Returns True when the server acknowledges the update (HTTP 201),
    otherwise False.
    """
    record_type = 'sandboxad' if sandbox else 'ad'
    resp = self.update_record(record_type, ad_id, {'amt_hit_id': hit_id},
                              self.access_key, self.secret_key)
    return resp.status_code == 201
updates the ad with the corresponding hitid | 2.636232 | 2.612603 | 1.009044 |
def create_ad(self, ad_content):
    """Create an ad record on the live or sandbox server.

    Returns the new ad id on success (HTTP 201), or False when the payload
    is missing the 'is_sandbox' flag or the server rejects the request.
    """
    if 'is_sandbox' not in ad_content:
        return False
    record_type = 'sandboxad' if ad_content['is_sandbox'] else 'ad'
    resp = self.create_record(record_type, ad_content, self.access_key,
                              self.secret_key)
    if resp.status_code == 201:
        return resp.json()['ad_id']
    return False
req = self.query_records('experiment', self.access_key,
self.secret_key,
query='download/'+experiment_id)
print req.text
return | def download_experiment(self, experiment_id) | download_experiment: | 11.350623 | 10.777216 | 1.053206 |
def query_records_no_auth(self, name, query=''):
    """GET records of the given type without HTTP authentication."""
    return requests.get(self.api_server + '/api/' + name + "/" + query)
# Look up the experiment metadata on psiturk.org; no credentials needed.
req = self.query_records_no_auth('experiment',
                                 query='download/'+experiment_id)
if req.status_code == 404:
    print "Sorry, no experiment matching id # " + experiment_id
    print "Please double check the code you obtained on the\
 http://psiturk.org/ee"
else:
    # Check if folder with same name already exists.
    expinfo = req.json()
    gitr = requests.get(expinfo['git_url']).json()
    if os.path.exists('./'+gitr['name']):
        print "*"*20
        print "Sorry, you already have a file or folder named\
 "+gitr['name']+". Please rename or delete it before trying\
 to download this experiment. You can do this by typing `rm\
 -rf " + gitr['name'] + "`"
        print "*"*20
        return
    if "clone_url" in gitr:
        # Clone the experiment repository into the current directory.
        git.Git().clone(gitr["clone_url"])
        print "="*20
        print "Downloading..."
        print "Name: " + expinfo['name']
        print "Downloads: " + str(expinfo['downloads'])
        print "Keywords: " + expinfo['keywords']
        print "psiTurk Version: " +\
            str(expinfo['psiturk_version_string'])
        print "URL: http://psiturk.org/ee/"+experiment_id
        print "\n"
        print "Experiment downloaded into the `" + gitr['name'] + "`\
 folder of the current directory"
        print "Type 'cd " + gitr['name'] + "` then run the `psiturk`\
 command."
        print "="*20
    else:
        # No github clone URL in the record: nothing was downloaded.
        print "Sorry, experiment not located on github. You might\
 contact the author of this experiment. Experiment NOT\
 downloaded."
    return
def is_compatible(cls):
    """Return True when tunnels are supported on this platform (64-bit OSX only)."""
    # struct.calcsize('P') is the pointer size in bytes; 8 bytes -> 64-bit.
    is_64bit = struct.calcsize('P')*8 == 64
    if (_platform == "darwin" and is_64bit):
        return True
    else:
        # Fixed typo in the user-facing message ("currenlty" -> "currently").
        print("Linux tunnels are currently unsupported. Please notify "\
            "[email protected]\nif you'd like to see this feature.")
        return False
def get_tunnel_ad_url(self):
    """Request a tunnel hostname from psiturk.org.

    Prints the response body and returns False on an auth/server error.
    """
    resp = requests.get('https://api.psiturk.org/api/tunnel',
                        auth=(self.access_key, self.secret_key))
    if resp.status_code in (401, 403, 500):
        print(resp.content)
        return False
    return resp.json()['tunnel_hostname']
def change_tunnel_ad_url(self):
    """Release the current tunnel hostname so a new one can be assigned.

    Closes the tunnel first if it is open. Prints the server response and
    returns False on an auth/server error; otherwise returns None.
    """
    if self.is_open:
        self.close()
    resp = requests.delete('https://api.psiturk.org/api/tunnel/',
                           auth=(self.access_key, self.secret_key))
    # The response body will contain the tunnel_hostname if needed/wanted.
    if resp.status_code in (401, 403, 500):
        print(resp.content)
        return False
''' Open tunnel '''
# Tunnels are only supported on some platforms (see is_compatible).
if self.is_compatible():
    tunnel_ad_url = self.get_tunnel_ad_url()
    if not tunnel_ad_url:
        return("Tunnel server appears to be down.")
    # Launch the tunnel client, binding the reserved subdomain to the
    # local experiment port; all output is redirected to server.log.
    cmd = '%s -subdomain=%s -config=%s -log=stdout %s 2>&1 > server.log' \
        %(self.tunnel_server, tunnel_ad_url, self.tunnel_config,
          self.local_port)
    self.tunnel = subprocess.Popen(cmd, shell=True)
    # Remember both the bare host and the full http URL for callers.
    self.url = '%s.%s' %(tunnel_ad_url, self.tunnel_host)
    self.full_url = 'http://%s.%s:%s' %(tunnel_ad_url, self.tunnel_host,
                                        self.tunnel_port)
    self.is_open = True
    print "Tunnel URL: %s" % self.full_url
    print "Hint: In OSX, you can open a terminal link using cmd + click"
def close(self):
    """Terminate the tunnel subprocess and its children, and mark the tunnel closed."""
    parent = psutil.Process(self.tunnel.pid)
    # psutil renamed get_children() to children() in psutil 2.0; support
    # both so this works on modern psutil installs too.
    if hasattr(parent, 'children'):
        child_procs = parent.children(recursive=True)
    else:
        child_procs = parent.get_children(recursive=True)
    for child in child_procs:
        child.send_signal(signal.SIGTERM)
    self.is_open = False
def traveling_salesman(G, sampler=None, lagrange=2, weight='weight',
                       **sampler_args):
    """Approximate a minimum travelling-salesperson route via QUBO sampling.

    Builds the TSP QUBO for G, samples it with `sampler`, and decodes the
    lowest-energy sample into an ordered list of nodes.
    """
    Q = traveling_salesman_qubo(G, lagrange, weight)
    # Samples are ordered by increasing energy; take the best one.
    best = next(iter(sampler.sample_qubo(Q, **sampler_args)))
    # Variables are (node, stop_index) pairs; keep the "on" ones and order
    # them by stop index.
    stops = sorted((var for var in best if best[var] > 0),
                   key=lambda var: var[1])
    return [node for node, _ in stops]
return list((x[0] for x in route)) | def traveling_salesman(G, sampler=None, lagrange=2, weight='weight',
**sampler_args) | Returns an approximate minimum traveling salesperson route.
Defines a QUBO with ground states corresponding to the
minimum routes and uses the sampler to sample
from it.
A route is a cycle in the graph that reaches each node exactly once.
A minimum route is a route with the smallest total edge weight.
Parameters
----------
G : NetworkX graph
The graph on which to find a minimum traveling salesperson route.
This should be a complete graph with non-zero weights on every edge.
sampler :
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
lagrange : optional (default 2)
Lagrange parameter to weight constraints (visit every city once)
versus objective (shortest distance route).
weight : optional (default 'weight')
The name of the edge attribute containing the weight.
sampler_args :
Additional keyword parameters are passed to the sampler.
Returns
-------
route : list
List of nodes in order to be visited on a route
Examples
--------
This example uses a `dimod <https://github.com/dwavesystems/dimod>`_ sampler
to find a minimum route in a five-cities problem.
>>> import dwave_networkx as dnx
>>> import networkx as nx
>>> import dimod
...
>>> G = nx.complete_graph(4)
>>> G.add_weighted_edges_from({(0, 1, 1), (0, 2, 2), (0, 3, 3), (1, 2, 3),
... (1, 3, 4), (2, 3, 5)})
>>> dnx.traveling_salesman(G, dimod.ExactSolver())
[2, 1, 0, 3]
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. | 4.525476 | 5.004676 | 0.90425 |
def traveling_salesman_qubo(G, lagrange=2, weight='weight'):
    """Build the QUBO whose ground states encode minimum TSP routes of G.

    Variables are (node, position) pairs. Lagrange-weighted constraints
    force each node into exactly one position and each position to hold
    exactly one node; the objective sums edge weights between consecutive
    positions (cyclically).
    """
    num_nodes = G.number_of_nodes()
    # The encoding requires a complete graph with at least 3 nodes.
    if num_nodes in (1, 2) or len(G.edges) != num_nodes*(num_nodes-1)//2:
        raise ValueError(
            "graph must be a complete graph with at least 3 nodes or empty")
    Q = defaultdict(float)
    # Row constraint: each node occupies exactly one position.
    for node in G:
        for p1 in range(num_nodes):
            Q[((node, p1), (node, p1))] -= lagrange
            for p2 in range(p1+1, num_nodes):
                Q[((node, p1), (node, p2))] += 2.0*lagrange
    # Column constraint: each position holds exactly one node.
    for pos in range(num_nodes):
        for n1 in G:
            Q[((n1, pos), (n1, pos))] -= lagrange
            for n2 in set(G)-{n1}:
                Q[((n1, pos), (n2, pos))] += 2.0*lagrange
    # Objective: distance travelled between consecutive stops, both directions.
    for u, v in itertools.combinations(G.nodes, 2):
        for pos in range(num_nodes):
            succ = (pos + 1) % num_nodes
            Q[((u, pos), (v, succ))] += G[u][v][weight]
            Q[((v, pos), (u, succ))] += G[u][v][weight]
    return Q
If :math:`|G|` is the number of nodes in the graph, the resulting qubo will have:
* :math:`|G|^2` variables/nodes
* :math:`2 |G|^2 (|G| - 1)` interactions/edges
Parameters
----------
G : NetworkX graph
A complete graph in which each edge has a attribute giving its weight.
lagrange : number, optional (default 2)
Lagrange parameter to weight constraints (no edges within set)
versus objective (largest set possible).
weight : optional (default 'weight')
The name of the edge attribute containing the weight.
Returns
-------
QUBO : dict
The QUBO with ground states corresponding to a minimum travelling
salesperson route. | 2.399975 | 2.468739 | 0.972146 |
def markov_network(potentials):
    """Build a Markov network graph from a dict of clique potentials.

    Keys of `potentials` are 1- or 2-tuples of variables; values map every
    0/1 assignment of those variables to an energy. Each potential dict is
    stored on the corresponding node or edge of the returned graph.
    """
    G = nx.Graph()
    G.name = 'markov_network({!r})'.format(potentials)
    # Keys may be nodes or edges; in either case the variables are fully
    # connected, hence "clique".
    for clique, phis in potentials.items():
        size = len(clique)
        # Validate eagerly: this data may not be touched again for a
        # while, so catching malformed potentials now saves debugging later.
        if not isinstance(phis, abc.Mapping):
            raise TypeError("phis should be a dict")
        if any(cfg not in phis
               for cfg in itertools.product((0, 1), repeat=size)):
            raise ValueError("not all potentials provided for {!r}".format(clique))
        if size == 1:
            node, = clique
            G.add_node(node, potential=phis)
        elif size == 2:
            u, v = clique
            # python<=3.5 may not keep edge order consistent, so record
            # the variable order the potentials refer to.
            G.add_edge(u, v, potential=phis, order=(u, v))
        else:
            # Larger cliques would require higher-order models, which do
            # not map cleanly onto networkx graphs.
            raise ValueError("Only supports cliques up to size 2")
    return G
return G | def markov_network(potentials) | Creates a Markov Network from potentials.
A Markov Network is also known as a `Markov Random Field`_
Parameters
----------
potentials : dict[tuple, dict]
A dict where the keys are either nodes or edges and the values are a
dictionary of potentials. The potential dict should map each possible
assignment of the nodes/edges to their energy.
Returns
-------
MN : :obj:`networkx.Graph`
A markov network as a graph where each node/edge stores its potential
dict as above.
Examples
--------
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> MN['a']['b']['potential'][(0, 0)]
-1
.. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field | 6.306644 | 6.264057 | 1.006799 |
def maximum_cut(G, sampler=None, **sampler_args):
    """Approximate a maximum cut of G by sampling the corresponding Ising model.

    Each edge gets a +1 coupling (and every field is zero), so low-energy
    states put adjacent nodes in opposite sets; one side of the cut is
    returned as a set.
    """
    h = dict.fromkeys(G, 0.)
    J = {edge: 1 for edge in G.edges}
    # Samples arrive ordered by increasing energy; take the best one.
    best = next(iter(sampler.sample_ising(h, J, **sampler_args)))
    return {v for v in G if best[v] >= 0}
return set(v for v in G if sample[v] >= 0) | def maximum_cut(G, sampler=None, **sampler_args) | Returns an approximate maximum cut.
Defines an Ising problem with ground states corresponding to
a maximum cut and uses the sampler to sample from it.
A maximum cut is a subset S of the vertices of G such that
the number of edges between S and the complementary subset
is as large as possible.
Parameters
----------
G : NetworkX graph
The graph on which to find a maximum cut.
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
S : set
A maximum cut of G.
Example
-------
This example uses a sampler from
`dimod <https://github.com/dwavesystems/dimod>`_ to find a maximum cut
for a graph of a Chimera unit cell created using the `chimera_graph()`
function.
>>> import dimod
>>> import dwave_networkx as dnx
>>> samplerSA = dimod.SimulatedAnnealingSampler()
>>> G = dnx.chimera_graph(1, 1, 4)
>>> cut = dnx.maximum_cut(G, samplerSA)
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. | 5.745699 | 5.730354 | 1.002678 |
def weighted_maximum_cut(G, sampler=None, **sampler_args):
    """Approximate a weighted maximum cut of G via Ising sampling.

    Edge weights become couplings (fields are zero), so cutting heavy
    edges is rewarded; one side of the cut is returned as a set. Raises
    DWaveNetworkXException when an edge lacks a 'weight' attribute.
    """
    h = dict.fromkeys(G, 0.)
    try:
        J = {(u, v): G[u][v]['weight'] for u, v in G.edges}
    except KeyError:
        raise DWaveNetworkXException("edges must have 'weight' attribute")
    # Samples arrive ordered by increasing energy; take the best one.
    best = next(iter(sampler.sample_ising(h, J, **sampler_args)))
    return {v for v in G if best[v] >= 0}
Defines an Ising problem with ground states corresponding to
a weighted maximum cut and uses the sampler to sample from it.
A weighted maximum cut is a subset S of the vertices of G that
maximizes the sum of the edge weights between S and its
complementary subset.
Parameters
----------
G : NetworkX graph
The graph on which to find a weighted maximum cut. Each edge in G should
have a numeric `weight` attribute.
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
S : set
A maximum cut of G.
Example
-------
This example uses a sampler from
`dimod <https://github.com/dwavesystems/dimod>`_ to find a weighted maximum
cut for a graph of a Chimera unit cell. The graph is created using the
`chimera_graph()` function with weights added to all its edges such that
those incident to nodes {6, 7} have weight -1 while the others are +1. A
weighted maximum cut should cut as many of the latter and few of the former
as possible.
>>> import dimod
>>> import dwave_networkx as dnx
>>> samplerSA = dimod.SimulatedAnnealingSampler()
>>> G = dnx.chimera_graph(1, 1, 4)
>>> for u, v in G.edges:
....: if (u >= 6) | (v >=6):
....: G[u][v]['weight']=-1
....: else: G[u][v]['weight']=1
....:
>>> dnx.weighted_maximum_cut(G, samplerSA)
{4, 5}
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. | 5.474553 | 5.096051 | 1.074274 |
def pegasus_elimination_order(n, coordinates=False):
    """Produce a variable elimination order for a Pegasus P(n) graph.

    The order eliminates vertical qubits one column at a time, eliminating
    horizontal qubits in each column as soon as all their adjacent vertical
    qubits are gone. This yields a treewidth upper bound of 12n-4; since
    P(n) contains cliques of size 12n-10, the true treewidth lies in
    [12n-11, 12n-4]. The bound can be verified with
    dwave_networkx.elimination_order_width.

    Returns pegasus coordinates when `coordinates` is True, otherwise
    linear indices.
    """
    m = n
    l = 12
    # Elimination order for horizontal qubits within each tile, east to west.
    h_order = [4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11]
    order = []
    for n_i in range(n):  # tile offset
        for l_i in range(0, l, 2):
            # Vertical qubits first, column by column.
            for l_v in range(l_i, l_i + 2):
                for m_i in range(m - 1):
                    order.append((0, n_i, l_v, m_i))
            # A new set of horizontal qubits has had all its neighbouring
            # vertical qubits eliminated; eliminate it now.
            if n_i > 0 and not (l_i % 4):
                for m_i in range(m):
                    for l_h in range(h_order[l_i], h_order[l_i] + 4):
                        order.append((1, m_i, l_h, n_i - 1))
    if coordinates:
        return order
    return pegasus_coordinates(n).ints(order)
Simple pegasus variable elimination order rules:
- eliminate vertical qubits, one column at a time
- eliminate horizontal qubits in each column once their adjacent vertical qubits have been eliminated
Many other orderings are possible giving the same treewidth upper bound of 12n-4 for a P(n)
see pegasus_var_order_extra.py for a few.
P(n) contains cliques of size 12n-10 so we know the treewidth of P(n) is in [12n-11,12n-4].
the treewidth bound generated by a variable elimination order can be verified using
dwave_networkx.elimination_order_width. Example:
import dwave_networkx as dwnx
n = 6
P = dwnx.generators.pegasus_graph(n)
order = pegasus_var_order(n)
print(dwnx.elimination_order_width(P,order)) | 5.079637 | 1.996113 | 2.544765 |
def get_tuple_fragmentation_fn(pegasus_graph):
    """Return a function mapping Pegasus coordinates to their six K2,2
    Chimera fragment coordinates.

    A closure is returned (rather than fragmenting directly) so callers
    need not handle the graph's horizontal/vertical offsets, nor pass the
    whole Pegasus graph, on every call.
    """
    h_offsets = pegasus_graph.graph['horizontal_offsets']
    v_offsets = pegasus_graph.graph['vertical_offsets']

    def fragment_tuple(pegasus_coords):
        fragments = []
        for u, w, k, z in pegasus_coords:
            # Offset depends on orientation (u) and track (k).
            offset = (h_offsets if u else v_offsets)[k]
            # Base (zeroth) Chimera fragment of this Pegasus qubit.
            x0 = (z * 12 + offset) // 2
            y = (w * 12 + k) // 2
            r = k % 2
            coord = [0, 0, u, r]
            # Emit the six consecutive fragments along the qubit.
            for x in range(x0, x0 + 6):
                coord[u] = x
                coord[1 - u] = y
                fragments.append(tuple(coord))
        return fragments
    return fragment_tuple
fragment_tuple(..), takes in a list of Pegasus qubit coordinates and returns their corresponding
K2,2 Chimera fragment coordinates.
Details on the returned function, fragment_tuple(list_of_pegasus_coordinates):
Each Pegasus qubit is split into six fragments. If edges are drawn between adjacent
fragments and drawn between fragments that are connected by an existing Pegasus coupler, we
can see that a K2,2 Chimera graph is formed.
The K2,2 Chimera graph uses a coordinate system with an origin at the upper left corner of
the graph.
y: number of vertical fragments from the top-most row
x: number of horizontal fragments from the left-most column
u: 1 if it belongs to a horizontal qubit, 0 otherwise
r: fragment index on the K2,2 shore
Parameters
----------
pegasus_graph: networkx.graph
A pegasus graph
Returns
-------
fragment_tuple(pegasus_coordinates): a function
A function that accepts a list of pegasus coordinates and returns a list of their
corresponding K2,2 Chimera coordinates. | 4.344804 | 3.686149 | 1.178684 |
def get_tuple_defragmentation_fn(pegasus_graph):
    """Return a function mapping K2,2 Chimera fragment coordinates back to
    the unique Pegasus coordinates they belong to.

    A closure is returned (rather than defragmenting directly) so callers
    need not handle the graph's horizontal/vertical offsets, nor pass the
    whole Pegasus graph, on every call.
    """
    h_offsets = pegasus_graph.graph['horizontal_offsets']
    v_offsets = pegasus_graph.graph['vertical_offsets']

    def defragment_tuple(chimera_coords):
        pegasus_coords = []
        for y, x, u, r in chimera_coords:
            shifts = [x, y]
            offsets = h_offsets if u else v_offsets
            # Tile count (w) and track (k) from the fragment's position.
            w, k = divmod(2 * shifts[u] + r, 12)
            # Qubit index along the track.
            z = (shifts[1 - u] * 2 - offsets[k]) // 12
            pegasus_coords.append((u, w, k, z))
        # Several fragments map to the same Pegasus qubit; deduplicate.
        return list(set(pegasus_coords))
    return defragment_tuple
de-fragmentation function, defragment_tuple(..), takes in a list of K2,2 Chimera coordinates and
returns the corresponding list of unique pegasus coordinates.
Details on the returned function, defragment_tuple(list_of_chimera_fragment_coordinates):
Each Pegasus qubit is split into six fragments. If edges are drawn between adjacent
fragments and drawn between fragments that are connected by an existing Pegasus coupler, we
can see that a K2,2 Chimera graph is formed.
The K2,2 Chimera graph uses a coordinate system with an origin at the upper left corner of
the graph.
y: number of vertical fragments from the top-most row
x: number of horizontal fragments from the left-most column
u: 1 if it belongs to a horizontal qubit, 0 otherwise
r: fragment index on the K2,2 shore
The defragment_tuple(..) takes in the list of Chimera fragments and returns a list of their
corresponding Pegasus qubit coordinates. Note that the returned list has a unique set of
Pegasus coordinates.
Parameters
----------
pegasus_graph: networkx.graph
A Pegasus graph
Returns
-------
defragment_tuple(chimera_coordinates): a function
A function that accepts a list of chimera coordinates and returns a set of their
corresponding Pegasus coordinates. | 4.871075 | 4.210745 | 1.15682 |
# Parameters were dropped from this function's signature; warn callers
# that still pass any.
if args or kwargs:
    warnings.warn("Deprecation warning: get_pegasus_to_nice_fn does not need / use parameters anymore")
# One converter per nice-coordinate "t" value. NOTE(review): p2n0's last
# term is identical in both branches (k-4 if u else k-4) -- presumably
# intentional, but worth confirming against the coordinate spec.
def p2n0(u, w, k, z): return (0, w-1 if u else z, z if u else w, u, k-4 if u else k-4)
def p2n1(u, w, k, z): return (1, w-1 if u else z, z if u else w, u, k if u else k-8)
def p2n2(u, w, k, z): return (2, w if u else z, z if u else w-1, u, k-8 if u else k)
# Select the converter from the orientation u and track group k//4.
def p2n(u, w, k, z): return [p2n0, p2n1, p2n2][(2-u-(2*u-1)*(k//4)) % 3](u, w, k, z)
return p2n
coordinates to the 5-term "nice" coordinates.
Details on the returned function, pegasus_to_nice(u,w,k,z)
Inputs are 4-tuples of ints, return is a 5-tuple of ints. See
pegasus_graph for description of the pegasus_index and "nice"
coordinate systems.
Returns
-------
pegasus_to_nice_fn(chimera_coordinates): a function
A function that accepts augmented chimera coordinates and returns corresponding
Pegasus coordinates. | 3.33404 | 3.340808 | 0.997974 |
# Parameters were dropped from this function's signature; warn callers
# that still pass any. (The message names the sibling function; kept as-is.)
if args or kwargs:
    warnings.warn("Deprecation warning: get_pegasus_to_nice_fn does not need / use parameters anymore")
# One converter per nice-coordinate "t" value. NOTE(review): c2p0's third
# term is identical in both branches (4+k if u else 4+k) -- presumably
# intentional, but worth confirming against the coordinate spec.
def c2p0(y, x, u, k): return (u, y+1 if u else x, 4+k if u else 4+k, x if u else y)
def c2p1(y, x, u, k): return (u, y+1 if u else x, k if u else 8+k, x if u else y)
def c2p2(y, x, u, k): return (u, y if u else x + 1, 8+k if u else k, x if u else y)
# Dispatch on the leading "t" coordinate.
def n2p(t, y, x, u, k): return [c2p0, c2p1, c2p2][t](y, x, u, k)
return n2p
coordinates to the 4-term pegasus_index coordinates.
Details on the returned function, nice_to_pegasus(t, y, x, u, k)
Inputs are 5-tuples of ints, return is a 4-tuple of ints. See
pegasus_graph for description of the pegasus_index and "nice"
coordinate systems.
Returns
-------
nice_to_pegasus_fn(pegasus_coordinates): a function
A function that accepts Pegasus coordinates and returns the corresponding
augmented chimera coordinates | 3.463552 | 3.439939 | 1.006864 |
def int(self, q):
    """Convert the pegasus_index 4-tuple `q` to its linear index."""
    u, w, k, z = q
    m, m1 = self.args
    # Mixed-radix encoding: u, then w (base m), k (base 12), z (base m1).
    return z + m1 * (k + 12 * (w + m * u))
Parameters
----------
q : tuple
The chimera_index node label
Returns
-------
r : int
The linear_index node label corresponding to q | 12.657716 | 12.704523 | 0.996316 |
def tuple(self, r):
    """Convert the linear index `r` to its pegasus_index 4-tuple."""
    m, m1 = self.args
    # Peel off the mixed-radix digits in reverse order of encoding.
    rest, z = divmod(r, m1)
    rest, k = divmod(rest, 12)
    u, w = divmod(rest, m)
    return (u, w, k, z)
Parameters
----------
r : int
The linear_index node label
Returns
-------
q : tuple
The pegasus_index node label corresponding to r | 6.043485 | 6.062225 | 0.996909 |
def ints(self, qlist):
    """Lazily convert a sequence of pegasus_index tuples to linear indices,
    preserving order."""
    m, m1 = self.args
    return (z + m1 * (k + 12 * (w + m * u)) for (u, w, k, z) in qlist)
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The pegasus_index node labels
Returns
-------
rlist : iterable of tuples
The linear_index node labels corresponding to qlist
def tuples(self, rlist):
    """Lazily convert a sequence of linear indices to pegasus_index tuples,
    preserving order."""
    m, m1 = self.args
    for linear in rlist:
        rest, z = divmod(linear, m1)
        rest, k = divmod(rest, 12)
        u, w = divmod(rest, m)
        yield u, w, k, z
pegasus_index node labels, preserving order
Parameters
----------
rlist : sequence of tuples
The linear_index node labels
Returns
-------
qlist : iterable of ints
The pegasus_index node labels corresponding to rlist
def __pair_repack(self, f, plist):
    """Flatten a sequence of pairs through `f`, then re-pair the result,
    yielding consecutive output items two at a time."""
    flat = f(u for pair in plist for u in pair)
    for first in flat:
        yield first, next(flat)
re-pairs the result.
Parameters
----------
f : callable
A function that accepts a sequence and returns a sequence
plist:
A sequence of pairs
Returns
-------
qlist : sequence
Equivalent to (tuple(f(p)) for p in plist) | 6.634275 | 8.040243 | 0.825134 |
def sample_markov_network(MN, sampler=None, fixed_variables=None,
                          return_sampleset=False,
                          **sampler_args):
    """Sample from a Markov network, optionally clamping some variables.

    Builds the network's BQM, fixes `fixed_variables` via dimod's
    FixedVariableComposite, and samples with `sampler`. Returns either the
    raw sample set or a low-to-high-energy list of sample dicts.
    """
    bqm = markov_network_bqm(MN)
    # FixedVariableComposite clamps the fixed variables and re-inserts
    # their values into the returned samples.
    composed = dimod.FixedVariableComposite(sampler)
    sampleset = composed.sample(bqm, fixed_variables=fixed_variables,
                                **sampler_args)
    if return_sampleset:
        return sampleset
    return [dict(s) for s in sampleset.samples()]
return_sampleset=False,
**sampler_args) | Samples from a markov network using the provided sampler.
Parameters
----------
G : NetworkX graph
A Markov Network as returned by :func:`.markov_network`
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
fixed_variables : dict
A dictionary of variable assignments to be fixed in the markov network.
return_sampleset : bool (optional, default=False)
If True, returns a :obj:`dimod.SampleSet` rather than a list of samples.
**sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
samples : list[dict]/:obj:`dimod.SampleSet`
A list of samples ordered from low-to-high energy or a sample set.
Examples
--------
>>> import dimod
...
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> sampler = dimod.ExactSolver()
>>> samples = dnx.sample_markov_network(MN, sampler)
>>> samples[0]
{'a': 0, 'b': 0}
>>> import dimod
...
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> sampler = dimod.ExactSolver()
>>> samples = dnx.sample_markov_network(MN, sampler, return_sampleset=True)
>>> samples.first
Sample(sample={'a': 0, 'b': 0}, energy=-1.0, num_occurrences=1)
>>> import dimod
...
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2},
... ('b', 'c'): {(0, 0): -9,
... (0, 1): 1.2,
... (1, 0): 7.2,
... (1, 1): 5}}
>>> MN = dnx.markov_network(potentials)
>>> sampler = dimod.ExactSolver()
>>> samples = dnx.sample_markov_network(MN, sampler, fixed_variables={'b': 0})
>>> samples[0]
{'a': 0, 'c': 0, 'b': 0}
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. | 4.187901 | 5.081844 | 0.824091 |
def markov_network_bqm(MN):
    """Construct the binary quadratic model equivalent to Markov network MN.

    Node potentials become linear biases plus offsets; edge potentials
    decompose into linear biases, one quadratic interaction, and an offset,
    using the variable order stored on each edge.
    """
    bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
    # Single-variable potentials.
    for v, data in MN.nodes(data=True, default=None):
        potential = data.get('potential', None)
        if potential is None:
            continue
        # Order is irrelevant for a single node.
        phi0 = potential[(0,)]
        phi1 = potential[(1,)]
        bqm.add_variable(v, phi1 - phi0)
        bqm.add_offset(phi0)
    # Pairwise potentials.
    for u, v, data in MN.edges(data=True, default=None):
        potential = data.get('potential', None)
        if potential is None:
            continue
        # Use the stored variable order; edge order is not guaranteed
        # consistent on python<=3.5.
        u, v = data['order']
        phi00 = potential[(0, 0)]
        phi01 = potential[(0, 1)]
        phi10 = potential[(1, 0)]
        phi11 = potential[(1, 1)]
        bqm.add_variable(u, phi10 - phi00)
        bqm.add_variable(v, phi01 - phi00)
        bqm.add_interaction(u, v, phi11 - phi10 - phi01 + phi00)
        bqm.add_offset(phi00)
    return bqm
Parameters
----------
G : NetworkX graph
A Markov Network as returned by :func:`.markov_network`
Returns
-------
bqm : :obj:`dimod.BinaryQuadraticModel`
A binary quadratic model. | 2.530653 | 2.634496 | 0.960583 |
# Accept an edge list by converting it into a graph first.
if not isinstance(G, nx.Graph):
    empty_graph = nx.Graph()
    empty_graph.add_edges_from(G)
    G = empty_graph
# now we get chimera coordinates for the translation
# first, check if we made it
if G.graph.get("family") == "chimera":
    # Graph built by chimera_graph(): lattice dimensions are on G.graph.
    m = G.graph['rows']
    n = G.graph['columns']
    t = G.graph['tile']
    # get a node placement function
    xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
    if G.graph.get('labels') == 'coordinate':
        # Node labels already are (i, j, u, k) tuples.
        pos = {v: xy_coords(*v) for v in G.nodes()}
    elif G.graph.get('data'):
        # Coordinates stored per-node in the 'chimera_index' attribute.
        pos = {v: xy_coords(*dat['chimera_index']) for v, dat in G.nodes(data=True)}
    else:
        # Linear labels: translate through chimera_coordinates.
        coord = chimera_coordinates(m, n, t)
        pos = {v: xy_coords(*coord.tuple(v)) for v in G.nodes()}
else:
    # best case scenario, each node in G has a chimera_index attribute. Otherwise
    # we will try to determine it using the find_chimera_indices function.
    if all('chimera_index' in dat for __, dat in G.nodes(data=True)):
        chimera_indices = {v: dat['chimera_index'] for v, dat in G.nodes(data=True)}
    else:
        chimera_indices = find_chimera_indices(G)
    # we could read these off of the name attribute for G, but we would want the values in
    # the nodes to override the name in case of conflict.
    m = max(idx[0] for idx in itervalues(chimera_indices)) + 1
    n = max(idx[1] for idx in itervalues(chimera_indices)) + 1
    t = max(idx[3] for idx in itervalues(chimera_indices)) + 1
    xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
    # compute our coordinates
    pos = {v: xy_coords(i, j, u, k) for v, (i, j, u, k) in iteritems(chimera_indices)}
return pos
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph
Should be a Chimera graph or a subgraph of a
Chimera graph. If every node in G has a `chimera_index`
attribute, those are used to place the nodes. Otherwise makes
a best-effort attempt to find positions.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
pos : dict
A dictionary of positions keyed by node.
Examples
--------
>>> G = dnx.chimera_graph(1)
>>> pos = dnx.chimera_layout(G) | 3.204844 | 3.230551 | 0.992043 |
# Build and return a closure that maps a Chimera coordinate (i, j, u, k)
# in an (m, n, t) lattice to an x,y plot position (padded to `dim` axes).
import numpy as np
# Shore index of the cross's center line within a tile; shore indices at or
# beyond it are shifted by one below to leave room for the perpendicular arm.
tile_center = t // 2
tile_length = t + 3  # 1 for middle of cross, 2 for spacing between tiles
# want the entire plot to fill in [0, 1] when scale=1
scale /= max(m, n) * tile_length - 3
# lazily-filled cache: tile (i, j) -> pixel offset vector for that tile
grid_offsets = {}
if center is None:
    center = np.zeros(dim)
else:
    center = np.asarray(center)
# any axes beyond x,y are padded with zeros in the returned coordinates
paddims = dim - 2
if paddims < 0:
    raise ValueError("layout must have at least two dimensions")
if len(center) != dim:
    raise ValueError("length of center coordinates must match dimension of layout")
def _xy_coords(i, j, u, k):
    # row, col, shore, shore index
    # first get the coordinates within the tile
    if k < tile_center:
        p = k
    else:
        # skip over the center line of the cross
        p = k + 1
    if u:
        # vertical shore: x fixed at the tile center, y varies with k
        xy = np.array([tile_center, -1 * p])
    else:
        # horizontal shore: x varies with k, y fixed at the tile center
        xy = np.array([p, -1 * tile_center])
    # next offset the coordinates based on which tile (i, j) we are in
    if i > 0 or j > 0:
        if (i, j) in grid_offsets:
            xy += grid_offsets[(i, j)]
        else:
            off = np.array([j * tile_length, -1 * i * tile_length])
            xy += off
            grid_offsets[(i, j)] = off
    # convention for Chimera-lattice pictures is to invert the y-axis
    return np.hstack((xy * scale, np.zeros(paddims))) + center
return _xy_coords
coordinates for a plot.
Parameters
----------
m : int
Number of rows in the Chimera lattice.
n : int
Number of columns in the Chimera lattice.
t : int
Size of the shore within each Chimera tile.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
xy_coords : function
A function that maps a Chimera index (i, j, u, k) in an
(m, n, t) Chimera lattice to x,y coordinates such as
used by a plot. | 5.190403 | 4.710151 | 1.101961 |
# Thin wrapper: lay G out with the Chimera topology, then hand off to the
# generic embedding drawer; all drawing arguments are forwarded untouched.
pos = chimera_layout(G)
draw_embedding(G, pos, *args, **kwargs)
If interaction_edges is not None, then only display the couplers in that
list. If embedded_graph is not None, the only display the couplers between
chains with intended couplings according to embedded_graph.
Parameters
----------
G : NetworkX graph
Should be a Chimera graph or a subgraph of a Chimera graph.
emb : dict
A dict of chains associated with each node in G. Should be
of the form {node: chain, ...}. Chains should be iterables
of qubit labels (qubits are nodes in G).
embedded_graph : NetworkX graph (optional, default None)
A graph which contains all keys of emb as nodes. If specified,
an edge of G is drawn as an interaction if and only if it runs
between two chains of emb whose keys are joined by an edge in
embedded_graph.
interaction_edges : list (optional, default None)
A list of edges which will be used as interactions.
show_labels: boolean (optional, default False)
If show_labels is True, then each chain in emb is labelled with its key.
chain_color : dict (optional, default None)
A dict of colors associated with each key in emb. Should be
of the form {node: rgba_color, ...}. Colors should be length-4
tuples of floats between 0 and 1 inclusive. If chain_color is None,
each chain will be assigned a different color.
unused_color : tuple (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not involved
in chains, and edges which are neither chain edges nor interactions.
If unused_color is None, these nodes and edges will not be shown at all.
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored. | 4.715817 | 11.639843 | 0.405144 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.