Columns:
  prompt: large_string, lengths 72 to 9.34k
  completion: large_string, lengths 0 to 7.61k
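Each row below is a fill-in-the-middle pair: the prompt carries the checkdbs.py source wrapped in sentinel tokens, with one span removed and replaced by a <|fim_middle|> marker, and the completion holds the removed span. The following is a minimal sketch of how such a pair could be assembled; the sentinel strings are copied verbatim from the rows, while the helper name and the caller-supplied span boundaries are assumptions made for illustration.

# Minimal sketch of building one prompt/completion row, assuming the caller
# chooses the span to hide.  The sentinel strings match the rows below; the
# function name and its interface are illustrative only.
def make_fim_row(filename, source, start, end):
    """Cut source[start:end] out of the file and return (prompt, completion)."""
    prefix, completion, suffix = source[:start], source[start:end], source[end:]
    prompt = ('<|file_name|>' + filename + '<|end_file_name|>'
              + '<|fim▁begin|>' + prefix
              + '<|fim_middle|>' + suffix + '<|fim▁end|>')
    return prompt, completion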
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: <|fim_middle|> key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
            pending.append(_('\nPending posts:'))
            first = False
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): <|fim_middle|> else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
upending.append(s)
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: <|fim_middle|> # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
upending.append(unicode(s, charset, 'replace'))
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): <|fim_middle|> # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
return text.encode(outcodec, 'replace')
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: <|fim_middle|> return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
        for request in heldmsgs:
            key, data = requestsdb.get_request(request.id)
            if now - data['date'] > expire:
                handle_request(mlist, request.id, config.DISCARD)
                discard_count += 1
        mlist.Save()
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: <|fim_middle|> mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
                handle_request(mlist, request.id, config.DISCARD)
                discard_count += 1
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: <|fim_middle|> # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
date = time.localtime()[:3]
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: <|fim_middle|> if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
evictions.append(sender)
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: <|fim_middle|> if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
                for sender in evictions:
                    del mlist.hold_and_cmd_autoresponses[sender]
                # This is the only place we've changed the list's database
                mlist.Save()
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. <|fim_middle|> finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
_.default = mlist.preferred_language
realname = mlist.real_name
discarded = auto_discard(mlist)
if discarded:
    count = count - discarded
    text = _('Notice: $discarded old request(s) '
             'automatically expired.\n\n')
else:
    text = ''
if count:
    text += Utils.maketext(
        'checkdbs.txt',
        {'count'    : count,
         'mail_host': mlist.mail_host,
         'adminDB'  : mlist.GetScriptURL('admindb', absolute=1),
         'real_name': realname,
         }, mlist=mlist)
    text += '\n' + pending_requests(mlist)
    subject = _('$count $realname moderator '
                'request(s) waiting')
else:
    subject = _('$realname moderator request check result')
msg = UserNotification(mlist.GetOwnerEmail(),
                       mlist.GetBouncesEmail(),
                       subject, text,
                       mlist.preferred_language)
msg.send(mlist, **{'tomoderators': True})
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: <|fim_middle|> else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
count = count - discarded
text = _('Notice: $discarded old request(s) '
         'automatically expired.\n\n')
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: <|fim_middle|> if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
text = ''
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: <|fim_middle|> else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
text += Utils.maketext(
    'checkdbs.txt',
    {'count'    : count,
     'mail_host': mlist.mail_host,
     'adminDB'  : mlist.GetScriptURL('admindb', absolute=1),
     'real_name': realname,
     }, mlist=mlist)
text += '\n' + pending_requests(mlist)
subject = _('$count $realname moderator '
            'request(s) waiting')
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: <|fim_middle|> msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
subject = _('$realname moderator request check result')
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': <|fim_middle|> <|fim▁end|>
main()
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def <|fim_middle|>(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
parseargs
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def <|fim_middle|>(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
pending_requests
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def <|fim_middle|>(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
auto_discard
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def <|fim_middle|>(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def main(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
midnight
<|file_name|>checkdbs.py<|end_file_name|><|fim▁begin|># Copyright (C) 1998-2014 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. import sys import time import optparse from email.Charset import Charset from mailman import MailList from mailman import Utils from mailman.app.requests import handle_request from mailman.configuration import config from mailman.core.i18n import _ from mailman.email.message import UserNotification from mailman.initialize import initialize from mailman.interfaces.requests import IListRequests, RequestType from mailman.version import MAILMAN_VERSION # Work around known problems with some RedHat cron daemons import signal signal.signal(signal.SIGCHLD, signal.SIG_DFL) NL = u'\n' now = time.time() def parseargs(): parser = optparse.OptionParser(version=MAILMAN_VERSION, usage=_("""\ %prog [options] Check for pending admin requests and mail the list owners if necessary.""")) parser.add_option('-C', '--config', help=_('Alternative configuration file to use')) opts, args = parser.parse_args() if args: parser.print_help() print(_('Unexpected arguments'), file=sys.stderr) sys.exit(1) return opts, args, parser def pending_requests(mlist): # Must return a byte string lcset = mlist.preferred_language.charset pending = [] first = True requestsdb = IListRequests(mlist) for request in requestsdb.of_type(RequestType.subscription): if first: pending.append(_('Pending subscriptions:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] addr = data['addr'] fullname = data['fullname'] passwd = data['passwd'] digest = data['digest'] lang = data['lang'] if fullname: if isinstance(fullname, unicode): fullname = fullname.encode(lcset, 'replace') fullname = ' (%s)' % fullname pending.append(' %s%s %s' % (addr, fullname, time.ctime(when))) first = True for request in requestsdb.of_type(RequestType.held_message): if first: pending.append(_('\nPending posts:')) first = False key, data = requestsdb.get_request(request.id) when = data['when'] sender = data['sender'] subject = data['subject'] reason = data['reason'] text = data['text'] msgdata = data['msgdata'] subject = Utils.oneline(subject, lcset) date = time.ctime(when) reason = _(reason) pending.append(_("""\ From: $sender on $date Subject: $subject Cause: $reason""")) pending.append('') # Coerce all items in pending to a Unicode so we can join them upending = [] charset = mlist.preferred_language.charset for s in pending: if isinstance(s, unicode): upending.append(s) else: upending.append(unicode(s, charset, 'replace')) # Make sure that the text we return from here can be encoded to a byte # string in the charset of the list's language. This could fail if for # example, the request was pended while the list's language was French, # but then it was changed to English before checkdbs ran. 
text = NL.join(upending) charset = Charset(mlist.preferred_language.charset) incodec = charset.input_codec or 'ascii' outcodec = charset.output_codec or 'ascii' if isinstance(text, unicode): return text.encode(outcodec, 'replace') # Be sure this is a byte string encodeable in the list's charset utext = unicode(text, incodec, 'replace') return utext.encode(outcodec, 'replace') def auto_discard(mlist): # Discard old held messages discard_count = 0 expire = config.days(mlist.max_days_to_hold) requestsdb = IListRequests(mlist) heldmsgs = list(requestsdb.of_type(RequestType.held_message)) if expire and heldmsgs: for request in heldmsgs: key, data = requestsdb.get_request(request.id) if now - data['date'] > expire: handle_request(mlist, request.id, config.DISCARD) discard_count += 1 mlist.Save() return discard_count # Figure out epoch seconds of midnight at the start of today (or the given # 3-tuple date of (year, month, day). def midnight(date=None): if date is None: date = time.localtime()[:3] # -1 for dst flag tells the library to figure it out return time.mktime(date + (0,)*5 + (-1,)) def <|fim_middle|>(): opts, args, parser = parseargs() initialize(opts.config) for name in config.list_manager.names: # The list must be locked in order to open the requests database mlist = MailList.MailList(name) try: count = IListRequests(mlist).count # While we're at it, let's evict yesterday's autoresponse data midnight_today = midnight() evictions = [] for sender in mlist.hold_and_cmd_autoresponses.keys(): date, respcount = mlist.hold_and_cmd_autoresponses[sender] if midnight(date) < midnight_today: evictions.append(sender) if evictions: for sender in evictions: del mlist.hold_and_cmd_autoresponses[sender] # This is the only place we've changed the list's database mlist.Save() if count: # Set the default language the the list's preferred language. _.default = mlist.preferred_language realname = mlist.real_name discarded = auto_discard(mlist) if discarded: count = count - discarded text = _('Notice: $discarded old request(s) ' 'automatically expired.\n\n') else: text = '' if count: text += Utils.maketext( 'checkdbs.txt', {'count' : count, 'mail_host': mlist.mail_host, 'adminDB' : mlist.GetScriptURL('admindb', absolute=1), 'real_name': realname, }, mlist=mlist) text += '\n' + pending_requests(mlist) subject = _('$count $realname moderator ' 'request(s) waiting') else: subject = _('$realname moderator request check result') msg = UserNotification(mlist.GetOwnerEmail(), mlist.GetBouncesEmail(), subject, text, mlist.preferred_language) msg.send(mlist, **{'tomoderators': True}) finally: mlist.Unlock() if __name__ == '__main__': main() <|fim▁end|>
main
<|file_name|>makerealworddoc.py<|end_file_name|><|fim▁begin|>#coding=gbk """Convert HTML page to Word 97 document This script is used during the build process of "Dive Into Python" (http://diveintopython.org/) to create the downloadable Word 97 version of the book (http://diveintopython.org/diveintopython.doc) Looks for 2 arguments on the command line. The first argument is the input (HTML) file; the second argument is the output (.doc) file. Only runs on Windows. Requires Microsoft Word 2000. Safe to run on the same file(s) more than once. The output file will be silently overwritten if it already exists. The script has been modified by xiaq ([email protected]) to fit Simplified Chinese version of Microsoft Word. """ __author__ = "Mark Pilgrim ([email protected])" __version__ = "$Revision: 1.2 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 Mark Pilgrim" __license__ = "Python" import sys, os from win32com.client import gencache, constants def makeRealWordDoc(infile, outfile): word = gencache.EnsureDispatch("Word.Application") try: worddoc = word.Documents.Open(FileName=infile) try: worddoc.TablesOfContents.Add(Range=word.ActiveWindow.Selection.Range, \ RightAlignPageNumbers=1, \ UseHeadingStyles=1, \ UpperHeadingLevel=1, \ LowerHeadingLevel=2, \ IncludePageNumbers=1, \ AddedStyles='', \ UseHyperlinks=1, \ HidePageNumbersInWeb=1) worddoc.TablesOfContents(1).TabLeader = constants.wdTabLeaderDots worddoc.TablesOfContents.Format = constants.wdIndexIndent word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageHeader word.Selection.TypeText(Text="Dive Into Python\t\thttp://diveintopython.org/") word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageFooter word.NormalTemplate.AutoTextEntries("- Ò³Âë -").Insert(Where=word.ActiveWindow.Selection.Range) word.ActiveWindow.View.Type = constants.wdPrintView worddoc.TablesOfContents(1).Update() worddoc.SaveAs(FileName=outfile, \ FileFormat=constants.wdFormatDocument) finally: worddoc.Close(0)<|fim▁hole|> del worddoc finally: word.Quit() del word if __name__ == "__main__": infile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1])) outfile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[2])) makeRealWordDoc(infile, outfile)<|fim▁end|>
<|file_name|>makerealworddoc.py<|end_file_name|><|fim▁begin|>#coding=gbk """Convert HTML page to Word 97 document This script is used during the build process of "Dive Into Python" (http://diveintopython.org/) to create the downloadable Word 97 version of the book (http://diveintopython.org/diveintopython.doc) Looks for 2 arguments on the command line. The first argument is the input (HTML) file; the second argument is the output (.doc) file. Only runs on Windows. Requires Microsoft Word 2000. Safe to run on the same file(s) more than once. The output file will be silently overwritten if it already exists. The script has been modified by xiaq ([email protected]) to fit Simplified Chinese version of Microsoft Word. """ __author__ = "Mark Pilgrim ([email protected])" __version__ = "$Revision: 1.2 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 Mark Pilgrim" __license__ = "Python" import sys, os from win32com.client import gencache, constants def makeRealWordDoc(infile, outfile): <|fim_middle|> __name__ == "__main__": infile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1])) outfile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[2])) makeRealWordDoc(infile, outfile) <|fim▁end|>
word = gencache.EnsureDispatch("Word.Application") try: worddoc = word.Documents.Open(FileName=infile) try: worddoc.TablesOfContents.Add(Range=word.ActiveWindow.Selection.Range, \ RightAlignPageNumbers=1, \ UseHeadingStyles=1, \ UpperHeadingLevel=1, \ LowerHeadingLevel=2, \ IncludePageNumbers=1, \ AddedStyles='', \ UseHyperlinks=1, \ HidePageNumbersInWeb=1) worddoc.TablesOfContents(1).TabLeader = constants.wdTabLeaderDots worddoc.TablesOfContents.Format = constants.wdIndexIndent word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageHeader word.Selection.TypeText(Text="Dive Into Python\t\thttp://diveintopython.org/") word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageFooter word.NormalTemplate.AutoTextEntries("- Ò³Âë -").Insert(Where=word.ActiveWindow.Selection.Range) word.ActiveWindow.View.Type = constants.wdPrintView worddoc.TablesOfContents(1).Update() worddoc.SaveAs(FileName=outfile, \ FileFormat=constants.wdFormatDocument) finally: worddoc.Close(0) del worddoc finally: word.Quit() del word if
<|file_name|>makerealworddoc.py<|end_file_name|><|fim▁begin|>#coding=gbk """Convert HTML page to Word 97 document This script is used during the build process of "Dive Into Python" (http://diveintopython.org/) to create the downloadable Word 97 version of the book (http://diveintopython.org/diveintopython.doc) Looks for 2 arguments on the command line. The first argument is the input (HTML) file; the second argument is the output (.doc) file. Only runs on Windows. Requires Microsoft Word 2000. Safe to run on the same file(s) more than once. The output file will be silently overwritten if it already exists. The script has been modified by xiaq ([email protected]) to fit Simplified Chinese version of Microsoft Word. """ __author__ = "Mark Pilgrim ([email protected])" __version__ = "$Revision: 1.2 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 Mark Pilgrim" __license__ = "Python" import sys, os from win32com.client import gencache, constants def makeRealWordDoc(infile, outfile): word = gencache.EnsureDispatch("Word.Application") try: worddoc = word.Documents.Open(FileName=infile) try: worddoc.TablesOfContents.Add(Range=word.ActiveWindow.Selection.Range, \ RightAlignPageNumbers=1, \ UseHeadingStyles=1, \ UpperHeadingLevel=1, \ LowerHeadingLevel=2, \ IncludePageNumbers=1, \ AddedStyles='', \ UseHyperlinks=1, \ HidePageNumbersInWeb=1) worddoc.TablesOfContents(1).TabLeader = constants.wdTabLeaderDots worddoc.TablesOfContents.Format = constants.wdIndexIndent word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageHeader word.Selection.TypeText(Text="Dive Into Python\t\thttp://diveintopython.org/") word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageFooter word.NormalTemplate.AutoTextEntries("- Ò³Âë -").Insert(Where=word.ActiveWindow.Selection.Range) word.ActiveWindow.View.Type = constants.wdPrintView worddoc.TablesOfContents(1).Update() worddoc.SaveAs(FileName=outfile, \ FileFormat=constants.wdFormatDocument) finally: worddoc.Close(0) del worddoc finally: word.Quit() del word if __name__ == "__main__": infi <|fim_middle|> <|fim▁end|>
le = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1])) outfile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[2])) makeRealWordDoc(infile, outfile)
<|file_name|>makerealworddoc.py<|end_file_name|><|fim▁begin|>#coding=gbk """Convert HTML page to Word 97 document This script is used during the build process of "Dive Into Python" (http://diveintopython.org/) to create the downloadable Word 97 version of the book (http://diveintopython.org/diveintopython.doc) Looks for 2 arguments on the command line. The first argument is the input (HTML) file; the second argument is the output (.doc) file. Only runs on Windows. Requires Microsoft Word 2000. Safe to run on the same file(s) more than once. The output file will be silently overwritten if it already exists. The script has been modified by xiaq ([email protected]) to fit Simplified Chinese version of Microsoft Word. """ __author__ = "Mark Pilgrim ([email protected])" __version__ = "$Revision: 1.2 $" __date__ = "$Date: 2004/05/05 21:57:19 $" __copyright__ = "Copyright (c) 2001 Mark Pilgrim" __license__ = "Python" import sys, os from win32com.client import gencache, constants def <|fim_middle|>(infile, outfile): word = gencache.EnsureDispatch("Word.Application") try: worddoc = word.Documents.Open(FileName=infile) try: worddoc.TablesOfContents.Add(Range=word.ActiveWindow.Selection.Range, \ RightAlignPageNumbers=1, \ UseHeadingStyles=1, \ UpperHeadingLevel=1, \ LowerHeadingLevel=2, \ IncludePageNumbers=1, \ AddedStyles='', \ UseHyperlinks=1, \ HidePageNumbersInWeb=1) worddoc.TablesOfContents(1).TabLeader = constants.wdTabLeaderDots worddoc.TablesOfContents.Format = constants.wdIndexIndent word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageHeader word.Selection.TypeText(Text="Dive Into Python\t\thttp://diveintopython.org/") word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageFooter word.NormalTemplate.AutoTextEntries("- Ò³Âë -").Insert(Where=word.ActiveWindow.Selection.Range) word.ActiveWindow.View.Type = constants.wdPrintView worddoc.TablesOfContents(1).Update() worddoc.SaveAs(FileName=outfile, \ FileFormat=constants.wdFormatDocument) finally: worddoc.Close(0) del worddoc finally: word.Quit() del word if __name__ == "__main__": infile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1])) outfile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[2])) makeRealWordDoc(infile, outfile) <|fim▁end|>
makeRealWordDoc
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- <|fim▁hole|> from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)<|fim▁end|>
from scrapy.spider import Spider from scrapy.selector import Selector
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): <|fim_middle|> <|fim▁end|>
name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): <|fim_middle|> <|fim▁end|>
sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: <|fim_middle|> else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)<|fim▁end|>
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: <|fim_middle|> class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)<|fim▁end|>
current_date = datetime.today().strftime('%m/%d/%Y')
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: <|fim_middle|> <|fim▁end|>
for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: <|fim_middle|> else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)<|fim▁end|>
file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address)
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def parse(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: <|fim_middle|> <|fim▁end|>
print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
<|file_name|>newenglandfilm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- from scrapy.spider import Spider from scrapy.selector import Selector from my_settings import name_file, test_mode, difference_days from datetime import datetime, timedelta print "Run spider NewenglandFilm" file_output = open(name_file, 'a') email_current_session = [] email_in_file = open(name_file, 'r').readlines() if test_mode: current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y') else: current_date = datetime.today().strftime('%m/%d/%Y') class NewenglandFilm(Spider): name = 'newenglandfilm' allowed_domains = ["newenglandfilm.com"] start_urls = ["http://newenglandfilm.com/jobs.htm"] def <|fim_middle|>(self, response): sel = Selector(response) for num_div in xrange(1, 31): date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0] email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})') if current_date == date: for address in email: if address + "\n" not in email_in_file and address not in email_current_session: file_output.write(address + "\n") email_current_session.append(address) print "Spider: NewenglandFilm. Email {0} added to file".format(address) else: print "Spider: NewenglandFilm. Email {0} already in the file".format(address)<|fim▁end|>
parse
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime<|fim▁hole|>import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s<|fim▁end|>
from decimal import Decimal
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): <|fim_middle|> def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
"""Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) )
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): <|fim_middle|> <|fim▁end|>
""" Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): <|fim_middle|> if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
return s
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): <|fim_middle|> try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
return s
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): <|fim_middle|> else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): <|fim_middle|> else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
s = s.__unicode__()
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: <|fim_middle|> else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: <|fim_middle|> else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): <|fim_middle|> else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
s = six.text_type(s, encoding, errors)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: <|fim_middle|> else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
s = six.text_type(s)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: <|fim_middle|> else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
s = six.text_type(bytes(s), encoding, errors)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. <|fim_middle|> except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
s = s.decode(encoding, errors)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): <|fim_middle|> else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
raise UnicodeDecodeError(*e.args)
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. <|fim_middle|> return s <|fim▁end|>
s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s])
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def <|fim_middle|>(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
is_protected_type
<|file_name|>encoding.py<|end_file_name|><|fim▁begin|>import datetime from decimal import Decimal import types import six def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_unicode(strings_only=True). """ return isinstance(obj, ( six.integer_types + (types.NoneType, datetime.datetime, datetime.date, datetime.time, float, Decimal)) ) def <|fim_middle|>(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% when s is an instance of # six.text_type. This function gets called often in that setting. if isinstance(s, six.text_type): return s if strings_only and is_protected_type(s): return s try: if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() else: if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, # encoding, errors), so that if s is a SafeBytes, it ends up being # a SafeText at the end. s = s.decode(encoding, errors) except UnicodeDecodeError as e: if not isinstance(s, Exception): raise UnicodeDecodeError(*e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) return s <|fim▁end|>
force_unicode
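The encoding.py rows above all target the same force_unicode helper. A minimal usage sketch follows; the `encoding` import path is an assumption (the rows only give the file name), and since the module is Python-2-era code (it references `unicode` and `types.NoneType`) the sketch assumes a Python 2 / six environment.

# Minimal usage sketch for the force_unicode helper shown in the encoding.py rows.
# Assumptions: the file is importable as `encoding` (hypothetical path) and it runs
# under the Python 2 / six environment it was written for.
import datetime

from encoding import force_unicode, is_protected_type

# A UTF-8 byte string is decoded to text with the default encoding.
assert force_unicode(b'caf\xc3\xa9') == u'caf\xe9'

# Text passes straight through (the fast path at the top of the function).
assert force_unicode(u'already text') == u'already text'

# With strings_only=True, "protected" values (None, numbers, dates, ...) are
# returned as-is instead of being converted.
today = datetime.date(2024, 1, 1)
assert is_protected_type(today)
assert force_unicode(today, strings_only=True) is today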
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points,<|fim▁hole|> # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script.<|fim▁end|>
# and save in originOfLife folder for further parsing.
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: <|fim_middle|> if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
osmName = sys.argv[1]
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: <|fim_middle|> inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
optionAllowLoop = (sys.argv[2] == '1')
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: <|fim_middle|> else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
osmNameWoExt = osmName
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: <|fim_middle|> outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))]
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): <|fim_middle|> else: os.system(cmd) # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
print('File existed. Skip.')
<|file_name|>script.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import sys import re # file name unified by the following rule: # 1. always save the osm under ../osmFiles directory # 2. the result automatically generate to ../trajectorySets # 3.1. change variable "osmName", or # 3.2. use command argument to specify osm file name # 4. this script generates a set of paths, each includes a series of of points, # and save in originOfLife folder for further parsing. # also, please scroll down the very bottom to see what's the next step osmName = 'San_Jose_20x20.osm' # sample: 'ucla.osm' #osmName = 'Los_Angeles_20x20.osm' # sample: 'ucla.osm' #osmName = 'ucla_5x5.osm' # sample: 'ucla.osm' optionAllowLoop = False # most of the cases are building bounding boxes # support system parameters if len(sys.argv) >= 2: osmName = sys.argv[1] if len(sys.argv) >= 3: optionAllowLoop = (sys.argv[2] == '1') inFile = '../../../Data/osmFiles/' + osmName if len(osmName.split('.')) == 1: osmNameWoExt = osmName else: osmNameWoExt = osmName[:-(1+len(osmName.split('.')[-1]))] outRootDir = '../../../Data/trajectorySets/' outFile = outRootDir + osmNameWoExt + '.tfix' print('input file = ' + inFile) print('output file = ' + outFile) print('') f = open('/tmp/in', 'w') f.write('<in>' + inFile + '</in>'); f.close() # the following command can be slow. a 3x3 mile^2 area takes 53 seconds to generate the result. xmlWayDetail = outRootDir + 'originOfLife/' + osmNameWoExt + '.xml' cmd = 'basex findWayTrajectory.xq > ' + xmlWayDetail print('CMD: ' + cmd) if os.path.isfile(xmlWayDetail): print('File existed. Skip.') else: <|fim_middle|> # the next step should be executing the python3 ../makeElevSegMap.py with the input # parameter outFile, but because of the relative folder path issue, integrating # makeElevSegMap.py into this code needs to make big changes. So at this stage, # we still stay on manually executing that script. <|fim▁end|>
os.system(cmd)
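The script.py rows mask single lines of one OSM preprocessing script, several of them around its file-name handling. The standalone sketch below mirrors that logic (stripping the extension, if any, and building the input and output paths); the sample OSM name and relative directories are the ones hard-coded in the script, everything else is illustrative.

# Standalone sketch of the file-name handling exercised by the script.py rows.
def strip_extension(osm_name):
    # Mirrors: osmName[:-(1 + len(osmName.split('.')[-1]))]
    parts = osm_name.split('.')
    if len(parts) == 1:
        return osm_name
    return osm_name[:-(1 + len(parts[-1]))]

assert strip_extension('San_Jose_20x20.osm') == 'San_Jose_20x20'
assert strip_extension('ucla') == 'ucla'

in_file = '../../../Data/osmFiles/' + 'San_Jose_20x20.osm'
out_file = '../../../Data/trajectorySets/' + strip_extension('San_Jose_20x20.osm') + '.tfix'
print(in_file)   # ../../../Data/osmFiles/San_Jose_20x20.osm
print(out_file)  # ../../../Data/trajectorySets/San_Jose_20x20.tfix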
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform')<|fim▁hole|> if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y)<|fim▁end|>
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): <|fim_middle|> class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line)
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): <|fim_middle|> <|fim▁end|>
""" """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y)
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): <|fim_middle|> def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble()
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): <|fim_middle|> def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
return (self._preamble+ self._content+ self._postamble)
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): <|fim_middle|> def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): <|fim_middle|> def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): <|fim_middle|> def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" """ ex = [] ex.append('M30\n') # End of Program, rewind return ex
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): <|fim_middle|> def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
""" Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): <|fim_middle|> <|fim▁end|>
""" Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y)
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: <|fim_middle|> drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
drills_dict[diameter] = {} drills_dict[diameter]['locations'] = []
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! # Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: <|fim_middle|> return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
transform_data = utils.parseTransform(transform) location += transform_data['location']
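For reference, a minimal standalone sketch of the idea this prompt/completion pair exercises: the drill location is recovered by summing the translate() offsets of an element's own transform and of every ancestor transform. The helper names below (parse_translate, accumulate_offsets) are illustrative only and not part of pcbmode, which relies on its own utils.parseTransform and Point types.

import re

def parse_translate(transform):
    # Extract the (x, y) offset from a 'translate(x[, y])' transform string.
    m = re.search(r'translate\(\s*(-?[\d.]+)[\s,]*(-?[\d.]+)?\s*\)', transform or '')
    if not m:
        return (0.0, 0.0)
    x = float(m.group(1))
    y = float(m.group(2)) if m.group(2) else 0.0
    return (x, y)

def accumulate_offsets(transforms):
    # Sum the offsets of every ancestor transform plus the element's own.
    total_x = total_y = 0.0
    for t in transforms:
        dx, dy = parse_translate(t)
        total_x += dx
        total_y += dy
    return (total_x, total_y)

# Two ancestor groups plus the path's own transform:
print(accumulate_offsets(['translate(10, 5)', 'translate(2)', 'translate(-1, 3)']))  # (11.0, 8.0)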
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def <|fim_middle|>(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
makeExcellon
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def <|fim_middle|>(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
__init__
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def <|fim_middle|>(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
getExcellon
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def <|fim_middle|>(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
_createContent
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def <|fim_middle|>(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
_createPreamble
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def <|fim_middle|>(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
_createPostamble
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def <|fim_middle|>(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def _getPoint(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
_getLocation
<|file_name|>excellon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import os import re from lxml import etree as et import pcbmode.config as config from . import messages as msg # pcbmode modules from . import utils from .point import Point def makeExcellon(manufacturer='default'): """ """ ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Open the board's SVG svg_in = utils.openBoardSVG() drills_layer = svg_in.find("//svg:g[@pcbmode:sheet='drills']", namespaces=ns) excellon = Excellon(drills_layer) # Save to file base_dir = os.path.join(config.cfg['base-dir'], config.cfg['locations']['build'], 'production') base_name = "%s_rev_%s" % (config.brd['config']['name'], config.brd['config']['rev']) filename_info = config.cfg['manufacturers'][manufacturer]['filenames']['drills'] add = '_%s.%s' % ('drills', filename_info['plated'].get('ext') or 'txt') filename = os.path.join(base_dir, base_name + add) with open(filename, "wb") as f: for line in excellon.getExcellon(): f.write(line) class Excellon(): """ """ def __init__(self, svg): """ """ self._svg = svg self._ns = {'pcbmode':config.cfg['ns']['pcbmode'], 'svg':config.cfg['ns']['svg']} # Get all drill paths except for the ones used in the # drill-index drill_paths = self._svg.findall(".//svg:g[@pcbmode:type='component-shapes']//svg:path", namespaces=self._ns) drills_dict = {} for drill_path in drill_paths: diameter = drill_path.get('{'+config.cfg['ns']['pcbmode']+'}diameter') location = self._getLocation(drill_path) if diameter not in drills_dict: drills_dict[diameter] = {} drills_dict[diameter]['locations'] = [] drills_dict[diameter]['locations'].append(location) self._preamble = self._createPreamble() self._content = self._createContent(drills_dict) self._postamble = self._createPostamble() def getExcellon(self): return (self._preamble+ self._content+ self._postamble) def _createContent(self, drills): """ """ ex = [] for i, diameter in enumerate(drills): # This is probably not necessary, but I'm not 100% certain # that if the item order of a dict is gurenteed. If not # the result can be quite devastating where drill # diameters are wrong! 
# Drill index must be greater than 0 drills[diameter]['index'] = i+1 ex.append("T%dC%s\n" % (i+1, diameter)) ex.append('M95\n') # End of a part program header for diameter in drills: ex.append("T%s\n" % drills[diameter]['index']) for coord in drills[diameter]['locations']: ex.append(self._getPoint(coord)) return ex def _createPreamble(self): """ """ ex = [] ex.append('M48\n') # Beginning of a part program header ex.append('METRIC,TZ\n') # Metric, trailing zeros ex.append('G90\n') # Absolute mode ex.append('M71\n') # Metric measuring mode return ex def _createPostamble(self): """ """ ex = [] ex.append('M30\n') # End of Program, rewind return ex def _getLocation(self, path): """ Returns the location of a path, factoring in all the transforms of its ancestors, and its own transform """ location = Point() # We need to get the transforms of all ancestors that have # one in order to get the location correctly ancestors = path.xpath("ancestor::*[@transform]") for ancestor in ancestors: transform = ancestor.get('transform') transform_data = utils.parseTransform(transform) # Add them up location += transform_data['location'] # Add the transform of the path itself transform = path.get('transform') if transform != None: transform_data = utils.parseTransform(transform) location += transform_data['location'] return location def <|fim_middle|>(self, point): """ Converts a Point type into an Excellon coordinate """ return "X%.6fY%.6f\n" % (point.x, -point.y) <|fim▁end|>
_getPoint
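Taken together, the excellon.py examples above cover the header, tool table, coordinate block, and trailer that the Excellon class emits. Below is a rough standalone sketch of that file structure, assuming holes are supplied as plain (x, y) tuples grouped by diameter; unlike the original it numbers tools from a sorted diameter list rather than relying on dict iteration order, and it keeps the y-axis flip used by _getPoint (SVG y points down).

def excellon_lines(holes):
    # holes: dict mapping diameter (str, mm) -> list of (x, y) tuples.
    lines = ['M48\n', 'METRIC,TZ\n', 'G90\n', 'M71\n']           # program header
    tools = {dia: i + 1 for i, dia in enumerate(sorted(holes))}  # stable tool numbering
    for dia, num in tools.items():
        lines.append('T%dC%s\n' % (num, dia))                    # tool definitions
    lines.append('M95\n')                                        # end of header
    for dia, num in tools.items():
        lines.append('T%d\n' % num)                              # select tool
        for x, y in holes[dia]:
            lines.append('X%.6fY%.6f\n' % (x, -y))               # flip y for SVG coordinates
    lines.append('M30\n')                                        # end of program
    return lines

print(''.join(excellon_lines({'0.8': [(1.0, 2.0)], '1.0': [(3.5, 4.25)]})))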
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== <|fim▁hole|> super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance<|fim▁end|>
class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs):
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): <|fim_middle|> <|fim▁end|>
def __init__(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs): <|fim_middle|> class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance <|fim▁end|>
super(FarmworkForm, self).__init__(*args, **kwargs)
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: <|fim_middle|> # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance <|fim▁end|>
model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ]
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): <|fim_middle|> <|fim▁end|>
if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: <|fim_middle|> instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance <|fim▁end|>
return super(FarmworkForm, self).save()
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def <|fim_middle|>(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def save(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance <|fim▁end|>
__init__
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># # farmwork/forms.py # from django import forms from django.utils.text import slugify from .models import Farmwork # ======================================================== # FARMWORK FORM # ======================================================== class FarmworkForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(FarmworkForm, self).__init__(*args, **kwargs) class Meta: model = Farmwork fields = [ 'job_role', 'job_fruit', 'job_pay', 'job_pay_type', 'job_start_date', 'job_duration', 'job_duration_type', 'job_description', 'con_first_name', 'con_surname', 'con_number', 'con_email', 'con_description', 'acc_variety', 'acc_price', 'acc_price_type', 'acc_description', 'loc_street_address', 'loc_city', 'loc_state', 'loc_post_code', ] # -- # AUTO GENERATE SLUG ON SAVE # Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django # -- def <|fim_middle|>(self): if self.instance.pk: return super(FarmworkForm, self).save() instance = super(FarmworkForm, self).save(commit=False) instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city) instance.save() return instance <|fim▁end|>
save
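The forms.py rows all revolve around the same save() override: build a slug from the fruit and role display values plus the city, then save. A small standalone sketch of that slug-building step follows; simple_slugify is only a rough stand-in for django.utils.text.slugify, which is what the form actually calls, and build_farmwork_slug is an illustrative helper, not part of the app.

import re

def simple_slugify(value):
    # Rough stand-in for django.utils.text.slugify: lowercase, hyphen-separated.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)

def build_farmwork_slug(fruit_display, role_display, city):
    # Same shape as the slug assembled in FarmworkForm.save(): fruit-role-in-city.
    return simple_slugify('%s-%s-in-%s' % (fruit_display, role_display, city))

print(build_farmwork_slug('Apples', 'Picker', 'Shepparton'))  # apples-picker-in-shepparton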
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models from .workflow import TestStateMachine class TestModel(models.Model): name = models.CharField(max_length=100) state = models.CharField(max_length=20, null=True, blank=True) state_num = models.IntegerField(null=True, blank=True) other_state = models.CharField(max_length=20, null=True, blank=True)<|fim▁hole|><|fim▁end|>
message = models.CharField(max_length=250, null=True, blank=True) class Meta: permissions = TestStateMachine.get_permissions('testmodel', 'Test')
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models from .workflow import TestStateMachine class TestModel(models.Model): <|fim_middle|> <|fim▁end|>
name = models.CharField(max_length=100) state = models.CharField(max_length=20, null=True, blank=True) state_num = models.IntegerField(null=True, blank=True) other_state = models.CharField(max_length=20, null=True, blank=True) message = models.CharField(max_length=250, null=True, blank=True) class Meta: permissions = TestStateMachine.get_permissions('testmodel', 'Test')
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models from .workflow import TestStateMachine class TestModel(models.Model): name = models.CharField(max_length=100) state = models.CharField(max_length=20, null=True, blank=True) state_num = models.IntegerField(null=True, blank=True) other_state = models.CharField(max_length=20, null=True, blank=True) message = models.CharField(max_length=250, null=True, blank=True) class Meta: <|fim_middle|> <|fim▁end|>
permissions = TestStateMachine.get_permissions('testmodel', 'Test')
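The models.py rows hang the model's custom permissions off TestStateMachine.get_permissions('testmodel', 'Test'); the exact tuples that helper returns are not shown in these rows. As a guess at the general shape only, Django's Meta.permissions expects an iterable of (codename, human-readable name) pairs, so a workflow helper could plausibly build one pair per state as sketched below; state_permissions is an illustrative name, not the real API.

def state_permissions(model_name, verbose_name, states):
    # Build Django-style custom permission tuples, one per workflow state.
    return tuple(
        ('can_%s_%s' % (state, model_name), 'Can %s %s' % (state, verbose_name))
        for state in states
    )

# e.g. what such a helper might hand to Meta.permissions
print(state_permissions('testmodel', 'Test', ['submit', 'approve', 'reject']))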
<|file_name|>multiprocess.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 from multiprocessing import Process, Pool import os, time def proc(name): <|fim▁hole|> print(time.asctime(), 'child process(name: %s) id %s. ppid %s' % (name, os.getpid(), os.getppid())) time.sleep(3) print(time.asctime(), 'child process end') if __name__ == '__main__': p = Process(target = proc, args = ('child',)) print(time.asctime(), 'child process will start') p.start() p.join() print('first child process end') pl = Pool(4) for index in range(4): pl.apply_async(proc, args = (index,)) pl.close() pl.join() print(time.asctime(), 'parent process end')<|fim▁end|>
<|file_name|>multiprocess.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 from multiprocessing import Process, Pool import os, time def proc(name): <|fim_middle|> if __name__ == '__main__': p = Process(target = proc, args = ('child',)) print(time.asctime(), 'child process will start') p.start() p.join() print('first child process end') pl = Pool(4) for index in range(4): pl.apply_async(proc, args = (index,)) pl.close() pl.join() print(time.asctime(), 'parent process end') <|fim▁end|>
print(time.asctime(), 'child process(name: %s) id %s. ppid %s' % (name, os.getpid(), os.getppid())) time.sleep(3) print(time.asctime(), 'child process end')
<|file_name|>multiprocess.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 from multiprocessing import Process, Pool import os, time def proc(name): print(time.asctime(), 'child process(name: %s) id %s. ppid %s' % (name, os.getpid(), os.getppid())) time.sleep(3) print(time.asctime(), 'child process end') if __name__ == '__main__': <|fim_middle|> <|fim▁end|>
p = Process(target = proc, args = ('child',)) print(time.asctime(), 'child process will start') p.start() p.join() print('first child process end') pl = Pool(4) for index in range(4): pl.apply_async(proc, args = (index,)) pl.close() pl.join() print(time.asctime(), 'parent process end')
<|file_name|>multiprocess.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 from multiprocessing import Process, Pool import os, time def <|fim_middle|>(name): print(time.asctime(), 'child process(name: %s) id %s. ppid %s' % (name, os.getpid(), os.getppid())) time.sleep(3) print(time.asctime(), 'child process end') if __name__ == '__main__': p = Process(target = proc, args = ('child',)) print(time.asctime(), 'child process will start') p.start() p.join() print('first child process end') pl = Pool(4) for index in range(4): pl.apply_async(proc, args = (index,)) pl.close() pl.join() print(time.asctime(), 'parent process end') <|fim▁end|>
proc
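The multiprocess.py rows walk one script: spawn a single Process, join it, then fan the same worker out over a Pool. A trimmed, runnable version of that pattern (shorter sleep, pool used as a context manager) is sketched below; it reuses the same standard-library calls but is not the dataset's original file.

import os, time
from multiprocessing import Process, Pool

def work(name):
    # Runs in a child process; report identity and simulate a little work.
    print('worker %s: pid=%s ppid=%s' % (name, os.getpid(), os.getppid()))
    time.sleep(0.1)

if __name__ == '__main__':
    p = Process(target=work, args=('single',))
    p.start()
    p.join()                      # wait for the one-off child

    with Pool(4) as pool:         # then fan out over a small pool
        pool.map(work, range(4))
    print('parent done')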
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project. import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import django DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = (<|fim▁hole|>) ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3') DATABASES = { 'default': { 'ENGINE': ENGINE, 'NAME': 'test', 'OPTIONS': { } } } if 'postgres' in ENGINE or 'mysql' in ENGINE: USER, PASSWORD = 'test', 'test' if os.environ.get('TRAVIS', False): if 'mysql' in ENGINE: USER, PASSWORD = 'travis', '' else: USER, PASSWORD = 'postgres', '' DATABASES['default'].update( USER=os.environ.get('DATABASE_USER', USER), PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD), HOST=os.environ.get('DATABASE_HOST', 'localhost') ) print(ENGINE) # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = 'media' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'actstream.runtests.urls' TEMPLATE_DIRS = ( 'templates', # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admindocs', 'django.contrib.sites', 'django.contrib.comments', 'actstream.runtests.testapp', 'actstream.runtests.testapp_nested', 'actstream', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request', ) ACTSTREAM_SETTINGS = { 'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager', 'FETCH_RELATIONS': True, 'USE_PREFETCH': True, 'USE_JSONFIELD': True, 'GFK_FETCH_DEPTH': 0, } if django.VERSION[:2] >= (1, 5): AUTH_USER_MODEL = 'testapp.MyUser' TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if 'COVERAGE' in os.environ: INSTALLED_APPS += ('django_coverage',) TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner' COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage' COVERAGE_REPORT_DATA_FILE = '.coverage'<|fim▁end|>
('Justin Quick', '[email protected]'),
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project. import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import django DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( ('Justin Quick', '[email protected]'), ) ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3') DATABASES = { 'default': { 'ENGINE': ENGINE, 'NAME': 'test', 'OPTIONS': { } } } if 'postgres' in ENGINE or 'mysql' in ENGINE: <|fim_middle|> print(ENGINE) # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = 'media' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'actstream.runtests.urls' TEMPLATE_DIRS = ( 'templates', # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admindocs', 'django.contrib.sites', 'django.contrib.comments', 'actstream.runtests.testapp', 'actstream.runtests.testapp_nested', 'actstream', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request', ) ACTSTREAM_SETTINGS = { 'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager', 'FETCH_RELATIONS': True, 'USE_PREFETCH': True, 'USE_JSONFIELD': True, 'GFK_FETCH_DEPTH': 0, } if django.VERSION[:2] >= (1, 5): AUTH_USER_MODEL = 'testapp.MyUser' TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if 'COVERAGE' in os.environ: INSTALLED_APPS += ('django_coverage',) TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner' COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage' COVERAGE_REPORT_DATA_FILE = '.coverage' <|fim▁end|>
USER, PASSWORD = 'test', 'test' if os.environ.get('TRAVIS', False): if 'mysql' in ENGINE: USER, PASSWORD = 'travis', '' else: USER, PASSWORD = 'postgres', '' DATABASES['default'].update( USER=os.environ.get('DATABASE_USER', USER), PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD), HOST=os.environ.get('DATABASE_HOST', 'localhost') )
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project. import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import django DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( ('Justin Quick', '[email protected]'), ) ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3') DATABASES = { 'default': { 'ENGINE': ENGINE, 'NAME': 'test', 'OPTIONS': { } } } if 'postgres' in ENGINE or 'mysql' in ENGINE: USER, PASSWORD = 'test', 'test' if os.environ.get('TRAVIS', False): <|fim_middle|> DATABASES['default'].update( USER=os.environ.get('DATABASE_USER', USER), PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD), HOST=os.environ.get('DATABASE_HOST', 'localhost') ) print(ENGINE) # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = 'media' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'actstream.runtests.urls' TEMPLATE_DIRS = ( 'templates', # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admindocs', 'django.contrib.sites', 'django.contrib.comments', 'actstream.runtests.testapp', 'actstream.runtests.testapp_nested', 'actstream', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request', ) ACTSTREAM_SETTINGS = { 'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager', 'FETCH_RELATIONS': True, 'USE_PREFETCH': True, 'USE_JSONFIELD': True, 'GFK_FETCH_DEPTH': 0, } if django.VERSION[:2] >= (1, 5): AUTH_USER_MODEL = 'testapp.MyUser' TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if 'COVERAGE' in os.environ: INSTALLED_APPS += ('django_coverage',) TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner' COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage' COVERAGE_REPORT_DATA_FILE = '.coverage' <|fim▁end|>
if 'mysql' in ENGINE: USER, PASSWORD = 'travis', '' else: USER, PASSWORD = 'postgres', ''
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project. import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import django DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( ('Justin Quick', '[email protected]'), ) ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3') DATABASES = { 'default': { 'ENGINE': ENGINE, 'NAME': 'test', 'OPTIONS': { } } } if 'postgres' in ENGINE or 'mysql' in ENGINE: USER, PASSWORD = 'test', 'test' if os.environ.get('TRAVIS', False): if 'mysql' in ENGINE: <|fim_middle|> else: USER, PASSWORD = 'postgres', '' DATABASES['default'].update( USER=os.environ.get('DATABASE_USER', USER), PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD), HOST=os.environ.get('DATABASE_HOST', 'localhost') ) print(ENGINE) # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = 'media' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' # Make this unique, and don't share it with anybody. SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'actstream.runtests.urls' TEMPLATE_DIRS = ( 'templates', # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admindocs',
    'django.contrib.sites',
    'django.contrib.comments',
    'actstream.runtests.testapp',
    'actstream.runtests.testapp_nested',
    'actstream',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

ACTSTREAM_SETTINGS = {
    'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 0,
}

if django.VERSION[:2] >= (1, 5):
    AUTH_USER_MODEL = 'testapp.MyUser'

TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'

if 'COVERAGE' in os.environ:
    INSTALLED_APPS += ('django_coverage',)
    TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
    COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
    COVERAGE_REPORT_DATA_FILE = '.coverage'
<|fim▁end|>
USER, PASSWORD = 'travis', ''
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import django

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Justin Quick', '[email protected]'),
)

ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')

DATABASES = {
    'default': {
        'ENGINE': ENGINE,
        'NAME': 'test',
        'OPTIONS': {
        }
    }
}

if 'postgres' in ENGINE or 'mysql' in ENGINE:
    USER, PASSWORD = 'test', 'test'
    if os.environ.get('TRAVIS', False):
        if 'mysql' in ENGINE:
            USER, PASSWORD = 'travis', ''
        else:
            <|fim_middle|>

    DATABASES['default'].update(
        USER=os.environ.get('DATABASE_USER', USER),
        PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD),
        HOST=os.environ.get('DATABASE_HOST', 'localhost')
    )

print(ENGINE)

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'media'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'actstream.runtests.urls'

TEMPLATE_DIRS = (
    'templates',
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admindocs',
    'django.contrib.sites',
    'django.contrib.comments',
    'actstream.runtests.testapp',
    'actstream.runtests.testapp_nested',
    'actstream',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

ACTSTREAM_SETTINGS = {
    'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 0,
}

if django.VERSION[:2] >= (1, 5):
    AUTH_USER_MODEL = 'testapp.MyUser'

TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'

if 'COVERAGE' in os.environ:
    INSTALLED_APPS += ('django_coverage',)
    TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
    COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
    COVERAGE_REPORT_DATA_FILE = '.coverage'
<|fim▁end|>
USER, PASSWORD = 'postgres', ''
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import django

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Justin Quick', '[email protected]'),
)

ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')

DATABASES = {
    'default': {
        'ENGINE': ENGINE,
        'NAME': 'test',
        'OPTIONS': {
        }
    }
}

if 'postgres' in ENGINE or 'mysql' in ENGINE:
    USER, PASSWORD = 'test', 'test'
    if os.environ.get('TRAVIS', False):
        if 'mysql' in ENGINE:
            USER, PASSWORD = 'travis', ''
        else:
            USER, PASSWORD = 'postgres', ''

    DATABASES['default'].update(
        USER=os.environ.get('DATABASE_USER', USER),
        PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD),
        HOST=os.environ.get('DATABASE_HOST', 'localhost')
    )

print(ENGINE)

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'media'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'actstream.runtests.urls'

TEMPLATE_DIRS = (
    'templates',
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admindocs',
    'django.contrib.sites',
    'django.contrib.comments',
    'actstream.runtests.testapp',
    'actstream.runtests.testapp_nested',
    'actstream',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

ACTSTREAM_SETTINGS = {
    'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 0,
}

if django.VERSION[:2] >= (1, 5):
    <|fim_middle|>

TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'

if 'COVERAGE' in os.environ:
    INSTALLED_APPS += ('django_coverage',)
    TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
    COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
    COVERAGE_REPORT_DATA_FILE = '.coverage'
<|fim▁end|>
AUTH_USER_MODEL = 'testapp.MyUser'
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for example_project project.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import django

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Justin Quick', '[email protected]'),
)

ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')

DATABASES = {
    'default': {
        'ENGINE': ENGINE,
        'NAME': 'test',
        'OPTIONS': {
        }
    }
}

if 'postgres' in ENGINE or 'mysql' in ENGINE:
    USER, PASSWORD = 'test', 'test'
    if os.environ.get('TRAVIS', False):
        if 'mysql' in ENGINE:
            USER, PASSWORD = 'travis', ''
        else:
            USER, PASSWORD = 'postgres', ''

    DATABASES['default'].update(
        USER=os.environ.get('DATABASE_USER', USER),
        PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD),
        HOST=os.environ.get('DATABASE_HOST', 'localhost')
    )

print(ENGINE)

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'media'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'actstream.runtests.urls'

TEMPLATE_DIRS = (
    'templates',
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admindocs',
    'django.contrib.sites',
    'django.contrib.comments',
    'actstream.runtests.testapp',
    'actstream.runtests.testapp_nested',
    'actstream',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

ACTSTREAM_SETTINGS = {
    'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 0,
}

if django.VERSION[:2] >= (1, 5):
    AUTH_USER_MODEL = 'testapp.MyUser'

TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'

if 'COVERAGE' in os.environ:
    <|fim_middle|>
<|fim▁end|>
INSTALLED_APPS += ('django_coverage',)
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
COVERAGE_REPORT_DATA_FILE = '.coverage'
<|file_name|>ext.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import asyncio
import random

from .api import every, once_at, JobSchedule, default_schedule_manager

__all__ = ['every_day', 'every_week', 'every_monday', 'every_tuesday',
           'every_wednesday', 'every_thursday', 'every_friday',
           'every_saturday', 'every_sunday', 'once_at_next_monday',
           'once_at_next_tuesday', 'once_at_next_wednesday',
           'once_at_next_thursday', 'once_at_next_friday',
           'once_at_next_saturday', 'once_at_next_sunday',
           'every_random_interval']


def every_random_interval(job, interval: timedelta, loop=None):
    """
    executes the job randomly once in the specified interval.
    example:
        run a job every day at random time
        run a job every hour at random time
    :param job: a callable(co-routine function) which returns
        a co-routine or a future or an awaitable
    :param interval: the interval can also be given in the format of datetime.timedelta,
        then seconds, minutes, hours, days, weeks parameters are ignored.
    :param loop: io loop if the provided job is a custom future linked up<|fim▁hole|>
with a different event loop.
    :return: schedule object, so it could be cancelled at will of the user by
        aschedule.cancel(schedule)
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    start = loop.time()

    def wait_time_gen():
        count = 0
        while True:
            rand = random.randrange(round(interval.total_seconds()))
            tmp = round(start + interval.total_seconds() * count + rand - loop.time())
            yield tmp
            count += 1

    schedule = JobSchedule(job, wait_time_gen(), loop=loop)
    # add it to default_schedule_manager, so that user can aschedule.cancel it
    default_schedule_manager.add_schedule(schedule)
    return schedule


def every_day(job, loop=None):
    return every(job, timedelta=timedelta(days=1), loop=loop)


def every_week(job, loop=None):
    return every(job, timedelta=timedelta(days=7), loop=loop)


every_monday = lambda job, loop=None: _every_weekday(job, 0, loop=loop)
every_tuesday = lambda job, loop=None: _every_weekday(job, 1, loop=loop)
every_wednesday = lambda job, loop=None: _every_weekday(job, 2, loop=loop)
every_thursday = lambda job, loop=None: _every_weekday(job, 3, loop=loop)
every_friday = lambda job, loop=None: _every_weekday(job, 4, loop=loop)
every_saturday = lambda job, loop=None: _every_weekday(job, 5, loop=loop)
every_sunday = lambda job, loop=None: _every_weekday(job, 6, loop=loop)

once_at_next_monday = lambda job, loop=None: _once_at_weekday(job, 0, loop=loop)
once_at_next_tuesday = lambda job, loop=None: _once_at_weekday(job, 1, loop=loop)
once_at_next_wednesday = lambda job, loop=None: _once_at_weekday(job, 2, loop=loop)
once_at_next_thursday = lambda job, loop=None: _once_at_weekday(job, 3, loop=loop)
once_at_next_friday = lambda job, loop=None: _once_at_weekday(job, 4, loop=loop)
once_at_next_saturday = lambda job, loop=None: _once_at_weekday(job, 5, loop=loop)
once_at_next_sunday = lambda job, loop=None: _once_at_weekday(job, 6, loop=loop)


def _nearest_weekday(weekday):
    return datetime.now() + timedelta(days=(weekday - datetime.now().weekday()) % 7)


def _every_weekday(job, weekday, loop=None):
    return every(job, timedelta=timedelta(days=7), start_at=_nearest_weekday(weekday), loop=loop)


def _once_at_weekday(job, weekday, loop=None):
    return once_at(job, _nearest_weekday(weekday), loop=loop)<|fim▁end|>