content | type
---|---|
#!/usr/bin/python
import aud, sys, time, multiprocessing
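# Usage (as implied by the argv handling below): argv[1] is the sound file to
# play, argv[2] is a directory containing the left-ear HRTF impulse responses
# stored as .wav files.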
device = aud.Device()
hrtf = aud.HRTF().loadLeftHrtfSet(".wav", sys.argv[2])
threadPool = aud.ThreadPool(multiprocessing.cpu_count())
source = aud.Source(0, 0, 0)
sound = aud.Sound.file(sys.argv[1]).rechannel(1).binaural(hrtf, source, threadPool)
handle = device.play(sound)
while handle.status:
    source.azimuth += 1
    print("Azimuth: " + str(source.azimuth))
    time.sleep(0.1)
|
python
|
# coding: utf-8
# flake8: noqa
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from velo_payments.models.accepted_payment_v3 import AcceptedPaymentV3
from velo_payments.models.access_token_response import AccessTokenResponse
from velo_payments.models.access_token_validation_request import AccessTokenValidationRequest
from velo_payments.models.auth_response import AuthResponse
from velo_payments.models.auto_top_up_config import AutoTopUpConfig
from velo_payments.models.auto_top_up_config2 import AutoTopUpConfig2
from velo_payments.models.category import Category
from velo_payments.models.challenge import Challenge
from velo_payments.models.challenge2 import Challenge2
from velo_payments.models.company import Company
from velo_payments.models.company2 import Company2
from velo_payments.models.create_funding_account_request_v2 import CreateFundingAccountRequestV2
from velo_payments.models.create_individual import CreateIndividual
from velo_payments.models.create_individual2 import CreateIndividual2
from velo_payments.models.create_individual_name import CreateIndividualName
from velo_payments.models.create_payee import CreatePayee
from velo_payments.models.create_payee2 import CreatePayee2
from velo_payments.models.create_payee_address import CreatePayeeAddress
from velo_payments.models.create_payee_address2 import CreatePayeeAddress2
from velo_payments.models.create_payees_csv_request import CreatePayeesCSVRequest
from velo_payments.models.create_payees_csv_request2 import CreatePayeesCSVRequest2
from velo_payments.models.create_payees_csv_response import CreatePayeesCSVResponse
from velo_payments.models.create_payees_csv_response2 import CreatePayeesCSVResponse2
from velo_payments.models.create_payees_csv_response_rejected_csv_rows import CreatePayeesCSVResponseRejectedCsvRows
from velo_payments.models.create_payees_request import CreatePayeesRequest
from velo_payments.models.create_payees_request2 import CreatePayeesRequest2
from velo_payments.models.create_payment_channel import CreatePaymentChannel
from velo_payments.models.create_payment_channel2 import CreatePaymentChannel2
from velo_payments.models.create_payor_link_request import CreatePayorLinkRequest
from velo_payments.models.create_payout_request_v3 import CreatePayoutRequestV3
from velo_payments.models.create_webhook_request import CreateWebhookRequest
from velo_payments.models.debit_event import DebitEvent
from velo_payments.models.debit_event_all_of import DebitEventAllOf
from velo_payments.models.debit_status_changed import DebitStatusChanged
from velo_payments.models.debit_status_changed_all_of import DebitStatusChangedAllOf
from velo_payments.models.error import Error
from velo_payments.models.error_data import ErrorData
from velo_payments.models.error_response import ErrorResponse
from velo_payments.models.failed_payee import FailedPayee
from velo_payments.models.failed_payee2 import FailedPayee2
from velo_payments.models.failed_submission import FailedSubmission
from velo_payments.models.failed_submission2 import FailedSubmission2
from velo_payments.models.funding_account_response import FundingAccountResponse
from velo_payments.models.funding_account_response2 import FundingAccountResponse2
from velo_payments.models.funding_account_type import FundingAccountType
from velo_payments.models.funding_audit import FundingAudit
from velo_payments.models.funding_event import FundingEvent
from velo_payments.models.funding_event_type import FundingEventType
from velo_payments.models.funding_payor_status_audit_response import FundingPayorStatusAuditResponse
from velo_payments.models.funding_request_v1 import FundingRequestV1
from velo_payments.models.funding_request_v2 import FundingRequestV2
from velo_payments.models.funding_request_v3 import FundingRequestV3
from velo_payments.models.fx_summary import FxSummary
from velo_payments.models.fx_summary_v3 import FxSummaryV3
from velo_payments.models.get_fundings_response import GetFundingsResponse
from velo_payments.models.get_fundings_response_links import GetFundingsResponseLinks
from velo_payments.models.get_payee_list_response import GetPayeeListResponse
from velo_payments.models.get_payee_list_response2 import GetPayeeListResponse2
from velo_payments.models.get_payee_list_response_company import GetPayeeListResponseCompany
from velo_payments.models.get_payee_list_response_company2 import GetPayeeListResponseCompany2
from velo_payments.models.get_payee_list_response_individual import GetPayeeListResponseIndividual
from velo_payments.models.get_payee_list_response_individual2 import GetPayeeListResponseIndividual2
from velo_payments.models.get_payments_for_payout_response_v3 import GetPaymentsForPayoutResponseV3
from velo_payments.models.get_payments_for_payout_response_v3_page import GetPaymentsForPayoutResponseV3Page
from velo_payments.models.get_payments_for_payout_response_v3_summary import GetPaymentsForPayoutResponseV3Summary
from velo_payments.models.get_payments_for_payout_response_v4 import GetPaymentsForPayoutResponseV4
from velo_payments.models.get_payments_for_payout_response_v4_summary import GetPaymentsForPayoutResponseV4Summary
from velo_payments.models.get_payout_statistics import GetPayoutStatistics
from velo_payments.models.get_payouts_response import GetPayoutsResponse
from velo_payments.models.get_payouts_response_v3 import GetPayoutsResponseV3
from velo_payments.models.get_payouts_response_v3_links import GetPayoutsResponseV3Links
from velo_payments.models.get_payouts_response_v3_page import GetPayoutsResponseV3Page
from velo_payments.models.individual import Individual
from velo_payments.models.individual2 import Individual2
from velo_payments.models.individual_name import IndividualName
from velo_payments.models.inline_response400 import InlineResponse400
from velo_payments.models.inline_response401 import InlineResponse401
from velo_payments.models.inline_response403 import InlineResponse403
from velo_payments.models.inline_response404 import InlineResponse404
from velo_payments.models.inline_response409 import InlineResponse409
from velo_payments.models.inline_response412 import InlineResponse412
from velo_payments.models.invitation_status import InvitationStatus
from velo_payments.models.invitation_status2 import InvitationStatus2
from velo_payments.models.invite_payee_request import InvitePayeeRequest
from velo_payments.models.invite_payee_request2 import InvitePayeeRequest2
from velo_payments.models.invite_user_request import InviteUserRequest
from velo_payments.models.kyc_state import KycState
from velo_payments.models.link_for_response import LinkForResponse
from velo_payments.models.list_funding_accounts_response import ListFundingAccountsResponse
from velo_payments.models.list_funding_accounts_response2 import ListFundingAccountsResponse2
from velo_payments.models.list_payments_response_v3 import ListPaymentsResponseV3
from velo_payments.models.list_payments_response_v3_page import ListPaymentsResponseV3Page
from velo_payments.models.list_payments_response_v4 import ListPaymentsResponseV4
from velo_payments.models.list_source_account_response import ListSourceAccountResponse
from velo_payments.models.list_source_account_response_links import ListSourceAccountResponseLinks
from velo_payments.models.list_source_account_response_page import ListSourceAccountResponsePage
from velo_payments.models.list_source_account_response_v2 import ListSourceAccountResponseV2
from velo_payments.models.list_source_account_response_v2_links import ListSourceAccountResponseV2Links
from velo_payments.models.list_source_account_response_v3 import ListSourceAccountResponseV3
from velo_payments.models.list_source_account_response_v3_links import ListSourceAccountResponseV3Links
from velo_payments.models.localisation_details import LocalisationDetails
from velo_payments.models.mfa_details import MFADetails
from velo_payments.models.mfa_type import MFAType
from velo_payments.models.name import Name
from velo_payments.models.name2 import Name2
from velo_payments.models.notification import Notification
from velo_payments.models.notifications import Notifications
from velo_payments.models.notifications2 import Notifications2
from velo_payments.models.ofac_status import OfacStatus
from velo_payments.models.onboarded_status import OnboardedStatus
from velo_payments.models.onboarded_status2 import OnboardedStatus2
from velo_payments.models.onboarding_status_changed import OnboardingStatusChanged
from velo_payments.models.page_for_response import PageForResponse
from velo_payments.models.page_resource_funding_payor_status_audit_response_funding_payor_status_audit_response import PageResourceFundingPayorStatusAuditResponseFundingPayorStatusAuditResponse
from velo_payments.models.paged_payee_invitation_status_response import PagedPayeeInvitationStatusResponse
from velo_payments.models.paged_payee_invitation_status_response2 import PagedPayeeInvitationStatusResponse2
from velo_payments.models.paged_payee_invitation_status_response_page import PagedPayeeInvitationStatusResponsePage
from velo_payments.models.paged_payee_response import PagedPayeeResponse
from velo_payments.models.paged_payee_response2 import PagedPayeeResponse2
from velo_payments.models.paged_payee_response_links import PagedPayeeResponseLinks
from velo_payments.models.paged_payee_response_page import PagedPayeeResponsePage
from velo_payments.models.paged_payee_response_summary import PagedPayeeResponseSummary
from velo_payments.models.paged_payments_response_v3 import PagedPaymentsResponseV3
from velo_payments.models.paged_user_response import PagedUserResponse
from velo_payments.models.paged_user_response_links import PagedUserResponseLinks
from velo_payments.models.paged_user_response_page import PagedUserResponsePage
from velo_payments.models.password_request import PasswordRequest
from velo_payments.models.payable_issue import PayableIssue
from velo_payments.models.payable_issue2 import PayableIssue2
from velo_payments.models.payable_status_changed import PayableStatusChanged
from velo_payments.models.payee_address import PayeeAddress
from velo_payments.models.payee_address2 import PayeeAddress2
from velo_payments.models.payee_delta import PayeeDelta
from velo_payments.models.payee_delta2 import PayeeDelta2
from velo_payments.models.payee_delta_response import PayeeDeltaResponse
from velo_payments.models.payee_delta_response2 import PayeeDeltaResponse2
from velo_payments.models.payee_delta_response2_links import PayeeDeltaResponse2Links
from velo_payments.models.payee_delta_response_links import PayeeDeltaResponseLinks
from velo_payments.models.payee_delta_response_page import PayeeDeltaResponsePage
from velo_payments.models.payee_detail_response import PayeeDetailResponse
from velo_payments.models.payee_detail_response2 import PayeeDetailResponse2
from velo_payments.models.payee_details_changed import PayeeDetailsChanged
from velo_payments.models.payee_event import PayeeEvent
from velo_payments.models.payee_event_all_of import PayeeEventAllOf
from velo_payments.models.payee_event_all_of_reasons import PayeeEventAllOfReasons
from velo_payments.models.payee_invitation_status_response import PayeeInvitationStatusResponse
from velo_payments.models.payee_invitation_status_response2 import PayeeInvitationStatusResponse2
from velo_payments.models.payee_payor_ref import PayeePayorRef
from velo_payments.models.payee_payor_ref_v3 import PayeePayorRefV3
from velo_payments.models.payee_type import PayeeType
from velo_payments.models.payee_user_self_update_request import PayeeUserSelfUpdateRequest
from velo_payments.models.payment_audit_currency import PaymentAuditCurrency
from velo_payments.models.payment_audit_currency_v3 import PaymentAuditCurrencyV3
from velo_payments.models.payment_channel_country import PaymentChannelCountry
from velo_payments.models.payment_channel_rule import PaymentChannelRule
from velo_payments.models.payment_channel_rules_response import PaymentChannelRulesResponse
from velo_payments.models.payment_delta import PaymentDelta
from velo_payments.models.payment_delta_response import PaymentDeltaResponse
from velo_payments.models.payment_delta_response_v1 import PaymentDeltaResponseV1
from velo_payments.models.payment_delta_v1 import PaymentDeltaV1
from velo_payments.models.payment_event import PaymentEvent
from velo_payments.models.payment_event_all_of import PaymentEventAllOf
from velo_payments.models.payment_event_response import PaymentEventResponse
from velo_payments.models.payment_event_response_v3 import PaymentEventResponseV3
from velo_payments.models.payment_instruction_v3 import PaymentInstructionV3
from velo_payments.models.payment_rails import PaymentRails
from velo_payments.models.payment_rejected_or_returned import PaymentRejectedOrReturned
from velo_payments.models.payment_rejected_or_returned_all_of import PaymentRejectedOrReturnedAllOf
from velo_payments.models.payment_response_v3 import PaymentResponseV3
from velo_payments.models.payment_response_v4 import PaymentResponseV4
from velo_payments.models.payment_response_v4_payout import PaymentResponseV4Payout
from velo_payments.models.payment_status_changed import PaymentStatusChanged
from velo_payments.models.payment_status_changed_all_of import PaymentStatusChangedAllOf
from velo_payments.models.payment_v3 import PaymentV3
from velo_payments.models.payor_address import PayorAddress
from velo_payments.models.payor_address_v2 import PayorAddressV2
from velo_payments.models.payor_aml_transaction import PayorAmlTransaction
from velo_payments.models.payor_aml_transaction_v3 import PayorAmlTransactionV3
from velo_payments.models.payor_branding_response import PayorBrandingResponse
from velo_payments.models.payor_create_api_key_request import PayorCreateApiKeyRequest
from velo_payments.models.payor_create_api_key_response import PayorCreateApiKeyResponse
from velo_payments.models.payor_create_application_request import PayorCreateApplicationRequest
from velo_payments.models.payor_email_opt_out_request import PayorEmailOptOutRequest
from velo_payments.models.payor_links_response import PayorLinksResponse
from velo_payments.models.payor_links_response_links import PayorLinksResponseLinks
from velo_payments.models.payor_links_response_payors import PayorLinksResponsePayors
from velo_payments.models.payor_logo_request import PayorLogoRequest
from velo_payments.models.payor_v1 import PayorV1
from velo_payments.models.payor_v2 import PayorV2
from velo_payments.models.payout_company_v3 import PayoutCompanyV3
from velo_payments.models.payout_individual_v3 import PayoutIndividualV3
from velo_payments.models.payout_name_v3 import PayoutNameV3
from velo_payments.models.payout_payee_v3 import PayoutPayeeV3
from velo_payments.models.payout_payor import PayoutPayor
from velo_payments.models.payout_payor_ids import PayoutPayorIds
from velo_payments.models.payout_principal import PayoutPrincipal
from velo_payments.models.payout_status import PayoutStatus
from velo_payments.models.payout_status_v3 import PayoutStatusV3
from velo_payments.models.payout_summary_audit import PayoutSummaryAudit
from velo_payments.models.payout_summary_audit_v3 import PayoutSummaryAuditV3
from velo_payments.models.payout_summary_response_v3 import PayoutSummaryResponseV3
from velo_payments.models.payout_type import PayoutType
from velo_payments.models.ping import Ping
from velo_payments.models.query_batch_response import QueryBatchResponse
from velo_payments.models.query_batch_response2 import QueryBatchResponse2
from velo_payments.models.quote_fx_summary_v3 import QuoteFxSummaryV3
from velo_payments.models.quote_response_v3 import QuoteResponseV3
from velo_payments.models.region_v2 import RegionV2
from velo_payments.models.register_sms_request import RegisterSmsRequest
from velo_payments.models.rejected_payment_v3 import RejectedPaymentV3
from velo_payments.models.resend_token_request import ResendTokenRequest
from velo_payments.models.reset_password_request import ResetPasswordRequest
from velo_payments.models.role import Role
from velo_payments.models.role_update_request import RoleUpdateRequest
from velo_payments.models.self_mfa_type_unregister_request import SelfMFATypeUnregisterRequest
from velo_payments.models.self_update_password_request import SelfUpdatePasswordRequest
from velo_payments.models.set_notifications_request import SetNotificationsRequest
from velo_payments.models.source_account_response import SourceAccountResponse
from velo_payments.models.source_account_response_v2 import SourceAccountResponseV2
from velo_payments.models.source_account_response_v3 import SourceAccountResponseV3
from velo_payments.models.source_account_summary import SourceAccountSummary
from velo_payments.models.source_account_summary_v3 import SourceAccountSummaryV3
from velo_payments.models.source_account_type import SourceAccountType
from velo_payments.models.source_account_v3 import SourceAccountV3
from velo_payments.models.source_event import SourceEvent
from velo_payments.models.supported_countries_response import SupportedCountriesResponse
from velo_payments.models.supported_countries_response_v2 import SupportedCountriesResponseV2
from velo_payments.models.supported_country import SupportedCountry
from velo_payments.models.supported_country_v2 import SupportedCountryV2
from velo_payments.models.supported_currency_response_v2 import SupportedCurrencyResponseV2
from velo_payments.models.supported_currency_v2 import SupportedCurrencyV2
from velo_payments.models.transfer_request import TransferRequest
from velo_payments.models.transfer_request2 import TransferRequest2
from velo_payments.models.transmission_type import TransmissionType
from velo_payments.models.transmission_types import TransmissionTypes
from velo_payments.models.transmission_types2 import TransmissionTypes2
from velo_payments.models.unregister_mfa_request import UnregisterMFARequest
from velo_payments.models.update_payee_details_request import UpdatePayeeDetailsRequest
from velo_payments.models.update_payee_details_request2 import UpdatePayeeDetailsRequest2
from velo_payments.models.update_remote_id_request import UpdateRemoteIdRequest
from velo_payments.models.update_remote_id_request2 import UpdateRemoteIdRequest2
from velo_payments.models.update_webhook_request import UpdateWebhookRequest
from velo_payments.models.user_details_update_request import UserDetailsUpdateRequest
from velo_payments.models.user_info import UserInfo
from velo_payments.models.user_response import UserResponse
from velo_payments.models.user_status import UserStatus
from velo_payments.models.user_type import UserType
from velo_payments.models.user_type2 import UserType2
from velo_payments.models.validate_password_response import ValidatePasswordResponse
from velo_payments.models.watchlist_status import WatchlistStatus
from velo_payments.models.watchlist_status2 import WatchlistStatus2
from velo_payments.models.webhook_response import WebhookResponse
from velo_payments.models.webhooks_response import WebhooksResponse
from velo_payments.models.withdraw_payment_request import WithdrawPaymentRequest
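# Illustrative sketch (not part of the generated package) of the authentication
# flow described in the module docstring above: base64-encode "apiKey:apiSecret",
# POST it to /v1/authenticate, then present the returned access_token as a
# Bearer token on subsequent calls. The key/secret values are placeholders and
# the `requests` dependency is assumed to be available.
if __name__ == "__main__":
    import base64
    import requests

    api_key = "your-api-key"        # placeholder
    api_secret = "your-api-secret"  # placeholder
    basic = base64.b64encode("{}:{}".format(api_key, api_secret).encode()).decode()

    resp = requests.post(
        "https://api.sandbox.velopayments.com/v1/authenticate",
        params={"grant_type": "client_credentials"},
        headers={"Content-Type": "application/json",
                 "Authorization": "Basic " + basic},
    )
    resp.raise_for_status()
    access_token = resp.json()["access_token"]

    # All subsequent API calls carry the token in the Authorization header.
    auth_headers = {"Authorization": "Bearer " + access_token}
    print(auth_headers)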
|
python
|
from data.mag.build_docs_from_sqlite import generate_papers, UPDATE_GENERATORS, strip_and_dump_from_gen
from solr.instances import get_session
from data import upload_batches_unparsed
from solr.session import SolrSession
from solr.configsets import get_config
from multiprocessing import Pool, cpu_count
import itertools
def batch_jsonl(generator, batchsize):
"""
Creates larger chunks from a generator function.
:param generator: the generator function that yields lines of json
:param batchsize: the maximum size of the batch
:return: yields utf-8 encoded bytes
"""
while True:
batch = itertools.islice(generator, batchsize)
batch = '\n,'.join(batch)
if 0 < len(batch):
batch = '['+batch+']'
yield batch.encode('utf-8')
else:
break
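# For example (illustrative), two JSON lines become one Solr-ready JSON array payload:
#   next(batch_jsonl(iter(['{"id": 1}', '{"id": 2}']), 2))
#   -> b'[{"id": 1}\n,{"id": 2}]'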
def upload_parallel(generator, session):
with Pool(processes=cpu_count()) as pool:
yield from pool.imap(session.collection('mag').update.jsonUpdate, generator)
def reset_collection(s: SolrSession):
print('deleting collection')
print(s.admin.collections.delete('mag').json())
print('deleting config')
print(s.admin.configs.delete('mag').json())
print('sending latest config')
print(s.admin.configs.upload('mag', get_config('mag')).json())
print('creating collection')
print(s.admin.collections.create('mag', 4, 1, 1, 'mag').json())
def main():
s = get_session('localhost', 8984)
reset = False
if reset is True:
reset_collection(s)
upload_batches_unparsed(s, 'mag', strip_and_dump_from_gen(generate_papers()))
for generator in UPDATE_GENERATORS:
strip_gen = strip_and_dump_from_gen(generator())
batch_gen = batch_jsonl(strip_gen, 10_000)
for response in upload_parallel(batch_gen, s): # 3922 batches with 10_000 size
d = response.json()
if d['responseHeader']['status'] != 0:
print(f'{d}')
if __name__ == '__main__':
main()
|
python
|
"""
Module to log on screen
====================================
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
import click
from functools import partial
from click.termui import style
# ============================================================================
# CLASS AND DEFINITIONS
# ============================================================================
def log_in_terminal(message: str, *args, **kwargs) -> None:
"""This function logs in the terminal a message with a specific color
Args:
message (str): Message to log on the console
foreground(str): Foreground color see click.style for options
"""
if args:
message = message + "\n" + "\n".join(args)
click.echo(click.style(message, **kwargs))
def log_error(message, *args, **kwargs) -> None:
"""Logs an error message in red"""
f = partial(log_in_terminal, fg="red", **kwargs)
f(message, *args)
def log_verify(message, *args, **kwargs) -> None:
"""Logs an verification message in blue"""
f = partial(log_in_terminal, fg="blue", **kwargs)
f(message, *args)
def log_warning(message, *args, **kwargs) -> None:
"""Logs a warning message in yellow"""
f = partial(log_in_terminal, fg="yellow", **kwargs)
f(message, *args)
def log_success(message, *args, **kwargs) -> None:
"""Logs a success message in green"""
f = partial(log_in_terminal, fg="green", **kwargs)
f(message, *args)
if __name__ == "__main__":
log_in_terminal("Special", "message")
log_error("Error", "message")
log_verify("Verification", "message")
log_warning("Warning", "multiple", "lines")
log_success("Success", "message", "for", "testing")
|
python
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import time
from oslo_config import cfg
from conductor.common import db_backend
from conductor import service
CONF = cfg.CONF
def current_time_millis():
"""Current time in milliseconds."""
return int(round(time.time() * 1000))
def main():
"""Sample usage of Music."""
service.prepare_service()
CONF.set_override('debug', True, 'music_api')
CONF.set_override('mock', True, 'music_api')
CONF.set_override('hostnames', ['music2'], 'music_api')
music = db_backend.get_client()
print("Music version %s" % music.version())
# Randomize the name so that we don't step on each other.
keyspace = 'NewVotingApp' + str(current_time_millis() // 100)
music.keyspace_create(keyspace)
# Create the table
kwargs = {
'keyspace': keyspace,
'table': 'votecount',
'schema': {
'name': 'text',
'count': 'varint',
'PRIMARY KEY': '(name)'
}
}
music.table_create(**kwargs)
# Candidate data
data = {
'Joe': 5,
'Shankar': 7,
'Gueyoung': 8,
'Matti': 2,
'Kaustubh': 0
}
# Create an entry in the voting table for each candidate
# and with a vote count of 0.
kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
for name in data: # We only want the keys
kwargs['pk_value'] = name
kwargs['values'] = {'name': name, 'count': 0}
music.row_create(**kwargs)
# Update each candidate's count atomically.
kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
for name in data:
count = data[name]
kwargs['pk_value'] = name
kwargs['values'] = {'count': count}
kwargs['atomic'] = True
music.row_update(**kwargs)
# Read all rows
kwargs = {'keyspace': keyspace, 'table': 'votecount'}
print(music.row_read(**kwargs)) # Reads all rows
# Delete Joe, read Matti
kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
kwargs['pk_value'] = 'Joe'
music.row_delete(**kwargs)
kwargs['pk_value'] = 'Matti'
print(music.row_read(**kwargs))
# Read all rows again
kwargs = {'keyspace': keyspace, 'table': 'votecount'}
print(music.row_read(**kwargs)) # Reads all rows
# Cleanup.
music.keyspace_delete(keyspace)
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
import subprocess
import os
import glob
import sys
import time
import argparse
def merge(infile, outfile, id_name_map):
merge_dict = {}
with open(infile, 'r') as f:
content = ""
name = ""
for line in f:
if line[0] == ">":
name = line[1:].strip()
if name not in merge_dict:
merge_dict[name] = []
else:
content = line
merge_dict[name].append(content)
with open(outfile, 'w') as f:
for k, v in merge_dict.items():
ref_name = id_name_map[k]
f.write(">" + ref_name + "\n")
for line in v:
f.write(line)
def main_step(args):
work_space = args.workspace
infile_list = args.infile_list
out_file = args.outfile
gu_thread_number = args.gu_worker
KMERLEN = args.kmer_len
bin_num = args.bin_num
uniq_ref_num = args.uniq_ref_num
if not os.path.exists(work_space):
os.mkdir(work_space)
file_path = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.join(file_path, 'bin')
#---------step1-----------#
# use part of kmc to generate intermediate results,
# but we modified the process so that each super-kmer in the generated
# intermediate results carries information about which species it belongs to.
s1_start = time.time()
try:
print("step1: ", " ".join([os.path.join(bin_dir, 'kmc'),
'-k'+str(KMERLEN),
'-n'+str(bin_num),
'-fm', '@'+infile_list,
"tmp", work_space]))
# execute the modified kmc
kmc_out = subprocess.check_output([os.path.join(bin_dir, 'kmc'),
'-k'+str(KMERLEN),
'-n'+str(bin_num),
'-fm', '@'+infile_list,
"tmp", work_space])
id2filenames = set(kmc_out.decode().split('\n'))
print("id2filenames: ", id2filenames)
for s in id2filenames:
print(s)
except subprocess.CalledProcessError as e:
print("run kmc error!")
print(e.output)
exit(-1)
# collect the names of the intermediate result files for use in the following steps.
with open(os.path.join(work_space, 'binList.list'), 'w') as f:
for filename in glob.glob(os.path.join(work_space, "*.bin")):
f.write(filename + '\n')
s1_end = time.time()
print("step 1 time: ", s1_end - s1_start)
#---------step2-----------#
# generate unique kmer
try:
print("step2: ", " ".join([os.path.join(bin_dir, 'generate_uniq'),
os.path.join(work_space, "binList.list"),
str(KMERLEN), "outfile.txt", str(gu_thread_number),
infile_list,
str(1 if args.exclude_last else 0),
str(uniq_ref_num)
]))
gu_out = subprocess.check_output([os.path.join(bin_dir, 'generate_uniq'),
os.path.join(work_space, "binList.list"),
str(KMERLEN), "outfile.txt", str(gu_thread_number),
infile_list,
str(1 if args.exclude_last else 0),
str(uniq_ref_num)])
except subprocess.CalledProcessError as e:
print("run gene uniq error!")
print(e.output)
exit(-1)
#print("done, out file is in outfile.txt")
s2_end = time.time()
print("step 2 time: ", s2_end - s1_end)
try:
if args.output_char: # if outputting characters, the k-mer length is needed
print('step3: ', ' '.join([os.path.join(bin_dir, 'change_format'),
"./outfile.txt", out_file, infile_list, str(KMERLEN)]))
cf_out = subprocess.check_output([os.path.join(bin_dir, 'change_format'),
"./outfile.txt", out_file, infile_list, str(KMERLEN)])
else: # if not outputting characters, output .bin files and set kmer_len to 0
print('step3: ', ' '.join([os.path.join(bin_dir, 'change_format'),
"./outfile.txt", out_file, infile_list, str(0)]))
cf_out = subprocess.check_output([os.path.join(bin_dir, 'change_format'),
"./outfile.txt", out_file, infile_list, str(0)])
except subprocess.CalledProcessError as e:
print("run change format error")
print(e.output)
exit(-1)
s3_end = time.time()
print("step 3: merge step time: ", s3_end - s2_end)
if __name__ == "__main__":
#if len(sys.argv) < 4:
# print("./run_this.py [tmp workspace] [fasta file list] [result file] [gen uniq thread number]")
# exit(-1)
#work_space = sys.argv[1] #"./workspace"
#infile_list = sys.argv[2]
#out_file = sys.argv[3]
#gu_thread_number = sys.argv[4]
parser = argparse.ArgumentParser(description = "RabbitUniq")
parser.add_argument('--workspace', '-w', help = "workspace directory where the bin files are stored [default: workspace]", type = str, required = False, default = "workspace")
parser.add_argument('--infile_list', '-l', help = "input file list, one line per file", type = str, required = True)
parser.add_argument('--outfile', '-o', help = "output file", type = str, required = True)
parser.add_argument('--gu_worker', '-n', help = "The number of worker threads used when generating unique k-mers [default: 20]", type = int, required = False, default = 20)
parser.add_argument('--kmer_len', '-k', help = "Unique k-mer length [default: 25]", type = int, required = False, default = 25)
parser.add_argument('--bin_num', '-b', help = "Number of bin files to store, from 64 to 2000 [default: 512]", type = int, required = False, default = 512)
parser.add_argument('--exclude_last', '-e', help = "Exclude the last element in infile_list from the output", default=False, action = "store_true")
parser.add_argument('--uniq_ref_num', '-u', help = "Threshold for considering a k-mer unique [default: 1]", type = int, required = False, default = 1)
parser.add_argument('--output_char', '-c', help = "Output the unique k-mer collection as a character-based file instead of a binary file (slower, so not recommended)", action = "store_true")
args = parser.parse_args()
main_step(args)
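# Example invocation (illustrative; file names are placeholders):
#   ./run_this.py -l fasta_list.txt -o unique_kmers.bin -w ./workspace -n 20 -k 25 -b 512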
|
python
|
#---------------------------------------
# Import Libraries
#---------------------------------------
import sys
import json
import codecs
import os
#---------------------------------------
# [Required] Script Information
#---------------------------------------
ScriptName = "Queue Display"
Website = "twitch.tv/encryptedthoughts"
Description = "A script to populate an overlay showing queue information in an overlay"
Creator = "EncryptedThoughts"
Version = "1.0.0"
# ---------------------------------------
# Set Variables
# ---------------------------------------
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
ReadMe = os.path.join(os.path.dirname(__file__), "README.md")
ScriptSettings = None
# ---------------------------------------
# Script Classes
# ---------------------------------------
class Settings(object):
def __init__(self, settingsfile=None):
with codecs.open(settingsfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
def Reload(self, jsonData):
self.__dict__ = json.loads(jsonData, encoding="utf-8")
# ---------------------------------------
# Functions
# ---------------------------------------
def UpdateQueue():
payload = {}
queue = Parent.GetQueue(10)
count = 1
for item in queue:
payload[str(count)] = queue[item]
count += 1
Parent.Log(ScriptName, str(payload))
Parent.BroadcastWsEvent("EVENT_QUEUE_UPDATE", json.dumps(payload))
return
def ChangeQueueStatus(status):
payload = { "status": status }
Parent.BroadcastWsEvent("EVENT_QUEUE_STATUS", json.dumps(payload))
return
#---------------------------------------
# [Required] Initialize Data / Load Only
#---------------------------------------
def Init():
global ScriptSettings
ScriptSettings = Settings(SettingsFile)
return
# ---------------------------------------
# Chatbot Save Settings Function
# ---------------------------------------
def ReloadSettings(jsondata):
ScriptSettings.Reload(jsondata)
return
def Execute(data):
if data.IsChatMessage():
if "!queue open" in data.Message.lower():
if Parent.HasPermission(data.User,"Moderator",""):
ChangeQueueStatus("Open")
elif "!queue close" in data.Message.lower():
if Parent.HasPermission(data.User,"Moderator",""):
ChangeQueueStatus("Closed")
UpdateQueue();
return
def Tick():
return
# ---------------------------------------
# Script UI Button Functions
# ---------------------------------------
def OpenReadMe():
os.startfile(ReadMe)
return
|
python
|
"""Implementation of the StarBound block file v2/3 storage"""
import logging
import struct
from starfuse.fs.mapped_file import MappedFile
log = logging.getLogger(__name__)
class InvalidMagic(Exception):
"""A block file has an invalid magic string"""
def __init__(self, path):
super(InvalidMagic, self).__init__('a block file has an invalid magic string: %s' % path)
class SBBF03(MappedFile):
"""Implements a StarBound block file v3 store that is backed by a file
Can also be used to read in v2.
It's worth noting that the memory regions in this class are mapped and not
read-in."""
def __init__(self, path, page_count, read_only=False):
super(SBBF03, self).__init__(path, page_count, read_only=read_only)
self._header_size = 0
self._block_size = 0
self.header = None
self.user_header = None
self.blocks = dict()
self.__load(path)
def __del__(self):
self.close()
def block_region(self, bid):
"""Gets a block region given the block ID"""
base_offset = self._header_size + (self._block_size * bid)
return self.region(offset=base_offset, size=self._block_size)
@property
def block_count(self):
block_region_size = len(self) - self._header_size
return block_region_size // self._block_size
def __load(self, path):
log.debug('loading SBBF03 block file: %s', path)
region = self.region(0, 32)
# magic constant
magic = region.read(6)
if magic not in [b'SBBF03', b'SBBF02']:
raise InvalidMagic(path)
log.debug('block file has valid magic constant: %s', magic)
# get the header and block size
# this is all we need to actually read from the file before we start mmap-ing.
# this is because we want to be able to mmap the header as well, and all we need to know
# are the header sizes and block sizes.
(self._header_size, self._block_size) = struct.unpack('>ii', region.read(8))
log.debug('header_size=%d, block_size=%d', self._header_size, self._block_size)
# calculate number of blocks
log.debug('block count: %d', self.block_count)
# map header
self.header = self.region(offset=0, size=self._header_size)
# map user header
self.user_header = self.header.region(offset=0x20)
log.debug('mapped headers successfully')
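# On-disk layout as parsed above (illustrative summary):
#   bytes 0-5                        magic, b'SBBF02' or b'SBBF03'
#   bytes 6-13                       header_size and block_size as big-endian int32 ('>ii')
#   bytes 0 .. header_size           header region (user header starts at offset 0x20)
#   header_size + bid * block_size   start of block `bid`, block_size bytes long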
|
python
|
import pandas as pd
import numpy as np
def load_file(filename: str):
"""Load the .xls file and return as a dataframe object."""
df = pd.read_csv(filename, delimiter='\t')
return df
data = load_file('outputs.xls')
print(data.info())
|
python
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import csv
import sys
import json
import pickle
import logging
from copy import deepcopy
from collections import Counter
from itertools import groupby, chain
from indra.statements import Agent
from indra.databases import uniprot_client, hgnc_client
from indra.util import read_unicode_csv, write_unicode_csv
logger = logging.getLogger('grounding_mapper')
class GroundingMapper(object):
"""Maps grounding of INDRA Agents based on a given grounding map.
Attributes
----------
gm : dict
The grounding map, a dictionary mapping strings (entity names) to
a dictionary of database identifiers.
agent_map : Optional[dict]
A dictionary mapping strings to grounded INDRA Agents with given state.
"""
def __init__(self, gm, agent_map=None):
self.gm = gm
self.agent_map = agent_map if agent_map is not None else {}
def update_agent_db_refs(self, agent, agent_text, do_rename=True):
gene_name = None
map_db_refs = deepcopy(self.gm.get(agent_text))
up_id = map_db_refs.get('UP')
hgnc_sym = map_db_refs.get('HGNC')
if up_id and not hgnc_sym:
gene_name = uniprot_client.get_gene_name(up_id, False)
if gene_name:
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
map_db_refs['HGNC'] = hgnc_id
elif hgnc_sym and not up_id:
# Override the HGNC symbol entry from the grounding
# map with an HGNC ID
hgnc_id = hgnc_client.get_hgnc_id(hgnc_sym)
if hgnc_id:
map_db_refs['HGNC'] = hgnc_id
# Now get the Uniprot ID for the gene
up_id = hgnc_client.get_uniprot_id(hgnc_id)
if up_id:
map_db_refs['UP'] = up_id
# If there's no HGNC ID for this symbol, raise an
# Exception
else:
raise ValueError('No HGNC ID corresponding to gene '
'symbol %s in grounding map.' %
hgnc_sym)
# If we have both, check the gene symbol ID against the
# mapping from Uniprot
elif up_id and hgnc_sym:
# Get HGNC Symbol from Uniprot
gene_name = uniprot_client.get_gene_name(up_id)
if not gene_name:
raise ValueError('No gene name found for Uniprot '
'ID %s (expected %s)' %
(up_id, hgnc_sym))
# We got gene name, compare it to the HGNC name
else:
if gene_name != hgnc_sym:
raise ValueError('Gene name %s for Uniprot ID '
'%s does not match HGNC '
'symbol %s given in grounding '
'map.' %
(gene_name, up_id, hgnc_sym))
else:
hgnc_id = hgnc_client.get_hgnc_id(hgnc_sym)
if not hgnc_id:
logger.error('No HGNC ID corresponding to gene '
'symbol %s in grounding map.' % hgnc_sym)
else:
map_db_refs['HGNC'] = hgnc_id
# Assign the DB refs from the grounding map to the agent
agent.db_refs = map_db_refs
# Are we renaming right now?
if do_rename:
# If there's a FamPlex ID, prefer that for the name
if agent.db_refs.get('FPLX'):
agent.name = agent.db_refs.get('FPLX')
# Get the HGNC symbol or gene name (retrieved above)
elif hgnc_sym is not None:
agent.name = hgnc_sym
elif gene_name is not None:
agent.name = gene_name
return
def map_agents_for_stmt(self, stmt, do_rename=True):
mapped_stmt = deepcopy(stmt)
# Iterate over the agents
mapped_agent_list = mapped_stmt.agent_list()
# Update agents directly participating in the statement
agent_list = mapped_stmt.agent_list()
for idx in range(len(agent_list)):
agent = agent_list[idx]
if agent is None or agent.db_refs.get('TEXT') is None:
continue
new_agent, maps_to_none = self.map_agent(agent, do_rename)
if maps_to_none:
# Skip the entire statement if the agent maps to None in the
# grounding map
return None
# If the old agent had bound conditions, but the new agent does
# not, copy the bound conditions over
if new_agent is not None and len(new_agent.bound_conditions) == 0:
new_agent.bound_conditions = agent.bound_conditions
agent_list[idx] = new_agent
mapped_stmt.set_agent_list(agent_list)
# Update agents in the bound conditions
for agent in agent_list:
if agent is not None:
for bc in agent.bound_conditions:
bc.agent, maps_to_none = self.map_agent(bc.agent, do_rename)
if maps_to_none:
# Skip the entire statement if the agent maps to None
# in the grounding map
return None
return mapped_stmt
def map_agent(self, agent, do_rename):
"""Grounds an agent; returns the new agent object (which might be
a different object if we load a new agent state from JSON).
Parameters
----------
agent: indra.statements.Agent
The agent to map
do_rename: bool
Whether to rename the agent based on its new grounding
Returns
-------
grounded_agent: indra.statements.Agent
The grounded agent
maps_to_none: bool
Whether the agent is in the grounding map and maps to None
"""
agent_text = agent.db_refs.get('TEXT')
mapped_to_agent_json = self.agent_map.get(agent_text)
if mapped_to_agent_json:
mapped_to_agent = \
Agent._from_json(mapped_to_agent_json['agent'])
return mapped_to_agent, False
# Look this string up in the grounding map
# If not in the map, leave agent alone and continue
if agent_text in self.gm.keys():
map_db_refs = self.gm[agent_text]
else:
return agent, False
# If it's in the map but it maps to None, then filter out
# this statement by skipping it
if map_db_refs is None:
# Increase counter if this statement has not already
# been skipped via another agent
logger.debug("Skipping %s" % agent_text)
return None, True
# If it has a value that's not None, map it and add it
else:
# Otherwise, update the agent's db_refs field
self.update_agent_db_refs(agent, agent_text, do_rename)
return agent, False
def map_agents(self, stmts, do_rename=True):
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt)
else:
num_skipped += 1
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts
def rename_agents(self, stmts):
# Make a copy of the stmts
mapped_stmts = deepcopy(stmts)
# Iterate over the statements
for _, stmt in enumerate(mapped_stmts):
# Iterate over the agents
for agent in stmt.agent_list():
if agent is None:
continue
# If there's a FamPlex ID, prefer that for the name
if agent.db_refs.get('FPLX'):
agent.name = agent.db_refs.get('FPLX')
# Take a HGNC name from Uniprot next
elif agent.db_refs.get('UP'):
# Try for the gene name
gene_name = uniprot_client.get_gene_name(
agent.db_refs.get('UP'),
web_fallback=False)
if gene_name:
agent.name = gene_name
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
agent.db_refs['HGNC'] = hgnc_id
# Take the text string
#if agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
# If this fails, then we continue with no change
# Fall back to the text string
#elif agent.db_refs.get('TEXT'):
# agent.name = agent.db_refs.get('TEXT')
return mapped_stmts
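# Typical usage of the mapper defined above (illustrative):
#   mapper = GroundingMapper(default_grounding_map, default_agent_map)
#   mapped_stmts = mapper.map_agents(stmts, do_rename=True)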
# TODO: handle the cases when there is more than one entry for the same
# key (e.g., ROS, ER)
def load_grounding_map(grounding_map_path, ignore_path=None):
g_map = {}
map_rows = read_unicode_csv(grounding_map_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
lineterminator='\r\n')
if ignore_path and os.path.exists(ignore_path):
ignore_rows = read_unicode_csv(ignore_path, delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
lineterminator='\r\n')
else:
ignore_rows = []
csv_rows = chain(map_rows, ignore_rows)
for row in csv_rows:
key = row[0]
db_refs = {'TEXT': key}
keys = [entry for entry in row[1::2] if entry != '']
values = [entry for entry in row[2::2] if entry != '']
if len(keys) != len(values):
logger.info('ERROR: Mismatched keys and values in row %s' %
str(row))
continue
else:
db_refs.update(dict(zip(keys, values)))
if len(db_refs.keys()) > 1:
g_map[key] = db_refs
else:
g_map[key] = None
return g_map
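# Illustrative row layout assumed by load_grounding_map: the first column is the
# agent text, followed by alternating database-name / identifier columns, e.g.
#   "ERK","FPLX","ERK"            -> {'TEXT': 'ERK', 'FPLX': 'ERK'}
#   "XYZ" (no grounding columns)  -> None, so statements grounded only as "XYZ" are filtered out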
# Some useful functions for analyzing the grounding of sets of statements
# Put together all agent texts along with their grounding
def all_agents(stmts):
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents
def agent_texts(agents):
return [ag.db_refs.get('TEXT') for ag in agents]
def get_sentences_for_agent(text, stmts, max_sentences=None):
sentences = []
for stmt in stmts:
for agent in stmt.agent_list():
if agent is not None and agent.db_refs.get('TEXT') == text:
sentences.append((stmt.evidence[0].pmid,
stmt.evidence[0].text))
if max_sentences is not None and \
len(sentences) >= max_sentences:
return sentences
return sentences
def agent_texts_with_grounding(stmts):
allag = all_agents(stmts)
# Convert PFAM-DEF lists into tuples so that they are hashable and can
# be tabulated with a Counter
for ag in allag:
pfam_def = ag.db_refs.get('PFAM-DEF')
if pfam_def is not None:
ag.db_refs['PFAM-DEF'] = tuple(pfam_def)
refs = [tuple(ag.db_refs.items()) for ag in allag]
refs_counter = Counter(refs)
refs_counter_dict = [(dict(entry[0]), entry[1])
for entry in refs_counter.items()]
# First, sort by text so that we can do a groupby
refs_counter_dict.sort(key=lambda x: x[0].get('TEXT'))
# Then group by text
grouped_by_text = []
for k, g in groupby(refs_counter_dict, key=lambda x: x[0].get('TEXT')):
# Total occurrences of this agent text
total = 0
entry = [k]
db_ref_list = []
for db_refs, count in g:
# Check if TEXT is our only key, indicating no grounding
if list(db_refs.keys()) == ['TEXT']:
db_ref_list.append((None, None, count))
# Add any other db_refs (not TEXT)
for db, db_id in db_refs.items():
if db == 'TEXT':
continue
else:
db_ref_list.append((db, db_id, count))
total += count
# Sort the db_ref_list by the occurrences of each grounding
entry.append(tuple(sorted(db_ref_list, key=lambda x: x[2],
reverse=True)))
# Now add the total frequency to the entry
entry.append(total)
# And add the entry to the overall list
grouped_by_text.append(tuple(entry))
# Sort the list by the total number of occurrences of each unique key
grouped_by_text.sort(key=lambda x: x[2], reverse=True)
return grouped_by_text
# List of all ungrounded entities by number of mentions
def ungrounded_texts(stmts):
ungrounded = [ag.db_refs['TEXT']
for s in stmts
for ag in s.agent_list()
if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
ungroundc = Counter(ungrounded)
ungroundc = ungroundc.items()
ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True)
return ungroundc
def get_agents_with_name(name, stmts):
return [ag for stmt in stmts for ag in stmt.agent_list()
if ag is not None and ag.name == name]
def save_base_map(filename, grouped_by_text):
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
def protein_map_from_twg(twg):
"""Build map of entity texts to validated protein grounding.
Looks at the grounding of the entity texts extracted from the statements
and finds proteins where there is grounding to a human protein that maps to
an HGNC name that is an exact match to the entity text. Returns a dict that
can be used to update/expand the grounding map.
"""
protein_map = {}
unmatched = 0
matched = 0
logger.info('Building grounding map for human proteins')
for agent_text, grounding_list, _ in twg:
# If 'UP' (Uniprot) not one of the grounding entries for this text,
# then we skip it.
if 'UP' not in [entry[0] for entry in grounding_list]:
continue
# Otherwise, collect all the Uniprot IDs for this protein.
uniprot_ids = [entry[1] for entry in grounding_list
if entry[0] == 'UP']
# For each Uniprot ID, look up the species
for uniprot_id in uniprot_ids:
# If it's not a human protein, skip it
mnemonic = uniprot_client.get_mnemonic(uniprot_id)
if mnemonic is None or not mnemonic.endswith('_HUMAN'):
continue
# Otherwise, look up the gene name in HGNC and match against the
# agent text
gene_name = uniprot_client.get_gene_name(uniprot_id)
if gene_name is None:
unmatched += 1
continue
if agent_text.upper() == gene_name.upper():
matched += 1
protein_map[agent_text] = {'TEXT': agent_text,
'UP': uniprot_id}
else:
unmatched += 1
logger.info('Exact matches for %d proteins' % matched)
logger.info('No match (or no gene name) for %d proteins' % unmatched)
return protein_map
def save_sentences(twg, stmts, filename, agent_limit=300):
sentences = []
unmapped_texts = [t[0] for t in twg]
counter = 0
logger.info('Getting sentences for top %d unmapped agent texts.' %
agent_limit)
for text in unmapped_texts:
agent_sentences = get_sentences_for_agent(text, stmts)
sentences += map(lambda tup: (text,) + tup, agent_sentences)
counter += 1
if counter >= agent_limit:
break
# Write sentences to CSV file
write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
default_grounding_map_path = \
os.path.join(os.path.dirname(__file__),
'../resources/famplex/grounding_map.csv')
default_ignore_path = \
os.path.join(os.path.dirname(__file__),
'../resources/famplex/ignore.csv')
default_agent_grounding_path = \
os.path.join(os.path.dirname(__file__),
'../resources/grounding_agents.json')
default_grounding_map = \
load_grounding_map(default_grounding_map_path, default_ignore_path)
gm = default_grounding_map
with open(default_agent_grounding_path, 'r') as fh:
default_agent_map = json.load(fh)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: %s stmt_file" % sys.argv[0])
sys.exit()
statement_file = sys.argv[1]
logger.info("Opening statement file %s" % statement_file)
with open(statement_file, 'rb') as f:
st = pickle.load(f)
stmts = []
for stmt_list in st.values():
stmts += stmt_list
twg = agent_texts_with_grounding(stmts)
save_base_map('%s_twg.csv' % statement_file, twg)
# Filter out those entries that are NOT already in the grounding map
filtered_twg = [entry for entry in twg
if entry[0] not in default_grounding_map.keys()]
# For proteins that aren't explicitly grounded in the grounding map,
# check for trivial corrections by building the protein map
prot_map = protein_map_from_twg(twg)
filtered_twg = [entry for entry in filtered_twg
if entry[0] not in prot_map.keys()]
save_base_map('%s_unmapped_twg.csv' % statement_file, filtered_twg)
# For each unmapped string, get sentences and write to file
save_sentences(filtered_twg, stmts,
'%s_unmapped_sentences.csv' % statement_file)
|
python
|
__author__ = 'marcusmorgenstern'
__mail__ = ''
import copy
import os
import unittest
from os.path import join
import numpy as np
from pyfluka.base import InvalidInputError
from pyfluka.reader.UsrbinReader import UsrbinReader as UR
from pyfluka.utils import PhysicsQuantities as PQ
_basedir = os.path.dirname(__file__)
class TestUsrbinReader(unittest.TestCase):
def setUp(self):
self.reader = UR("Activity")
dataraw = [4.3201E-07, 1.5970E-06, 4.6090E-05, 1.5935E-06, 5.0045E-07, 8.6618E-07, 4.1063E-06, 9.8403E-05,
3.5158E-06, 7.2260E-07]
dataraw = [PQ.Activity(i) for i in dataraw]
binning = [(-45., 45., 1), (-54., 54., 5), (-33., 36., 2)]
binning_reverse = [2, 5, 1]
self.data_tutorial = {"EneDep2": {"Weight": (100., 100.),
"Binning": binning,
"Activity": np.reshape(np.array(dataraw), binning_reverse).transpose()}}
def test_read_keys(self):
d = self.reader.load(join(_basedir, "test_data/UsrbinInputTest.ascii"))
self.assertTrue("EneDep2" in d)
self.assertTrue("Activity" in d["EneDep2"])
self.assertTrue("Binning" in d["EneDep2"])
self.assertTrue("Weight" in d["EneDep2"])
def test_read_simple(self):
d = self.reader.load(join(_basedir, "test_data/UsrbinInputTest.ascii"))
self.assertEqual(d["EneDep2"]["Weight"], self.data_tutorial["EneDep2"]["Weight"])
self.assertEqual(d["EneDep2"]["Binning"], self.data_tutorial["EneDep2"]["Binning"])
self.assertEqual(d["EneDep2"]["Activity"].all(), self.data_tutorial["EneDep2"]["Activity"].all())
def test_read_multiple(self):
d = self.reader.load([join(_basedir, "test_data/UsrbinInputTest.ascii"),
join(_basedir, "test_data/UsrbinInputTest.ascii")])
self.data_tutorial["EneDep2"]["Activity"] *= 2
self.data_tutorial["EneDep2"]["Weight"] = (200, 200)
self.assertEqual(d["EneDep2"]["Weight"], self.data_tutorial["EneDep2"]["Weight"])
self.assertEqual(d["EneDep2"]["Binning"], self.data_tutorial["EneDep2"]["Binning"])
self.assertEqual(d["EneDep2"]["Activity"].all(), self.data_tutorial["EneDep2"]["Activity"].all())
def test_validate_merging_exception_binning_merge_call(self):
data_fail = copy.deepcopy(self.data_tutorial)
data_fail["EneDep2"]["Binning"] = [(-45., 45., 3), (-54., 54., 5), (-33., 36., 2)]
self.assertRaises(InvalidInputError, UR._merge, self.data_tutorial, data_fail)
def test_read_multiple_weighted(self):
reader = UR("Activity", weights=[0.8, 0.7])
d = reader.load([join(_basedir, "test_data/UsrbinInputTest.ascii"),
join(_basedir, "test_data/UsrbinInputTest.ascii")])
self.data_tutorial["EneDep2"]["Activity"] *= 1.5
self.data_tutorial["EneDep2"]["Weight"] = (150, 150)
self.assertEqual(d["EneDep2"]["Weight"], self.data_tutorial["EneDep2"]["Weight"])
self.assertEqual(d["EneDep2"]["Binning"], self.data_tutorial["EneDep2"]["Binning"])
self.assertEqual(d["EneDep2"]["Activity"].all(), self.data_tutorial["EneDep2"]["Activity"].all())
@unittest.skip("Not implemented - cannot test nested function directly")
def test_pack_data_2d(self):
binning = [(-54., 54., 5), (-33., 36., 2)]
binning_reverse = [2, 5, 1]
dataraw = [4.3201E-07, 1.5970E-06, 4.6090E-05, 1.5935E-06, 5.0045E-07, 8.6618E-07, 4.1063E-06, 9.8403E-05,
3.5158E-06, 7.2260E-07]
reader = UR()
packed_data = reader.pack_data(dataraw, binning)
res = np.reshape(np.array(dataraw), binning_reverse).transpose()
self.assertEqual(packed_data, res)
def test_axis_index_lower(self):
axis_data = (-54., 54., 5)
self.assertEqual(UR.get_axis_index(axis_data, -60.), -1)
def test_axis_index_upper(self):
axis_data = (-54., 54., 5)
self.assertEqual(UR.get_axis_index(axis_data, 60.), 5)
def test_axis_index(self):
axis_data = (-50., 50., 5)
self.assertEqual(UR.get_axis_index(axis_data, 0.), 2)
def test_merge_exception(self):
merged_data = {"foo": {"bar": {}, "Binning": []}}
data = {"foo": {"bar": {}, "Binning": []}}
self.assertRaises(InvalidInputError, UR._merge, *[merged_data, data])
|
python
|
# @lc app=leetcode id=211 lang=python3
#
# [211] Design Add and Search Words Data Structure
#
# https://leetcode.com/problems/design-add-and-search-words-data-structure/description/
#
# algorithms
# Medium (41.04%)
# Likes: 3106
# Dislikes: 130
# Total Accepted: 293.8K
# Total Submissions: 712.7K
# Testcase Example: '["WordDictionary","addWord","addWord","addWord","search","search","search","search"]\n' +
# '[[],["bad"],["dad"],["mad"],["pad"],["bad"],[".ad"],["b.."]]'
#
# Design a data structure that supports adding new words and finding if a
# string matches any previously added string.
#
# Implement the WordDictionary class:
#
#
# WordDictionary() Initializes the object.
# void addWord(word) Adds word to the data structure, it can be matched
# later.
# bool search(word) Returns true if there is any string in the data structure
# that matches word or false otherwise. word may contain dots '.' where dots
# can be matched with any letter.
#
#
#
# Example:
#
#
# Input
#
# ["WordDictionary","addWord","addWord","addWord","search","search","search","search"]
# [[],["bad"],["dad"],["mad"],["pad"],["bad"],[".ad"],["b.."]]
# Output
# [null,null,null,null,false,true,true,true]
#
# Explanation
# WordDictionary wordDictionary = new WordDictionary();
# wordDictionary.addWord("bad");
# wordDictionary.addWord("dad");
# wordDictionary.addWord("mad");
# wordDictionary.search("pad"); // return False
# wordDictionary.search("bad"); // return True
# wordDictionary.search(".ad"); // return True
# wordDictionary.search("b.."); // return True
#
#
#
# Constraints:
#
#
# 1 <= word.length <= 500
# word in addWord consists of lowercase English letters.
# word in search consists of '.' or lowercase English letters.
# At most 50000 calls will be made to addWord and search.
#
#
#
# @lc tags=backtracking;design;trie
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Design a structure that stores strings and supports lookup, including the '.' wildcard.
# A trie (prefix tree) is used.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class TreeNode:
def __init__(self, val: str = ''):
self.val = val
self.children = [None] * 26
self.flag = False
class WordDictionary:
def __init__(self):
self.root = TreeNode()
def addWord(self, word: str) -> None:
p = self.root
for c in word:
index = ord(c) - ord('a')
if not p.children[index]:
p.children[index] = TreeNode(c)
p = p.children[index]
p.flag = True
def search(self, word: str) -> bool:
def recur(p: TreeNode, index: int) -> bool:
if index == len(word):
return p.flag
            c = word[index]
            if c == '.':
                # Wildcard: any existing child branch may match this position.
                for child in p.children:
                    if child and recur(child, index + 1):
                        return True
                return False
            nxt = p.children[ord(c) - ord('a')]
            return bool(nxt and recur(nxt, index + 1))
return recur(self.root, 0)
# @lc code=end
# @lc main=start
if __name__ == '__main__':
pass
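    # Minimal usage sketch (assumed; mirrors the example in the problem statement above):
    wd = WordDictionary()
    for w in ("bad", "dad", "mad"):
        wd.addWord(w)
    print(wd.search("pad"))  # False
    print(wd.search("bad"))  # True
    print(wd.search(".ad"))  # True
    print(wd.search("b.."))  # True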
# @lc main=end
|
python
|
from django.shortcuts import render
from django.views.generic import TemplateView
from booking.models import Booking
from userprofile.models import userProfile
class IndexPageView(TemplateView):
template_name = 'main/index.html'
class ChangeLanguageView(TemplateView):
template_name = 'main/change_language.html'
def home(request):
template_name = 'main/index.html'
    context = {'userProfile': userProfile, 'booking': Booking}
return render(request, template_name, context)
|
python
|
from sqlalchemy import (
create_engine, Column, MetaData,
Integer, String, Numeric, DateTime)
from sqlalchemy.orm import class_mapper
from .common import (
generate_sqla_connection_uri,
_insert_entries_from_log_file,
_import_logs_from_folder)
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
uri = generate_sqla_connection_uri("postgresql")
engine = create_engine(uri)
session_maker = sessionmaker(bind=engine)
metadata = MetaData(bind=engine)
Base = declarative_base(metadata=metadata)
def column_names(modelcls):
    return [c.name for c in class_mapper(modelcls).columns]
class WeblogEntry(Base):
__tablename__ = "weblog_entries"
id = Column(Integer, primary_key=True, autoincrement=True)
raw_text = Column(String)
remote_address = Column(String)
remote_user = Column(String, nullable=True)
created_on = Column(DateTime)
method = Column(String)
request_uri = Column(String)
http_version = Column(String, nullable=True)
response_status = Column(Integer, nullable=True)
response_bytes_sent = Column(Integer, nullable=True)
http_referrer = Column(String, nullable=True)
http_user_agent = Column(String, nullable=True)
forwarded_for_ips = Column(String, nullable=True)
hostname = Column(String, nullable=True)
server_name = Column(String, nullable=True)
request_time = Column(
Numeric(precision=10, scale=4))
upstream_status = Column(Integer, nullable=True)
upstream_response_time = Column(
Numeric(precision=10, scale=4))
upstream_response_length = Column(Integer, nullable=True)
clientip = Column(String, nullable=True)
user_id = Column(String, nullable=True)
session_id = Column(String, nullable=True)
def create_tables():
weblog_entries = WeblogEntry.__table__
weblog_entries.create()
def drop_tables():
weblog_entries = WeblogEntry.__table__
weblog_entries.drop()
def insert_entries_from_log_file(filepath):
_insert_entries_from_log_file(
filepath, session_maker, WeblogEntry)
def import_logs_from_folder(folder_path):
_import_logs_from_folder(
folder_path, session_maker, WeblogEntry)
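# Usage sketch (assumed; the folder path is illustrative, not part of the module):
#   create_tables()
#   import_logs_from_folder("/path/to/nginx/logs")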
|
python
|
from __future__ import print_function, division, absolute_import
import numpy as np
import networkx as nx
from visvis import ssdf
from stentseg.utils.new_pointset import PointSet
from stentseg.stentdirect.stentgraph import (StentGraph, check_path_integrity,
_get_pairs_of_neighbours, add_nodes_at_crossings,
_detect_corners, _add_corner_to_edge,
_pop_node, pop_nodes,
prune_very_weak, prune_weak,
prune_clusters, prune_redundant, prune_tails,)
class TestStentGraph:
def test_prune_redundant1(self):
""" Test removing redundant edges on a graph with two triangles
that are connected by a single edge.
"""
# Create two triangles that are connected with a single edge
graph = StentGraph()
graph.add_edge(11, 12, cost=1, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 11, cost=2, ctvalue=50)
#
graph.add_edge(21, 22, cost=2, ctvalue=60)
graph.add_edge(22, 23, cost=3, ctvalue=60)
graph.add_edge(23, 21, cost=1, ctvalue=60)
#
graph.add_edge(21, 11, cost=4, ctvalue=10)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 7
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 6
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 6
prune_redundant(graph, 65)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 5
prune_tails(graph, 2)
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 1
def test_prune_redundant2(self):
""" Test removing redundant edges on a graph with two triangles
        that are connected by two edges, twice.
"""
        # Create two triangles that are connected via two intermediate nodes
graph = StentGraph()
graph.add_edge(11, 12, cost=1, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 11, cost=2, ctvalue=50)
#
graph.add_edge(21, 22, cost=2, ctvalue=60)
graph.add_edge(22, 23, cost=3, ctvalue=60)
graph.add_edge(23, 21, cost=1, ctvalue=60)
#
graph.add_edge(21, 1, cost=4, ctvalue=10)
graph.add_edge(1, 11, cost=4, ctvalue=10)
#
graph.add_edge(22, 2, cost=4, ctvalue=10)
graph.add_edge(2, 12, cost=4, ctvalue=10)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-1
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-1
prune_redundant(graph, 65)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-2
prune_tails(graph, 2)
assert graph.number_of_nodes() == 8-2
assert graph.number_of_edges() == 10-2-2
def test_prune_tails(self):
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
# Tail from 1
graph.add_edge(1, 11, cost=3, ctvalue=50)
graph.add_edge(11, 12, cost=3, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 14, cost=3, ctvalue=50)
# Tail from 2
graph.add_edge(2, 21, cost=3, ctvalue=50)
graph.add_edge(21, 22, cost=3, ctvalue=50)
graph.add_edge(22, 23, cost=3, ctvalue=50)
assert graph.number_of_nodes() == 3+4+3
assert graph.number_of_edges() == 3+4+3
prune_tails(graph, 3)
assert graph.number_of_nodes() == 3+4
assert graph.number_of_edges() == 3+4
prune_tails(graph, 9)
assert graph.number_of_nodes() == 3
assert graph.number_of_edges() == 3
def test_prune_clusters(self):
# Create two small cliques
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
#
graph.add_edge(4, 5, cost=2, ctvalue=50)
graph.add_edge(5, 6, cost=2, ctvalue=50)
graph.add_edge(6, 7, cost=2, ctvalue=50)
graph.add_edge(7, 4, cost=2, ctvalue=50)
# Connect them
graph.add_edge(1, 4, cost=3, ctvalue=50)
# Also add loose node
graph.add_nodes_from([101, 102])
# Remove cliques and check that nothing happened
prune_clusters(graph, 4)
assert graph.number_of_edges() == 8
assert graph.number_of_nodes() == 7
# Remove connection
graph.remove_edge(1, 4)
# Remove cliques and check that one clique is removed
prune_clusters(graph, 4)
assert graph.number_of_edges() == 4
assert graph.number_of_nodes() == 4
# Remove cliques and check that one clique is removed
prune_clusters(graph, 5)
assert graph.number_of_edges() == 0
assert graph.number_of_nodes() == 0
def test_very_weak(self):
# Create simple graph
graph = StentGraph()
graph.add_edge(1, 4, ctvalue=50)
graph.add_edge(1, 5, ctvalue=40)
graph.add_edge(1, 2, ctvalue=30)
graph.add_edge(1, 3, ctvalue=20)
# Remove weak edges
th = 35
prune_very_weak(graph, th)
# Check result
assert graph.number_of_edges() == 2
for (n1, n2) in graph.edges_iter():
assert graph[n1][n2]['ctvalue'] > th
def test_weak1(self):
""" 2
/ | \
5 - 1 - 3
\ | /
4
"""
# Test that indeed only weakest are removed
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(1, 3, cost=3, ctvalue=50) # gets removed
graph.add_edge(1, 4, cost=4, ctvalue=50) # gets removed
graph.add_edge(1, 5, cost=1, ctvalue=50)
#
graph.add_edge(2, 3, cost=1, ctvalue=50)
graph.add_edge(3, 4, cost=1, ctvalue=50)
graph.add_edge(4, 5, cost=1, ctvalue=50)
graph.add_edge(5, 2, cost=1, ctvalue=50)
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 6
for e in graph.edges_iter():
assert e not in [(1, 3), (1, 4)]
def test_weak2(self):
""" 2 5
/ | | \
3 - 1 - 4 - 6
"""
# Test that indeed only weakest are removed
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
#
graph.add_edge(4, 5, cost=2, ctvalue=50)
graph.add_edge(5, 6, cost=2, ctvalue=50)
graph.add_edge(6, 4, cost=2, ctvalue=50)
# Connect two subgraphs with weaker connection
graph.add_edge(1, 4, cost=3, ctvalue=50)
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 6
for e in graph.edges_iter():
assert e not in [(1, 4)]
# Again, now with lower cost (stronger connection)
graph.add_edge(1, 4, cost=1, ctvalue=50)
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 7
# Again, now with high ct value
graph.add_edge(1, 4, cost=3, ctvalue=90)
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 7
def test_weak3(self):
""" 2 456
/ | |
3 - 1 - 0 - 789
"""
# Test that indeed only weakest are removed
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
#
graph.add_edge(4, 5, cost=2, ctvalue=50)
graph.add_edge(5, 6, cost=2, ctvalue=50)
graph.add_edge(6, 4, cost=2, ctvalue=50)
#
graph.add_edge(7, 8, cost=2, ctvalue=50)
graph.add_edge(8, 9, cost=2, ctvalue=50)
graph.add_edge(9, 7, cost=2, ctvalue=50)
# Connect three subgraphs
graph.add_edge(0, 1, cost=2, ctvalue=50)
graph.add_edge(0, 4, cost=3, ctvalue=50) # gets removed
graph.add_edge(0, 7, cost=2, ctvalue=50)
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 9+2
for e in graph.edges_iter():
assert e not in [(0, 4)]
# Connect three subgraphs
graph.add_edge(0, 1, cost=1, ctvalue=50)
graph.add_edge(0, 4, cost=1, ctvalue=50)
graph.add_edge(0, 7, cost=2, ctvalue=50) # gets removed
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 9+2
for e in graph.edges_iter():
assert e not in [(0, 7)]
# Connect three subgraphs
graph.add_edge(0, 1, cost=3, ctvalue=50)
graph.add_edge(0, 4, cost=4, ctvalue=90) # None gets removed
graph.add_edge(0, 7, cost=3, ctvalue=50)
# Prune
prune_weak(graph, 2, 80)
# Check result
assert graph.number_of_edges() == 9+3
def test_pack1(self):
# Custom stent
g = StentGraph(summary='dit is een stent!', lala=3)
g.add_node((10,20), foo=3)
g.add_node((30,40), foo=5)
g.add_edge((1,1), (2,2), bar=10)
g.add_edge((10,20),(1,1), bar=20)
fname = '/home/almar/test.ssdf'
ssdf.save(fname, g.pack())
g2 = StentGraph()
g2.unpack(ssdf.load(fname))
#print(nx.is_isomorphic(g, g2))
assert nx.is_isomorphic(g, g2)
def test_pack2(self):
# Auto generate
import random
n = 500
p=dict((i,(random.gauss(0,2),random.gauss(0,2))) for i in range(n))
g_ = nx.random_geometric_graph(n, 0.1, dim=3, pos=p)
g = StentGraph(summary='dit is een stent!', lala=3)
g.add_nodes_from(g_.nodes_iter())
g.add_edges_from(g_.edges_iter())
fname = '/home/almar/test.ssdf'
ssdf.save(fname, g.pack())
g2 = StentGraph()
g2.unpack(ssdf.load(fname))
#print(nx.is_isomorphic(g, g2))
assert nx.is_isomorphic(g, g2)
def test_pop_node(self):
# Create paths
path1 = PointSet(2)
path1.append(1, 11)
path1.append(1, 12)
path2 = PointSet(2)
path2.append(1, 12)
path2.append(1, 13)
#
path12 = PointSet(2)
path12.append(1, 11)
path12.append(1, 12)
path12.append(1, 13)
# create 4 nodes (6-7-8-9), remove 8
graph = StentGraph()
graph.add_edge(6, 7, cost=4, ctvalue=70)
graph.add_edge(7, 8, cost=2, ctvalue=50, path=path1)
graph.add_edge(8, 9, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 3
assert 8 not in graph.nodes()
assert graph.edge[7][9]['ctvalue'] == 50
assert graph.edge[7][9]['cost'] == 5
assert np.all(graph.edge[7][9]['path'] == path12)
# create 4 nodes (6-8-7-9), remove 7
graph = StentGraph()
graph.add_edge(6, 8, cost=4, ctvalue=70)
graph.add_edge(8, 7, cost=2, ctvalue=50, path=np.flipud(path1))
graph.add_edge(7, 9, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 7)
# Check
assert graph.number_of_nodes() == 3
assert 7 not in graph.nodes()
assert graph.edge[8][9]['ctvalue'] == 50
assert graph.edge[8][9]['cost'] == 5
assert np.all(graph.edge[8][9]['path'] == path12)
# create 4 nodes (7-8-6-9), remove 8
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=np.flipud(path2))
graph.add_edge(8, 6, cost=2, ctvalue=50, path=path1)
graph.add_edge(6, 9, cost=3, ctvalue=60)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 3
assert 8 not in graph.nodes()
assert graph.edge[6][7]['ctvalue'] == 50
assert graph.edge[6][7]['cost'] == 6
assert np.all(graph.edge[6][7]['path'] == path12)
# create 3 nodes in a cycle. It should remove all but one
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=path1)
graph.add_edge(8, 9, cost=2, ctvalue=50, path=path2)
graph.add_edge(9, 7, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 1
assert graph.number_of_edges() == 1
assert 8 not in graph.nodes()
n = graph.nodes()[0]
assert len(graph.edge[n][n]['path']) == 6-1
# create 3 nodes in a cycle, with one subbranch
graph = StentGraph()
graph.add_edge(7, 8, cost=4, ctvalue=70, path=path1)
graph.add_edge(8, 9, cost=2, ctvalue=50, path=path2)
graph.add_edge(9, 7, cost=3, ctvalue=60, path=path2)
graph.add_edge(7, 4, cost=3, ctvalue=60, path=path2)
# Pop
_pop_node(graph, 8)
# Check
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 2
assert 8 not in graph.nodes()
assert len(graph.edge[7][7]['path']) == 6-1
def test_pop_nodes(self):
# Create dummy paths
path1 = PointSet(2)
path1.append(1, 11)
path1.append(1, 12)
# create 4 nodes (6-7-8-9), remove 8
graph = StentGraph()
graph.add_edge(6, 7, cost=4, ctvalue=70, path=path1)
graph.add_edge(7, 8, cost=2, ctvalue=50, path=path1)
graph.add_edge(8, 9, cost=3, ctvalue=60, path=path1)
graph0 = graph.copy()
# Pop straight line
graph = graph0.copy()
pop_nodes(graph)
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 1
assert graph.edge[6][9]['path'].shape[0] == 3+1
# Pop cycle
graph = graph0.copy()
graph.add_edge(9, 6, cost=3, ctvalue=60, path=path1)
pop_nodes(graph)
assert graph.number_of_nodes() == 1
assert graph.number_of_edges() == 1
n = graph.nodes()[0]
assert graph.edge[n][n]['path'].shape[0] == 4+1+1 # cycle
        # it is arbitrary which node remains
# Pop with one side branch popping
graph = graph0.copy()
graph.add_edge(7, 2, cost=3, ctvalue=60, path=path1)
pop_nodes(graph)
assert graph.number_of_nodes() == 4
assert graph.number_of_edges() == 3
assert graph.edge[7][9]['path'].shape[0] == 2+1
# Pop with one prevent popping
graph = graph0.copy()
graph.node[7]['nopop'] = True
pop_nodes(graph)
assert graph.number_of_nodes() == 3
assert graph.number_of_edges() == 2
assert graph.edge[7][9]['path'].shape[0] == 2+1
def test_detect_corners(self):
path = PointSet(3)
path.append(10, 2, 0)
path.append(11, 3, 0)
path.append(12, 4, 0)
path.append(13, 5, 0)
path.append(14, 6, 0)
path.append(15, 7, 0) # top
path.append(16, 6, 0)
path.append(17, 5, 0)
path.append(18, 4, 0)
path.append(19, 3, 0)
path.append(20, 2, 0) # bottom
path.append(21, 3, 0)
path.append(22, 4, 0)
path.append(23, 5, 0)
path.append(24, 6, 0)
path.append(25, 7, 0) # top
path.append(26, 6, 0)
path.append(27, 5, 0)
path.append(28, 4, 0)
path.append(29, 3, 0)
path0 = path
for i in range(3):
path = path0.copy()
path[:,2] = path[:,i]
path[:,i] = 0
# Test that _detect_corners detects the indices correctly
I = _detect_corners(path, smoothFactor=1)
assert I == [5, 10, 15]
# Test that _add_corner_to_edge constructs the graph and splits
# the path in the correct way
graph = StentGraph()
n1, n5 = tuple(path[0].flat), tuple(path[-1].flat)
n2, n3, n4 = tuple(path[5].flat), tuple(path[10].flat), tuple(path[15].flat)
graph.add_edge(n1, n5, path=path, cost=0, ctvalue=0)
_add_corner_to_edge(graph, n1, n5, smoothFactor=1)
assert graph.number_of_nodes() == 5
assert graph.number_of_edges() == 4
for n in [n1, n2, n3, n4, n5]:
assert n in graph.nodes()
path12, path23, path34, path45 = path[0:6], path[5:11], path[10:16], path[15:20]
if n1 > n2: path12 = np.flipud(path12)
if n2 > n3: path23 = np.flipud(path23)
if n3 > n4: path34 = np.flipud(path34)
if n4 > n5: path45 = np.flipud(path45)
assert np.all(graph.edge[n1][n2]['path'] == path12)
assert np.all(graph.edge[n2][n3]['path'] == path23)
assert np.all(graph.edge[n3][n4]['path'] == path34)
assert np.all(graph.edge[n4][n5]['path'] == path45)
def test_pairs(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 3)
graph.add_edge(1, 4)
graph.add_edge(1, 5)
#
graph.add_edge(2, 6)
graph.add_edge(2, 7)
#
graph.add_edge(3, 8)
pairs1 = _get_pairs_of_neighbours(graph, 1)
assert pairs1 == [(2, 3), (2, 4), (2, 5), (3, 4), (3, 5), (4, 5)]
pairs2 = _get_pairs_of_neighbours(graph, 2)
assert pairs2 == [(1, 6), (1, 7), (6, 7)]
pairs3 = _get_pairs_of_neighbours(graph, 3)
assert pairs3 == [(1, 8)]
pairs4 = _get_pairs_of_neighbours(graph, 4)
assert pairs4 == []
def test_add_nodes_at_crossings1(self):
# N4---N1=====-------N2
# |
# N3
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
path1.append(10, 8, 0)
#
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 4
for n in (n1, n2, n3, n4):
assert n in graph.nodes()
        # Deal with crossings
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 5
added_node = 10, 5, 0
for n in (n1, n2, n3, n4, added_node):
assert n in graph.nodes()
def test_add_nodes_at_crossings2(self):
# N4---N1=====-------====N2
# | |
# N3 N5
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
path1.append(10, 8, 0)
path1.append(10, 9, 0)
#
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
#
path5 = PointSet(3) # path from n2 to n5 (note the order)
path5.append(10, 9, 0) # dup path1
path5.append(10, 8, 0) # dup path1
path5.append(10, 7, 0) # dup path1
path5.append(11, 7, 0)
path5.append(12, 7, 0)
path5.append(13, 7, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
n5 = tuple(path5[-1].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
graph.add_edge(n5, n2, path=path5, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 5
for n in (n1, n2, n3, n4, n5):
assert n in graph.nodes()
        # Deal with crossings
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 7
added_node1 = 10, 5, 0
added_node2 = 10, 7, 0
for n in (n1, n2, n3, n4, n5, added_node1, added_node2):
assert n in graph.nodes()
def test_add_nodes_at_crossings3(self):
# N4---N1>>>>>======-------N2
# | |
# N3 N5
path1 = PointSet(3) # path from n1 to n2
path1.append(10, 2, 0)
path1.append(10, 3, 0)
path1.append(10, 4, 0)
path1.append(10, 5, 0)
#
path3 = path1.copy() # path to n3
path3.append(11, 5, 0)
path3.append(12, 5, 0)
path3.append(13, 5, 0)
#
path1.append(10, 6, 0)
path1.append(10, 7, 0)
#
path5 = path1.copy()
path5.append(11, 7, 0)
path5.append(12, 7, 0)
path5.append(13, 7, 0)
#
path1.append(10, 8, 0)
path1.append(10, 9, 0)
#
path4 = PointSet(3) # path to n4
path4.append(10, 0, 0)
path4.append(10, 1, 0)
path4.append(10, 2, 0)
graph = nx.Graph()
n1 = tuple(path1[0].flat)
n2 = tuple(path1[-1].flat)
n3 = tuple(path3[-1].flat)
n4 = tuple(path4[0].flat)
n5 = tuple(path5[-1].flat)
graph.add_edge(n1, n2, path=path1, cost=3, ctvalue=3)
graph.add_edge(n1, n3, path=path3, cost=3, ctvalue=3)
graph.add_edge(n1, n4, path=path4, cost=3, ctvalue=3)
graph.add_edge(n1, n5, path=path5, cost=3, ctvalue=3)
# Pre-check
assert len(graph.nodes()) == 5
for n in (n1, n2, n3, n4, n5):
assert n in graph.nodes()
        # Deal with crossings
add_nodes_at_crossings(graph)
# Check result
check_path_integrity(graph)
assert len(graph.nodes()) == 7
added_node1 = 10, 5, 0
added_node2 = 10, 7, 0
for n in (n1, n2, n3, n4, n5, added_node1, added_node2):
assert n in graph.nodes()
if __name__ == "__main__":
# Run test. Nose is acting weird. So wrote a little test runner myself:
test = TestStentGraph()
for m in dir(test):
if m.startswith('test_'):
print('Running %s ... ' % m, end='')
try:
getattr(test, m)()
except AssertionError as err:
print('Fail')
raise
except Exception:
print('Error')
raise
else:
print("Ok")
# Create simple graph
graph = StentGraph()
graph.add_edge(1, 4, cost=5)
graph.add_edge(1, 5, cost=4)
graph.add_edge(1, 2, cost=3)
graph.add_edge(1, 3, cost=2)
|
python
|
from pyspark import SparkContext
from pyspark.sql.functions import count, lit, col, udf, collect_list, explode, sqrt, mean
from pyspark.sql.types import IntegerType, StringType, MapType, ArrayType, BooleanType, FloatType
from pyspark.sql import SQLContext, HiveContext
import sys
def filter_slot_id(df, slot_ids=None):
    # Keep only rows whose uckey's slot id (the second comma-separated field) is in slot_ids.
    if slot_ids:
        in_slots = udf(lambda uckey: uckey.split(",")[1] in slot_ids, BooleanType())
        return df.filter(in_slots(df.uckey))
    else:
        sys.exit("empty slot ids")
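# Illustrative call (assumed; the slot ids are hypothetical):
#   df_filtered = filter_slot_id(df, slot_ids=['11', '12'])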
if __name__ == '__main__':
sc = SparkContext.getOrCreate()
sqlContext = SQLContext(sc)
hive_context = HiveContext(sc)
sc.setLogLevel('ERROR')
dates = ['2021-06-02', '2021-06-03', '2021-06-04', '2021-06-05', '2021-06-06', '2021-06-07', '2021-06-08', '2021-06-09', '2021-06-10', '2021-06-11', '2021-06-12', '2021-06-13', '2021-06-14', '2021-06-15', '2021-06-16', '2021-06-17', '2021-06-18', '2021-06-19', '2021-06-20', '2021-06-21', '2021-06-22', '2021-06-23', '2021-06-24', '2021-06-25', '2021-06-26', '2021-06-27', '2021-06-28', '2021-06-29', '2021-06-30', '2021-07-01', '2021-07-02', '2021-07-03', '2021-07-04', '2021-07-05', '2021-07-06', '2021-07-07', '2021-07-08', '2021-07-09', '2021-07-10', '2021-07-11', '2021-07-12', '2021-07-13', '2021-07-14', '2021-07-15', '2021-07-16', '2021-07-17', '2021-07-18', '2021-07-19', '2021-07-20', '2021-07-21', '2021-07-22', '2021-07-23', '2021-07-24', '2021-07-25', '2021-07-26', '2021-07-27', '2021-07-28', '2021-07-29', '2021-07-30']
table = 'factdata'
for id in range(0, len(dates)-1):
command = """select count_array,day,uckey from {} where day = '{}'"""
print("Running command:", command.format(table, dates[id]))
df = hive_context.sql(command.format(table, dates[id]))
df = df.select(df.uckey, df.day, explode(df.count_array))
df = df.withColumn('col', udf(lambda x: str(x).split(":"), ArrayType(StringType()))(df.col))
df = df.select(df.uckey, df.day, df.col[1]).withColumnRenamed("col[1]", "actual_impr")
df = df.withColumn('actual_impr', udf(lambda x: int(x), IntegerType())(df.actual_impr))
df = df.groupBy('uckey').sum('actual_impr').withColumnRenamed("sum(actual_impr)", 'total')
df.createOrReplaceTempView("impr_temp_table")
command = """INSERT OVERWRITE TABLE {} PARTITION (pt_d='{}') select uckey, total from impr_temp_table""".format(
'dws_pps_ims_impr_his_data_dm', dates[id])
hive_context.sql(command)
print('Processed data for ', dates[id])
sc.stop()
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import datetime
import time
import traceback
import unittest
from ssguan.ignitor.base.error import Error, ProgramError
from ssguan.ignitor.orm import dbpool, properti, config as orm_config
from ssguan.ignitor.base.error import LengthError, RangeError, TypeFloatError
from ssguan.ignitor.orm.model import Model, BaseModel, BaseQuery
from ssguan.ignitor.orm.validator import RangeValidator
from ssguan.ignitor.utility import kind
class BaseModelTest(unittest.TestCase):
class TModel(BaseModel):
@classmethod
def meta_domain(cls):
return "test"
f_str = properti.StringProperty()
f_int = properti.IntegerProperty()
f_bool = properti.BooleanProperty()
f_float = properti.FloatProperty()
f_date = properti.DateProperty()
f_datetime = properti.DateTimeProperty()
f_v = properti.StringProperty(length=8)
f_str1 = properti.StringProperty(persistent=False)
f_json = properti.DictProperty()
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
cls.TModel.create_schema()
def test_exist_property(self):
tm = self.TModel().create()
t = self.TModel.exist_property("f_str")
self.assertTrue(t)
t = self.TModel.exist_property("f_str1")
self.assertFalse(t)
tm.delete()
def test_add_property(self):
tm = self.TModel().create()
t = self.TModel.add_property("f_str11", properti.StringProperty())
self.assertTrue(t)
t = self.TModel.add_property("f_str11", properti.StringProperty())
self.assertFalse(t)
tm.delete()
def test_change_property(self):
tm = self.TModel().create()
self.TModel.add_property("f_str33344", properti.StringProperty())
t = self.TModel.exist_property("f_str33344")
self.assertTrue(t)
t = self.TModel.change_property("f_str33344", "f_str444", properti.StringProperty( length=200))
self.assertTrue(t)
t = self.TModel.exist_property("f_str33344")
self.assertFalse(t)
t = self.TModel.exist_property("f_str444")
self.assertTrue(t)
tm.delete()
def test_drop_property(self):
tm = self.TModel().create()
self.TModel.add_property("f_str555", properti.StringProperty())
t = self.TModel.exist_property("f_str555")
self.assertTrue(t)
t = self.TModel.drop_property("f_str555")
self.assertTrue(t)
t = self.TModel.exist_property("f_str555")
self.assertFalse(t)
tm.delete()
def test_create_and_get_by_key(self):
tmodel = self.TModel()
tmodel.f_str = "abcd"
tmodel.f_int = 100
tmodel.f_float = 1000.0
tmodel.f_bool = False
tmodel.f_date = properti.DateProperty.utctoday()
utc = kind.utcnow()
utc = utc.replace(microsecond=0)
tmodel.f_datetime = utc
tmodel.create()
self.assertIsNotNone(tmodel.get_keyvalue())
self.assertNotEqual(tmodel.get_keyvalue(), 0)
tmodel2 = self.TModel.get_by_key(tmodel.get_keyvalue())
self.assertEqual(tmodel.f_str, tmodel2.f_str)
self.assertEqual(tmodel.f_int, tmodel2.f_int)
self.assertEqual(tmodel.f_float, tmodel2.f_float)
self.assertEqual(tmodel.f_bool, tmodel2.f_bool)
self.assertEqual(utc, tmodel2.f_datetime)
self.assertEqual(properti.DateProperty.utctoday(), tmodel2.f_date)
tmodel = self.TModel()
tmodel.f_str = "eeeffffee"
tmodel.f_json = {'1':1}
tmodel.create(key="33333")
tm = tmodel.get_by_key("33333")
self.assertEqual(tm.f_str, tmodel.f_str)
self.assertEqual(tm.f_json, tmodel.f_json)
tm.delete()
def test_model_extproperties_sql_alias(self):
query = self.TModel.all()
query.what("_id", "u1")
query.what("f_str", "s1")
query.what("f_bool", "b1")
tmodel = query.get()
extprops = tmodel._extproperties
if orm_config.get_dbinfo(self.TModel).is_mongo():
self.assertFalse('u1' in extprops)
self.assertFalse('s1' in extprops)
self.assertFalse('b1' in extprops)
self.assertEqual(len(extprops.keys()), 0)
else:
self.assertTrue('u1' in extprops)
self.assertTrue('s1' in extprops)
self.assertTrue('b1' in extprops)
self.assertEqual(len(extprops.keys()), 3)
def test_model_properties(self):
props = self.TModel.get_properties()
self.assertTrue('f_str' in props)
self.assertTrue('f_int' in props)
self.assertTrue('f_bool' in props)
self.assertTrue('f_float' in props)
self.assertTrue('f_datetime' in props)
self.assertTrue('f_date' in props)
self.assertTrue('_id' in props)
self.assertTrue('f_str1' in props)
self.assertEqual(len(props.keys()), 10)
def test_get_properties(self):
props = self.TModel.get_properties(persistent=True)
self.assertEqual(len(props.keys()), 9)
props = self.TModel.get_properties(persistent=False)
self.assertEqual(len(props.keys()), 1)
def test_is_persistent(self):
self.assertTrue(self.TModel.is_persistent("f_str"))
self.assertFalse(self.TModel.is_persistent("f_str1"))
self.assertTrue(self.TModel.is_persistent())
def test_has_prop(self):
tmodel = self.TModel(a="ccc")
b = hasattr(tmodel, "a")
self.assertTrue(b)
b = hasattr(tmodel, "bbb")
self.assertFalse(b)
def test_model_init(self):
utc = datetime.datetime.utcnow()
utc = utc.replace(microsecond=0)
tmodel = self.TModel(f_str="abcdef", f_bool=False, f_int=99909, f_datetime=utc, f_float=999.019, e_u1='风华', e_x2=192)
self.assertEqual(tmodel.f_str, "abcdef")
self.assertEqual(tmodel.f_int, 99909)
self.assertEqual(tmodel.f_float, 999.019)
self.assertEqual(tmodel.f_bool, False)
self.assertEqual(tmodel.f_datetime, utc)
self.assertEqual(tmodel.e_u1, '风华')
self.assertEqual(tmodel.e_x2, 192)
def test_model_all(self):
q = self.TModel.all()
self.assertTrue(isinstance(q, BaseQuery))
def test_length_validate(self):
try:
tmodel = self.TModel()
tmodel.create()
tmodel.f_v = "1213456789"
tmodel.create()
self.assertTrue(False)
except Error as e:
self.assertIsInstance(e, LengthError)
def test_basemodel(self):
class TBModel(BaseModel):
@classmethod
def meta_domain(cls):
return "test"
aaa = properti.IntegerProperty(required=True)
TBModel.create_schema()
tmodel1 = TBModel()
tmodel1.aaa = 2
tmodel1.create()
query = TBModel.all()
self.assertTrue(query.count(), 1)
tmodel2 = query.get()
self.assertEqual(tmodel1.aaa, tmodel2.aaa)
def test_delete(self):
query = self.TModel.all()
tmodel = query.get()
key = tmodel.get_keyvalue()
tmodel.delete()
if orm_config.get_dbinfo(self.TModel).is_mongo():
query.clear()
ql = {'_id':key}
query.ql(ql)
result = query.count()
self.assertEqual(result, 0)
else:
query.clear()
sql = "select * from %s where _id = $uid" % tmodel.get_modelname()
query.ql(sql, {'uid':key})
result = query.count()
self.assertEqual(result, 0)
def test_delete_schema(self):
self.TModel.delete_schema()
result = self.TModel.has_schema()
self.assertFalse(result)
self.TModel.create_schema()
@classmethod
def tearDownClass(cls):
cls.TModel.delete_schema()
dbpool.drop_db(orm_config.get_default_dbinfo())
class BaseQueryTest(unittest.TestCase):
class TModel1(BaseModel):
@classmethod
def meta_domain(cls):
return "test"
f_str = properti.StringProperty()
f_int = properti.IntegerProperty()
f_bool = properti.BooleanProperty()
f_float = properti.FloatProperty()
f_date = properti.DateProperty()
f_datetime = properti.DateTimeProperty(auto_utcnow=True)
f_str1 = properti.StringProperty(persistent=False)
f_json = properti.DictProperty()
class TSubModel1(BaseModel):
@classmethod
def meta_domain(cls):
return "test"
t_sub_name = properti.StringProperty()
t_sub_bool = properti.BooleanProperty(default=True)
t_model_id = properti.StringProperty()
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
cls.TModel1.create_schema()
cls.TSubModel1.create_schema()
def test_filter_ex(self):
query = self.TModel1().all()
t1 = self.TModel1(f_str="dddd").create()
t2 = self.TModel1(f_str="dddd2").create()
t3 = self.TModel1(f_str="dddd3").create()
query.filter("ex ex", 1)
query.filter("ex ex", "'a'='a'", wrapper=False)
try:
i = query.count()
self.assertEqual(i, 3)
except ProgramError as e:
if orm_config.get_dbinfo(self.TModel1).is_mongo():
self.assertIn("comparison ex", str(e))
t1.delete()
t2.delete()
t3.delete()
def test_filter_is(self):
query = self.TModel1().all()
query.filter("f_int is", None)
i = query.count()
self.assertEqual(i, 0)
def test_filter_is_not(self):
query = self.TModel1().all()
query.filter("f_int is not", None)
i = query.count()
self.assertEqual(i, 3)
def test_filter_equal(self):
tm = self.TModel1(f_int=1, f_str="abcd", f_bool=False, f_float=2.5, f_datetime=kind.utcnow()).create()
tm2 = self.TModel1(f_bool=True).create()
query = self.TModel1().all()
query.filter("f_int =", 1)
i = query.count()
self.assertEqual(i, 1)
tmodel = query.get()
self.assertEqual(tmodel.f_str, "abcd")
self.assertEqual(tmodel.f_int, 1)
self.assertEqual(tmodel.f_bool, False)
self.assertEqual(tmodel.f_float, 2.5)
self.assertIsNotNone(tmodel.f_datetime)
query.clear()
query.filter("f_bool =", True)
i = query.count()
self.assertEqual(i, 1)
tm.delete()
tm2.delete()
def test_filter_less_equal(self):
query = self.TModel1.all()
query.delete()
t1 = self.TModel1(f_int=1, f_datetime=kind.utcnow()).create()
t2 = self.TModel1(f_int=2, f_datetime=kind.utcnow()).create()
t3 = self.TModel1(f_int=3, f_datetime=kind.utcnow()).create()
query = self.TModel1().all()
query.filter("f_int <", 2)
i = query.count()
self.assertEqual(i, 1)
query.filter("f_int <=", 2, replace=True)
i = query.count()
self.assertEqual(i, 2)
query.clear()
query.filter("f_datetime <=", kind.utcnow() + timedelta(minutes=1))
i = query.count()
self.assertEqual(i, 3)
t1.delete()
t2.delete()
t3.delete()
def test_filter_more_equal(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_int=11).create()
self.TModel1(f_int=2).create()
self.TModel1(f_int=13).create()
query = self.TModel1().all()
query.filter("f_int >", 2)
i = query.count()
self.assertEqual(i, 2)
query.filter("f_int >=", 1, replace=True)
query.clear()
i = query.count()
self.assertEqual(i, 3)
def test_filter_replace(self):
query = self.TModel1().all()
query.delete()
tm1 = self.TModel1(f_str="adfadf1", f_int=1).create()
tm2 = self.TModel1(f_str="adfadf2", f_int=2).create()
tm3 = self.TModel1(f_str="adfadf3", f_int=3).create()
query.filter("f_int <", 2)
i = query.count()
self.assertEqual(i, 1)
query.clear()
query.filter("f_int <", 2)
query.filter("f_int <=", 2, replace=False)
i = query.count()
self.assertEqual(i, 1)
query.clear()
query.filter("f_int <", 2)
query.filter("f_int <=", 2, replace=True)
i = query.count()
self.assertEqual(i, 2)
tm1.delete()
tm2.delete()
tm3.delete()
def test_filter_not_equal(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_int=1).create()
self.TModel1(f_int=2).create()
self.TModel1(f_int=3).create()
query = self.TModel1().all()
query.filter("f_int !=", 1)
query.filter("f_int !=", 2)
i = query.count()
self.assertEqual(i, 1)
def test_filter_in(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_int=1).create()
self.TModel1(f_int=2).create()
self.TModel1(f_int=3).create()
query = self.TModel1().all()
query.filter("f_int in", [1, 2])
i = query.count()
self.assertEqual(i, 2)
query.clear()
query.filter('f_int in', [1, 3], replace=True)
i = query.count()
self.assertEqual(i, 2)
def test_filter_in_sql(self):
if orm_config.get_dbinfo(self.TModel1).is_mongo():
return None
query = self.TModel1().all()
query.clear()
query.filter("f_int in", " (select 4)", wrapper=False)
i = query.count()
self.assertEqual(i, 0)
try:
query.clear()
query.filter("f_int in", " (select 4)")
i = query.count()
self.assertTrue(False)
except ProgramError as e:
self.assertIn("Argument", str(e))
def test_filter_not_in(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_int=4).create()
self.TModel1(f_int=5).create()
self.TModel1(f_int=6).create()
query = self.TModel1().all()
query.filter("f_int not in", [1, 2])
i = query.count()
self.assertEqual(i, 3)
def test_filter_not_in_sql(self):
if orm_config.get_dbinfo(self.TModel1).is_mongo():
return None
query = self.TModel1().all()
query.delete()
t1 = self.TModel1(f_int=2).create()
t2 = self.TModel1(f_int=3).create()
query.filter("f_int not in", " (select 4)", wrapper=False)
i = query.count()
self.assertEqual(i, 2)
try:
query.clear()
query.filter("f_int not in", " (select 4)")
i = query.count()
self.assertTrue(False)
except ProgramError as e:
self.assertIn("Argument", str(e))
t1.delete()
t2.delete()
def test_filter_like(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_str="2xd1").create()
self.TModel1(f_str="dzzz").create()
self.TModel1(f_str="xxxx").create()
self.TModel1(f_int=23).create()
self.TModel1(f_int=33).create()
query = self.TModel1().all()
query.filter("f_str like", '%d%')
i = query.count()
self.assertEqual(i, 2)
query.clear()
query.filter("f_str like", 'd%', replace=True)
i = query.count()
self.assertEqual(i, 1)
query.clear()
query.filter("f_str like", '%d', replace=True)
i = query.count()
self.assertEqual(i, 0)
def test_filter_type(self):
query = self.TModel1().all()
try:
query.filter("f_int =", 'a')
self.assertTrue(False)
except ProgramError as e:
self.assertIn('is not the type', str(e))
try:
query.filter("f_str =", 2)
self.assertTrue(False)
except ProgramError as e:
self.assertIn('is not the type', str(e))
try:
query.filter("f_datetime =", 'a')
self.assertTrue(False)
except ProgramError as e:
self.assertIn('is not the type', str(e))
try:
query.filter("f_bool =", 'a')
self.assertTrue(False)
except ProgramError as e:
self.assertIn('is not the type', str(e))
try:
query.filter("f_bool =", 1)
self.assertTrue(True)
except ProgramError as e:
self.assertTrue(False)
def test_filter_logic(self):
tm = self.TModel1(f_int=1).create()
tm2 = self.TModel1(f_int=2).create()
tm3 = self.TModel1(f_int=3).create()
query = self.TModel1().all()
try:
query.filter("f_str =", "a", logic="ax")
except ProgramError as e:
self.assertIn('is not and AND or', str(e))
query.filter('f_int =', 1)
query.filter('f_int =', 2, logic="or")
i = query.count()
self.assertEqual(i, 2)
tm.delete()
tm2.delete()
tm3.delete()
def test_filter_parenthesis(self):
query = self.TModel1().all()
try:
query.filter("f_str =", "a", logic="ax")
except ProgramError as e:
self.assertIn('is not and AND or', str(e))
query.clear()
query.filter('f_int =', 1)
query.filter('f_str =', 'abced', parenthesis='(')
query.filter('f_str =', 'xfg', logic="or", parenthesis=')')
i = query.count()
self.assertEqual(i, 0)
query.clear()
query.filter('f_int =', 1, parenthesis="(")
query.filter('f_str =', 'abced', parenthesis='(')
query.filter('f_str =', 'xfg', logic="or", parenthesis='))')
i = query.count()
self.assertEqual(i, 0)
def test_filter_x(self):
query = self.TModel1().all()
filter1 = [{'property_op':'f_int', 'value':1}, {'property_op':'f_str', 'value':'abced', 'parenthesis':'('}, {'property_op':'f_str', 'value':'xfg', 'parenthesis':')', 'logic':'or'}]
query.filter_x(filter1)
i = query.count()
self.assertEqual(i, 0)
query.clear()
query.filter("f_str =", "a")
query.filter_x(filter1, logic="or", parenthesis_left="(", parenthesis_right=")")
i = query.count()
self.assertEqual(i, 0)
def test_has_filter(self):
query = self.TModel1().all()
query.filter('f_int =', 1)
b = query.has_filter('f_int')
self.assertTrue(b)
b = query.has_filter('f_int', operator='=')
self.assertTrue(b)
b = query.has_filter('f_int', operator='>')
self.assertFalse(b)
def test_filter_illegal(self):
query = self.TModel1().all()
try:
query.filter("f_str -a", "abcd")
except ProgramError as e:
self.assertIn("includes the illegal operator", str(e))
query = self.TModel1().all()
try:
query.filter("f_str1 =", "abcd")
self.assertTrue(False)
except ProgramError as e:
self.assertIn("persistent", str(e))
def test_filter_wrapper(self):
query = self.TModel1.all(alias="a")
query.filter("a._id =", "b.t_id")
query.what("a._id", "uid")
try:
query.filter("a.f_str =", 'abcd', wrapper=False)
query.clear()
query.fetch()
except Exception:
self.assertIn("Unknown column 'abcd", traceback.format_exc())
query = self.TModel1.all("a")
query.filter("a.f_date =", properti.DateProperty.utctoday())
query.fetch(1)
self.assertTrue(True)
query.filter("a.f_datetime =", properti.DateTimeProperty.utcnow())
query.clear()
query.fetch(1)
self.assertTrue(True)
def test_order(self):
query = self.TModel1().all()
query.delete()
self.TModel1(f_int=1).create()
self.TModel1(f_int=2).create()
self.TModel1(f_int=3).create()
query.order("f_int")
tmodel = query.get()
self.assertEqual(tmodel.f_int, 1)
query = self.TModel1().all()
query.order("-f_int")
tmodel = query.get()
self.assertEqual(tmodel.f_int, 3)
def test_alias(self):
tm = self.TModel1(f_int=1).create()
query = self.TModel1.all(alias="a")
query.filter("a.f_int =", 1)
i = query.count()
self.assertEqual(i, 1)
query = self.TModel1.all()
query.filter("f_int =", 1)
i = query.count()
self.assertEqual(i, 1)
query = self.TModel1.all(alias="b")
query.what("b._id")
query.filter("b.f_int =", 1)
i = query.count()
self.assertEqual(i, 1)
query.what("b.f_int", "cccc")
m = query.get()
if orm_config.get_dbinfo(self.TModel1).is_mongo():
self.assertEqual(m.f_int, 1)
else:
self.assertEqual(m.cccc, 1)
tm.delete()
def test_what(self):
query = self.TModel1().all("a")
query.delete()
query = self.TModel1().all("a")
self.TModel1(f_int=1, f_str="abcd").create()
query.what("a.f_int")
query.what("a.f_str")
query.filter("a.f_int =", 1)
result = query.get()
self.assertEqual(result.f_int, 1)
self.assertEqual(result.f_str, 'abcd')
query = self.TModel1().all("a")
query.what("a.f_int", 'fint')
query.what("a.f_str", "fstr")
query.filter("a.f_int =", 1)
result = query.get()
if orm_config.get_dbinfo(self.TSubModel1).is_mongo():
self.assertIsNotNone(result.f_int)
self.assertIsNotNone(result.f_str)
self.assertEqual(len(result._extproperties), 0)
else:
self.assertIsNone(result.f_int)
self.assertIsNone(result.f_str)
self.assertEqual(result.fstr, 'abcd')
self.assertEqual(result.fint, 1)
self.assertEqual(len(result._extproperties), 2)
def test_distinct(self):
""""""
def test_group(self):
query = self.TSubModel1.all()
query.delete()
t1 = self.TSubModel1(t_sub_name="_sub_xxx", t_model_id='2').create()
t2 = self.TSubModel1(t_sub_name="_sub_yyy", t_model_id='1').create()
if orm_config.get_dbinfo(self.TSubModel1).is_mongo():
query = self.TSubModel1.all()
query.group([{ "$group": { "_id": None, "count": { "$sum": 1 } } }])
result = query.count()
self.assertEqual(result, 2)
else:
query = self.TSubModel1.all()
query.what("t_model_id")
query.what("count(*)", "num")
query.group("t_model_id having num >= 0")
query.order("-t_model_id")
i = len(query.fetch())
self.assertEqual(i, 2)
t1.delete()
t2.delete()
def test_sql(self):
if orm_config.get_dbinfo(self.TModel1).is_mongo():
return None
query = self.TSubModel1.all("a")
query.delete()
t1 = self.TSubModel1(t_sub_name="a_sub_xy").create()
t2 = self.TSubModel1(t_sub_name="c_sub_ccxy").create()
t3 = self.TSubModel1(t_sub_name="a_subeexy").create()
t4 = self.TSubModel1(t_sub_name="a_subbaxy").create()
t5 = self.TSubModel1(t_sub_name="bba_swubbaxy").create()
query.ql("select * from %s where t_sub_name like $tname" % self.TSubModel1.get_modelname(), {'tname':"%sub%"})
i = query.count()
self.assertEqual(i, 4)
result = query.get()
self.assertTrue(isinstance(result.t_sub_bool, bool))
self.assertEqual(result.t_sub_bool, True)
query = self.TSubModel1.all()
query.ql("select * from %s where t_sub_name like $tname" % self.TSubModel1.get_modelname(), {'tname':"%sub%"})
result = query.get(metadata={"t_sub_bool":properti.BooleanProperty("tsubbool")})
self.assertTrue(isinstance(result.t_sub_bool, bool))
self.assertEqual(result.t_sub_bool, True)
t1.delete()
t2.delete()
t3.delete()
t4.delete()
t5.delete()
def test_delete(self):
query = self.TSubModel1.all()
query.delete()
self.TSubModel1(t_sub_name="_sub_xxx").create()
self.TSubModel1(t_sub_name="_sub_yyy").create()
self.TSubModel1(t_sub_name="asee").create()
query.filter("t_sub_name like", "%_sub_%")
result = query.delete()
self.assertEqual(result, 2)
query.clear()
if not orm_config.get_dbinfo(self.TSubModel1).is_mongo():
query.ql("delete from %s where t_sub_name = '_sub_'" % self.TSubModel1.get_modelname())
query.delete()
def test_update(self):
tmodel = self.TModel1()
tmodel.f_int = 10000
tmodel.f_float = 1.0
tmodel.f_str = "ccccupdate"
tmodel.create()
query = tmodel.all()
try:
query.set("f_float", "2a.0")
self.assertTrue(False)
except TypeFloatError:
self.assertTrue(True)
query.set("f_str", "dddupdate")
query.set("f_datetime", datetime.datetime.utcnow())
query.filter("f_int =", 10000)
query.update()
query = self.TModel1.all()
query.filter("f_int =", 10000)
tm = query.get()
self.assertEqual(tm.f_str, "dddupdate")
query = tmodel.all()
query.set("f_int inc", 2)
query.set("f_float mul", 3.0)
query.set("f_str set", "setsetup")
query.filter("f_int =", 10000)
query.update()
tm23 = self.TModel1.get_by_key(tm.key())
self.assertEqual(tm23.f_int, 10002)
self.assertEqual(tm23.f_float, 3.0)
self.assertEqual(tm23.f_str, 'setsetup')
def test_find_one_and_update(self):
tmodel = self.TModel1()
tmodel.f_int = 200000
tmodel.f_float = 1.0
tmodel.f_str = "ccccupdatccccce"
tmodel.create().key()
tmodel = self.TModel1()
tmodel.f_int = 300000
tmodel.f_float = 1.0
tmodel.f_str = "32323dadsasddf23"
query = self.TModel1.all()
query.filter("f_int >=", 150000)
query.order("f_int")
query.set("f_str", "uuppa")
m2 = query.find_one_and_update()
self.assertEqual(m2.f_str, "ccccupdatccccce")
query = self.TModel1.all()
query.filter("f_int >=", 4450000)
query.set("f_str", "uuppa")
m2 = query.find_one_and_update()
self.assertIsNone(m2, None)
tmodel = self.TModel1()
tmodel.f_int = 800000
tmodel.f_float = 8.8
tmodel.f_str = "aaa"
key = tmodel.create().key()
query = tmodel.all()
query.set("f_int inc", 222)
query.set("f_float mul", 3.0)
query.set("f_str set", "ccaaedd")
query.filter("f_int =", 800000)
query.update()
tm23 = self.TModel1.get_by_key(key)
self.assertEqual(tm23.f_int, 800000 + 222)
self.assertEqual(round(tm23.f_float, 1), round(8.8 * 3.0, 1))
self.assertEqual(tm23.f_str, 'ccaaedd')
def test_find_one_and_delete(self):
tmodel = self.TModel1()
tmodel.f_int = 300000
tmodel.f_float = 1.0
tmodel.f_str = "32323dadsasddf23"
id3 = tmodel.create().key()
query = self.TModel1.all()
query.filter("f_int >=", 150000)
query.order("-f_int")
m2 = query.find_one_and_delete()
self.assertEqual(m2.f_str, "32323dadsasddf23")
tm3 = self.TModel1.get_by_key(id3)
self.assertIsNone(tm3, None)
query = self.TModel1.all()
query.filter("f_int >=", 1150000)
query.set("f_str", "uuppccca")
m21 = query.find_one_and_update()
self.assertIsNone(m21, None)
query.filter("f_int >=", 2150000)
m21 = query.find_one_and_delete()
self.assertIsNone(m21, None)
def test_mocallback(self):
tmodel = self.TModel1()
tmodel.f_str = "testmodelproc"
tmodel.f_int = 8
tmodel.create()
query = self.TModel1().all()
query.filter("f_str =", "testmodelproc")
mp = "aaaabbbb"
def mproc(tm):
tm.mp = mp
results = query.fetch(mocallback=mproc)
self.assertEqual(results[0].mp, "aaaabbbb")
query.clear()
query.filter("f_str =", "testmodelproc")
self.assertEqual(query.get(mocallback=mproc).mp, "aaaabbbb")
query.delete()
def test_model_to_dict(self):
tmodel = self.TModel1()
dic = tmodel.to_dict()
self.assertEqual(type(dic), dict)
@classmethod
def tearDownClass(cls):
cls.TModel1.delete_schema()
cls.TSubModel1.delete_schema()
dbpool.drop_db(orm_config.get_default_dbinfo())
class ModelTest(unittest.TestCase):
class TTTModel(Model):
@classmethod
def meta_domain(cls):
return "test"
f_str = properti.StringProperty()
f_int = properti.IntegerProperty()
f_bool = properti.BooleanProperty()
f_float = properti.FloatProperty()
f_date = properti.DateProperty()
f_datetime = properti.DateTimeProperty()
f_v = properti.StringProperty(length=8)
f_str1 = properti.StringProperty(persistent=False)
f_str_logged = properti.StringProperty(logged=True)
f_float_logged = properti.FloatProperty(logged=True)
f_json = properti.DictProperty()
f_obj = properti.ObjectProperty()
class TModel22(Model):
@classmethod
def meta_domain(cls):
return "test"
tstr = properti.StringProperty()
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
cls.TTTModel.create_schema()
cls.TModel22.create_schema()
def test_create(self):
tmodel = self.TTTModel()
try:
tmodel.create()
self.assertTrue(False)
except TypeError:
self.assertTrue(True)
tmodel.create('2')
tmodel2 = self.TTTModel.get_by_key(tmodel.key())
self.assertEqual(tmodel2.modified_by, '2')
self.assertIsNotNone(tmodel2.modified_time)
self.assertIsNotNone(tmodel2.created_time)
def test_update(self):
tmodel = self.TTTModel()
tmodel.create(2)
try:
tmodel.update()
self.assertTrue(False)
except TypeError:
self.assertTrue(True)
tmodel2 = self.TTTModel.get_by_key(tmodel.key())
time.sleep(1)
tmodel2.update(2)
tmodel3 = self.TTTModel.get_by_key(tmodel.key())
self.assertGreater(tmodel3.modified_time, tmodel.modified_time)
def test_delete(self):
tmodel = self.TTTModel()
tmodel.create(2)
try:
tmodel.delete()
self.assertTrue(False)
except TypeError:
self.assertTrue(True)
tmodel2 = self.TTTModel.get_by_key(tmodel.get_keyvalue())
self.assertIsNotNone(tmodel2)
tmodel.delete(2)
tmodel2 = self.TTTModel.get_by_key(tmodel.get_keyvalue())
self.assertIsNone(tmodel2)
def test_get_by_key(self):
tmodel = self.TModel22(tstr="eeee")
tmodel.create(2)
tmodel2 = tmodel.get_by_key(tmodel.key())
self.assertEqual(tmodel.tstr, tmodel2.tstr)
def test_find_one_and_update(self):
modified_time = kind.utcnow() - timedelta(minutes=1)
tmodel = self.TTTModel()
tmodel.f_int = 200000
tmodel.f_float = 1.0
tmodel.f_str = "131ddd"
id1 = tmodel.create("modifer11").key()
query = self.TTTModel.all()
query.filter("f_int >=", 150000)
query.set("f_str", "caaee")
query.set("f_json", {'a':1, 'b':'b'})
m2 = query.find_one_and_update("modifer22")
self.assertEqual(m2.f_str, "131ddd")
self.assertEqual(m2.f_json, None)
query.set("f_json", {'a':1, 'b':'8'})
m3 = query.find_one_and_update("modifer23", new=True)
self.assertEqual(m3.f_json, {'a':1, 'b':'8'})
mnew = self.TTTModel().get_by_key(id1)
self.assertEqual(mnew.modified_by, "modifer23")
self.assertGreaterEqual(mnew.modified_time, modified_time)
def test_save_objectproperty(self):
tmodel = self.TTTModel()
tmodel.f_obj = RangeValidator(2, 5)
key = tmodel.create(None).key()
t2 = self.TTTModel().get_by_key(key)
self.assertIsInstance(t2.f_obj, RangeValidator)
try:
t2.f_obj.validate(10, "zzz")
except Exception as e:
self.assertIsInstance(e, RangeError)
@classmethod
def tearDownClass(cls):
cls.TTTModel.delete_schema()
cls.TModel22.delete_schema()
dbpool.drop_db(orm_config.get_default_dbinfo())
class QueryTest(unittest.TestCase):
class TModelQT(Model):
@classmethod
def meta_domain(cls):
return "test"
f_str = properti.StringProperty(logged=True)
f_str1 = properti.StringProperty()
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
        cls.TModelQT.create_schema()
def test_update(self):
try:
query = self.TModelQT.all()
query.set("f_str", "ccc")
query.update(2)
self.assertTrue(False)
except ProgramError:
self.assertTrue(True)
tm = self.TModelQT().create(3)
query = self.TModelQT.all()
query.set("f_str1", "cccc")
query.filter("_id =", tm.key())
query.update(2)
query.filter("modified_by =", "2");
query.filter("_id =", tm.key())
self.assertEqual(query.count(), 1)
def test_delete(self):
query = self.TModelQT.all()
query.delete(None)
@classmethod
def tearDownClass(cls):
cls.TModelQT.delete_schema()
dbpool.drop_db(orm_config.get_default_dbinfo())
|
python
|
"""
These are utilities for the `/bin` scripts, not for the `cibuildwheel` program.
"""
from io import StringIO
from typing import Dict, List
from .typing import Protocol
__all__ = ("Printable", "dump_python_configurations")
class Printable(Protocol):
def __str__(self) -> str:
...
def dump_python_configurations(inp: Dict[str, Dict[str, List[Dict[str, Printable]]]]) -> str:
output = StringIO()
for header, values in inp.items():
output.write(f"[{header}]\n")
for inner_header, listing in values.items():
output.write(f"{inner_header} = [\n")
for item in listing:
output.write(" { ")
dict_contents = (f'{key} = "{value}"' for key, value in item.items())
output.write(", ".join(dict_contents))
output.write(" },\n")
output.write("]\n")
output.write("\n")
# Strip the final newline, to avoid two blank lines at the end.
return output.getvalue()[:-1]
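# A minimal usage sketch, not part of the original module: the nested dict below
# is a hypothetical input showing the structure this helper expects, and the
# trailing comment shows the TOML-like text it produces.
if __name__ == "__main__":
    example = {
        "linux": {
            "python_configurations": [
                {"identifier": "cp39-manylinux_x86_64", "version": "3.9.13"},
            ],
        },
    }
    print(dump_python_configurations(example))
    # prints a TOML-like listing, roughly:
    # [linux]
    # python_configurations = [
    #   { identifier = "cp39-manylinux_x86_64", version = "3.9.13" },
    # ]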
|
python
|
# -*- coding: utf-8 -*-
# global
import io
import sys
import json
import logging
import pandas as pd
# core
import core
import core.utils
import core.indexer
from core.DocVocabulary import DocVocabulary
from core.TermVocabulary import TermVocabulary
from core.features import Features
from core.msg import TwitterMessageParser
# configs
import configs
TTK_TASK = 'ttk'
BANK_TASK = 'bank'
LOGGER_FORMAT = '[%(asctime)-15s] %(message)s'
def init_logger():
logging.basicConfig(filemode='w',
format=LOGGER_FORMAT,
level=logging.DEBUG)
def vectorization_core(vectorizer, init_term_vocabulary=True,
merge_doc_vocabularies=False):
"""
Main function of collection vectorization
vectorizer : message vectorization function
returns : None
"""
init_logger()
if len(sys.argv) < 8:
exit(0)
config = {'task_type': sys.argv[1],
'database': sys.argv[2], # save the output results
'train_table': sys.argv[3],
'test_table': sys.argv[4],
'train_output': sys.argv[5],
'test_output': sys.argv[6],
'pconf_output': sys.argv[7]}
with io.open(configs.TWITTER_MESSAGE_PARSER_CONFIG, "r") as f:
message_settings = json.load(f, encoding='utf-8')
with io.open(configs.FEATURES_CONFIG, 'r') as f:
features_settings = json.load(f, encoding='utf-8')
# Create vocabulary of terms
if init_term_vocabulary is True:
term_vocabulary = core.indexer.create_term_vocabulary(
[config['train_table'], config['test_table']],
message_settings)
else:
term_vocabulary = TermVocabulary()
features = Features(
TwitterMessageParser(message_settings, config['task_type']),
features_settings)
doc_vocabulary = DocVocabulary()
# Train problem
train_problem = create_problem(config['task_type'],
'train',
config['train_table'],
vectorizer,
features,
term_vocabulary,
doc_vocabulary,
message_settings)
if not merge_doc_vocabularies:
doc_vocabulary = DocVocabulary()
# Test problem
test_problem = create_problem(config['task_type'],
'test',
config['test_table'],
vectorizer,
features,
term_vocabulary,
doc_vocabulary,
message_settings)
result_table = config['test_table'] + '.result.csv'
logging.info('Create a file for classifier results: {}'.format(
result_table))
result_df = pd.read_csv(config['test_table'], sep=',')
result_df.to_csv(result_table, sep=',')
# Save
save_problem(train_problem, config['train_output'])
save_problem(test_problem, config['test_output'])
save_predict_config(columns=get_score_columns(config['task_type']),
prediction_table=result_table,
out_filepath=config['pconf_output'])
def create_problem(task_type, collection_type, table_filepath, vectorizer,
features, term_vocabulary, doc_vocabulary,
message_settings):
"""
Creates problem (vectors from messages with additional features)
Arguments:
---------
task_type : BANK_TASK or TTK_TASK
According to the SentiRuEval competition
collection_type : str, 'train' or 'test'
It affects the generated vector prefixes (tone score for 'train'
task, and 'id' for 'test' task respectively)
table_filepath : str
Path to the 'csv' file
vectorizer : func
Function for producing vector from terms
features : core.Features
object of Features class
term_vocabulary : core.TermVocabulary
Vocabulary of terms
message_settings : dict
Configuration settings for TwitterMessageParser
Returns: list
-------
List of vectorized messages. Each message is presented as a list whose
first element is a 'score' or 'id' (depending on the 'train' or 'test'
dataset respectively) and whose second (latter) element is a vector -- the
embedded sentence.
"""
message_parser = TwitterMessageParser(message_settings, task_type)
labeled_messages = []
df = pd.read_csv(table_filepath, sep=',')
for score in [-1, 0, 1]:
logging.info("Reading tweets: [class: %s, file: %s]" % (
score, table_filepath))
# getting tweets with the same score
filtered_df = tweets_filter(df, get_score_columns(task_type), score)
for row in filtered_df.index:
text = filtered_df['text'][row]
index = filtered_df['twitid'][row]
message_parser.parse(text)
terms = message_parser.get_terms()
doc_vocabulary.add_doc(terms, str(score))
labeled_message = {'score': score,
'id': index,
'terms': to_unicode(terms),
'features': features.vectorize(text)}
labeled_messages.append(labeled_message)
term_vocabulary.insert_terms(
labeled_message['features'].iterkeys())
# Create vectors
problem = []
for labeled_message in labeled_messages:
vector = vectorizer(labeled_message, term_vocabulary, doc_vocabulary)
if (collection_type == 'train'):
problem.append([labeled_message['score'], vector])
elif (collection_type == 'test'):
problem.append([labeled_message['id'], vector])
else:
raise ValueError(
'Unexpected collection_type={}'.format(collection_type))
return problem
def get_score_columns(task_type):
return configs.DATA_TCC_FIELDS if task_type == TTK_TASK else \
configs.DATA_BANK_FIELDS
def to_unicode(terms):
"""
Converts list of 'str' into list of 'unicode' strings
"""
unicode_terms = []
for term in terms:
if (isinstance(term, str)):
unicode_terms.append(unicode(term, 'utf-8'))
else:
unicode_terms.append(term)
return unicode_terms
def save_problem(problem, filepath):
"""
Save problem using the format supported by classifier libraries
"""
with open(filepath, "w") as out:
logging.info("Vectors count: %s" % (len(problem)))
for vector in problem:
out.write("%s " % (vector[0]))
for index, value in sorted(vector[1].iteritems()):
out.write("%s:%s " % (index, value))
out.write("\n")
def tweets_filter(df, score_columns, score):
ids = []
for row in range(len(df)):
for column in score_columns:
if (not df[column].isnull()[row] and df[column][row] == score):
ids.append(df['twitid'][row])
return df[df['twitid'].isin(ids)]
def save_predict_config(columns, prediction_table, out_filepath):
config = {"columns": columns, "prediction_table": prediction_table}
with open(out_filepath, "w") as out:
json.dump(config, out)
def load_embeddings():
"""
Load configuration files from the embeddings folder
"""
with io.open(configs.TWITTER_MESSAGE_PARSER_CONFIG, "r") as f:
message_settings = json.load(f, encoding='utf-8')
with io.open(configs.FEATURES_CONFIG, 'r') as f:
features_settings = json.load(f, encoding='utf-8')
return message_settings, features_settings
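# Illustrative sketch only, not part of the original pipeline: a vectorizer passed
# to vectorization_core() takes a labeled message plus the term and document
# vocabularies and returns a dict of feature keys to values, which save_problem()
# writes out in sparse "key:value" format. The function below simply forwards the
# precomputed feature weights unchanged; it is an assumed minimal example, not the
# project's actual vectorizer (a real one would typically map terms to vocabulary
# indices and weight them).
#
# def passthrough_vectorizer(labeled_message, term_vocabulary, doc_vocabulary):
#     return dict(labeled_message['features'])
#
# vectorization_core(passthrough_vectorizer, init_term_vocabulary=True)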
|
python
|
class Dinosaur:
def __init__(self, name, attack_power):
self.name = name
self.attack_power = int(attack_power)
self.health = 50
self.energy = 100
self.att_list = ["Tail Whip", "Body Slam", "Stomp", "Hyper Beam"]
self.att = "error"
def attack(self, robot):
self.att_choose()
print(f'{self.name} attacks {robot} with a {self.att} and does {self.attack_power} points of damage!')
def att_choose(self):
choice = 0
while choice != 1:
for each in self.att_list:
print(each)
attack_choice = input("What attack would you like to use?")
for each in self.att_list:
if each == attack_choice:
self.att = each
choice = 1
|
python
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.data import kb
from lib.request.connect import Connect as Request
def getPageTemplate(payload, place):
retVal = (kb.originalPage, kb.errorIsNone)
if payload and place:
if (payload, place) not in kb.pageTemplates:
page, _ = Request.queryPage(payload, place, content=True, raise404=False)
kb.pageTemplates[(payload, place)] = (page, kb.lastParserStatus is None)
retVal = kb.pageTemplates[(payload, place)]
return retVal
|
python
|
# def encode_as_bytes(input_file_path: str, output_gif_path: str):
# """
# The practical encoder with optimized GIF assembler and multiprocessing acceleration.
#
# :param input_file_path: input file path
# :param output_gif_path: output gif file path
# :param chunk_string_length: the length of base64 string for each frame
# :return: None
# """
#
# # open the file to encode
# def bytes_from_file(filename, size):
# with open(filename, "rb") as fb:
# while True:
# chunk = fb.read(size)
# if chunk:
# yield chunk
# else:
# break
#
# string_list = []
# for c in bytes_from_file(input_file_path, self.chuck_length):
# string_list.append([c])
#
# # init the QRCode generator
# # current can only set box_size to 1 due to QR matrix's format
# qr = qrcode.QRCode(version=self.qr_version, error_correction=self.err_crt, box_size=1, border=2)
# qr.add_data(string_list[0])
# qr.make(fit=True)
#
# # extract the first frame as a reference frame, to generate following frames with same dimensions
# frames_template = qr.make_image().convert(mode='L', palette='ADAPTIVE', colors=2)
#
# # make the drawing canvas
# (width, height) = (frames_template.width, frames_template.height)
# surface = GIFSurface.GIFSurface(width, height, bg_color=0)
# # other colors to choose from - 78, 205, 196, 161,35,6, 150, 200, 100, 161, 35, 6, 255, 255, 255
# surface.set_palette([0, 0, 0, 255, 255, 255])
#
# # the colormap for QRCode. map True to white
# cmap = {True: 0, False: 1} # Black -> True -> (0, 0, 0)
# mcl = 2 # related to LZW compression alg, 2-10
# render = GIFSurface.Render(cmap, mcl)
# delay = self.GIF_delay
# trans_index = None
#
# # assuming all frames share same delay
# control = GIFencoder.graphics_control_block(delay, trans_index)
# # create an array to store multiprocessing results
# frames = [None] * len(string_list)
# # create a pool to dispatch frames encoding
# pool1 = multiprocessing.Pool(processes=multiprocessing.cpu_count()) # use up all the cores.
#
# for i, s in enumerate(string_list):
# frames[i] = pool1.apply_async(self.gen_qr_render_frame, args=(qr, s, width, height, render,))
#
# # join the pool
# pool1.close()
# pool1.join()
#
# for i in range(len(string_list)):
# surface.write(control + frames[i].get())
#
# surface.save(output_gif_path)
# surface.close()
# return
#
#
# @staticmethod
# def decode_from_bytes(input_gif_path, output_file_path):
# """
# Decode the GIF to recover its binary file entity.
#
# :param input_gif_path: input GIF file
# :param output_file_path: output binary file, use extension to decide file type.
# :return: None
# """
# if type(input_gif_path) is str:
# img = Image.open(input_gif_path) # GIF file
# else:
# return
#
# decoded_string = b''
# frame_count = 0
# for frame in ImageSequence.Iterator(img):
# frame_count += 1
# if frame_count < 2:
# continue # skip the first black frame
# # the decode CV lib relies on the dimensions, 1px width cannot be recognized
# (width, height) = (frame.width * 2, frame.height * 2)
# im_resized = frame.resize((width, height))
# decoded = QRdecode(im_resized)
# decoded_string += decoded[0].data
#
# print('total frame count is', str(frame_count))
# file_bin = decoded_string
# with open(output_file_path, 'wb') as f:
# f.write(file_bin)
# f.close()
# return
|
python
|
# Version: 2020.02.21
#
# MIT License
#
# Copyright (c) 2018 Jiankang Deng and Jia Guo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# THIS FILE IS FOR EXPERIMENTS, USE train_softmax.py FOR NORMAL TRAINING.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import logging
import pickle
import numpy as np
from data import FaceImageIter
from data import FaceImageIterList
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_image
from noise_sgd import NoiseSGD
sys.path.append(os.path.join(os.path.dirname(__file__), 'eval'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'symbols'))
import fresnet
import finception_resnet_v2
import fmobilenet
import fmobilenetv2
import fxception
import fdensenet
import fdpn
import fnasnet
import spherenet
# import lfw
import verification
import sklearn
sys.path.append(os.path.join(os.path.dirname(__file__), 'losses'))
import center_loss
logger = logging.getLogger()
logger.setLevel(logging.INFO)
args = None
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__(
'acc', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count += 1
if args.loss_type >= 2 and args.loss_type <= 7 and args.margin_verbose > 0:
if self.count % args.ctx_num == 0:
mbatch = self.count // args.ctx_num
_verbose = args.margin_verbose
if mbatch == 1 or mbatch % _verbose == 0:
a = 0.0
b = 0.0
if len(preds) >= 4:
a = preds[-2].asnumpy()[0]
b = preds[-1].asnumpy()[0]
elif len(preds) == 3:
a = preds[-1].asnumpy()[0]
b = a
print('[%d][MARGIN]%f,%f' % (mbatch, a, b))
if args.logits_verbose > 0:
if self.count % args.ctx_num == 0:
mbatch = self.count // args.ctx_num
_verbose = args.logits_verbose
if mbatch == 1 or mbatch % _verbose == 0:
a = 0.0
b = 0.0
if len(preds) >= 3:
v = preds[-1].asnumpy()
v = np.sort(v)
num = len(v) // 10
a = np.mean(v[0:num])
b = np.mean(v[-1 * num:])
print('[LOGITS] %d,%f,%f' % (mbatch, a, b))
# loss = preds[2].asnumpy()[0]
# if len(self.losses)==20:
# print('ce loss', sum(self.losses)/len(self.losses))
# self.losses = []
# self.losses.append(loss)
preds = [preds[1]] # use softmax output
for label, pred_label in zip(labels, preds):
# print(pred_label)
# print(label.shape, pred_label.shape)
if pred_label.shape != label.shape:
pred_label = mx.ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32').flatten()
label = label.asnumpy()
if label.ndim == 2:
label = label[:, 0]
label = label.astype('int32').flatten()
# print(label)
# print('label',label)
# print('pred_label', pred_label)
assert label.shape == pred_label.shape
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
class LossValueMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(LossValueMetric, self).__init__(
'lossvalue', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
def update(self, labels, preds):
loss = preds[-1].asnumpy()[0]
self.sum_metric += loss
self.num_inst += 1.0
gt_label = preds[-2].asnumpy()
# print(gt_label)
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# general
parser.add_argument('--data-dir', default='', help='training set directory')
parser.add_argument('--prefix', default='../model/model', help='directory to save model.')
parser.add_argument('--pretrained', default='', help='pretrained model to load')
parser.add_argument('--ckpt', type=int, default=1,
help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
parser.add_argument('--network', default='r50', help='specify network')
parser.add_argument('--version-se', type=int, default=0, help='whether to use se in network')
parser.add_argument('--version-input', type=int, default=1, help='network input config')
parser.add_argument('--version-output', type=str, default='E', help='network embedding output config')
parser.add_argument('--version-unit', type=int, default=3, help='resnet unit config')
parser.add_argument('--version-act', type=str, default='prelu', help='network activation config')
parser.add_argument('--end-epoch', type=int, default=100000, help='training epoch size.')
parser.add_argument('--noise-sgd', type=float, default=0.0, help='')
parser.add_argument('--lr', type=float, default=0.1, help='start learning rate')
parser.add_argument('--wd', type=float, default=0.0005, help='weight decay')
parser.add_argument('--mom', type=float, default=0.9, help='momentum')
parser.add_argument('--emb-size', type=int, default=512, help='embedding length')
parser.add_argument('--per-batch-size', type=int, default=128, help='batch size in each context')
parser.add_argument('--margin-m', type=float, default=0.5, help='')
parser.add_argument('--margin-s', type=float, default=64.0, help='')
parser.add_argument('--margin-a', type=float, default=0.0, help='')
parser.add_argument('--margin-b', type=float, default=0.0, help='')
parser.add_argument('--easy-margin', type=int, default=0, help='')
parser.add_argument('--margin-verbose', type=int, default=0, help='')
parser.add_argument('--logits-verbose', type=int, default=0, help='')
parser.add_argument('--c2c-threshold', type=float, default=0.0, help='')
parser.add_argument('--c2c-mode', type=int, default=-10, help='')
parser.add_argument('--output-c2c', type=int, default=0, help='')
parser.add_argument('--train-limit', type=int, default=0, help='')
parser.add_argument('--margin', type=int, default=4, help='')
parser.add_argument('--beta', type=float, default=1000., help='')
parser.add_argument('--beta-min', type=float, default=5., help='')
parser.add_argument('--beta-freeze', type=int, default=0, help='')
parser.add_argument('--gamma', type=float, default=0.12, help='')
parser.add_argument('--power', type=float, default=1.0, help='')
parser.add_argument('--scale', type=float, default=0.9993, help='')
parser.add_argument('--center-alpha', type=float, default=0.5, help='')
parser.add_argument('--center-scale', type=float, default=0.003, help='')
parser.add_argument('--images-per-identity', type=int, default=0, help='')
parser.add_argument('--triplet-bag-size', type=int, default=3600, help='')
parser.add_argument('--triplet-alpha', type=float, default=0.3, help='')
parser.add_argument('--triplet-max-ap', type=float, default=0.0, help='')
parser.add_argument('--verbose', type=int, default=2000, help='')
parser.add_argument('--loss-type', type=int, default=4, help='')
parser.add_argument('--incay', type=float, default=0.0, help='feature incay')
parser.add_argument('--use-deformable', type=int, default=0, help='')
parser.add_argument('--rand-mirror', type=int, default=1, help='')
parser.add_argument('--cutoff', type=int, default=0, help='')
parser.add_argument('--patch', type=str, default='0_0_96_112_0', help='')
parser.add_argument('--lr-steps', type=str, default='', help='')
parser.add_argument('--max-steps', type=int, default=0, help='')
parser.add_argument('--target', type=str, default='lfw,cfp_fp,agedb_30,cplfw,calfw', help='')
args = parser.parse_args()
return args
def get_symbol(args, arg_params, aux_params):
data_shape = (args.image_channel, args.image_h, args.image_w)
image_shape = ",".join([str(x) for x in data_shape])
margin_symbols = []
if args.network[0] == 'd':
embedding = fdensenet.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0] == 'm':
print('init mobilenet', args.num_layers)
if args.num_layers == 1:
embedding = fmobilenet.get_symbol(args.emb_size,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
else:
embedding = fmobilenetv2.get_symbol(args.emb_size)
elif args.network[0] == 'i':
print('init inception-resnet-v2', args.num_layers)
embedding = finception_resnet_v2.get_symbol(args.emb_size,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0] == 'x':
print('init xception', args.num_layers)
embedding = fxception.get_symbol(args.emb_size,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0] == 'p':
print('init dpn', args.num_layers)
embedding = fdpn.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit)
elif args.network[0] == 'n':
print('init nasnet', args.num_layers)
embedding = fnasnet.get_symbol(args.emb_size)
elif args.network[0] == 's':
print('init spherenet', args.num_layers)
embedding = spherenet.get_symbol(args.emb_size, args.num_layers)
else:
print('init resnet', args.num_layers)
embedding = fresnet.get_symbol(args.emb_size, args.num_layers,
version_se=args.version_se, version_input=args.version_input,
version_output=args.version_output, version_unit=args.version_unit,
version_act=args.version_act)
all_label = mx.symbol.Variable('softmax_label')
if not args.output_c2c:
gt_label = all_label
else:
gt_label = mx.symbol.slice_axis(all_label, axis=1, begin=0, end=1)
gt_label = mx.symbol.reshape(gt_label, (args.per_batch_size,))
c2c_label = mx.symbol.slice_axis(all_label, axis=1, begin=1, end=2)
c2c_label = mx.symbol.reshape(c2c_label, (args.per_batch_size,))
assert args.loss_type >= 0
extra_loss = None
if args.loss_type == 0: # softmax
_weight = mx.symbol.Variable('fc7_weight')
_bias = mx.symbol.Variable('fc7_bias', lr_mult=2.0, wd_mult=0.0)
fc7 = mx.sym.FullyConnected(data=embedding, weight=_weight, bias=_bias, num_hidden=args.num_classes, name='fc7')
elif args.loss_type == 1: # sphere
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
fc7 = mx.sym.LSoftmax(data=embedding, label=gt_label, num_hidden=args.num_classes,
weight=_weight,
beta=args.beta, margin=args.margin, scale=args.scale,
beta_min=args.beta_min, verbose=1000, name='fc7')
elif args.loss_type == 8: # centerloss, TODO
_weight = mx.symbol.Variable('fc7_weight')
_bias = mx.symbol.Variable('fc7_bias', lr_mult=2.0, wd_mult=0.0)
fc7 = mx.sym.FullyConnected(data=embedding, weight=_weight, bias=_bias, num_hidden=args.num_classes, name='fc7')
print('center-loss', args.center_alpha, args.center_scale)
extra_loss = mx.symbol.Custom(data=embedding, label=gt_label, name='center_loss', op_type='centerloss', \
num_class=args.num_classes, alpha=args.center_alpha, scale=args.center_scale,
batchsize=args.per_batch_size)
elif args.loss_type == 2:
s = args.margin_s
m = args.margin_m
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
if s > 0.0:
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
if m > 0.0:
if args.margin_verbose > 0:
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
margin_symbols.append(mx.symbol.mean(cos_t))
s_m = s * m
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=s_m, off_value=0.0)
fc7 = fc7 - gt_one_hot
if args.margin_verbose > 0:
new_zy = mx.sym.pick(fc7, gt_label, axis=1)
new_cos_t = new_zy / s
margin_symbols.append(mx.symbol.mean(new_cos_t))
else:
fc7 = mx.sym.FullyConnected(data=embedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
if m > 0.0:
body = embedding * embedding
body = mx.sym.sum_axis(body, axis=1, keepdims=True)
body = mx.sym.sqrt(body)
body = body * m
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, body)
fc7 = fc7 - body
elif args.loss_type == 3:
s = args.margin_s
m = args.margin_m
assert args.margin == 2 or args.margin == 4
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
if args.margin_verbose > 0:
margin_symbols.append(mx.symbol.mean(cos_t))
if m > 1.0:
t = mx.sym.arccos(cos_t)
t = t * m
body = mx.sym.cos(t)
new_zy = body * s
if args.margin_verbose > 0:
new_cos_t = new_zy / s
margin_symbols.append(mx.symbol.mean(new_cos_t))
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7 + body
# threshold = math.cos(args.margin_m)
# cond_v = cos_t - threshold
# cond = mx.symbol.Activation(data=cond_v, act_type='relu')
# body = cos_t
# for i in xrange(args.margin//2):
# body = body*body
# body = body*2-1
# new_zy = body*s
# zy_keep = zy
# new_zy = mx.sym.where(cond, new_zy, zy_keep)
# if args.margin_verbose>0:
# new_cos_t = new_zy/s
# margin_symbols.append(mx.symbol.mean(new_cos_t))
# diff = new_zy - zy
# diff = mx.sym.expand_dims(diff, 1)
# gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
# body = mx.sym.broadcast_mul(gt_one_hot, diff)
# fc7 = fc7+body
elif args.loss_type == 4:
s = args.margin_s
m = args.margin_m
assert s > 0.0
assert m >= 0.0
assert m < (math.pi / 2)
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
if args.margin_verbose > 0:
margin_symbols.append(mx.symbol.mean(cos_t))
if args.output_c2c == 0:
cos_m = math.cos(m)
sin_m = math.sin(m)
mm = math.sin(math.pi - m) * m
# threshold = 0.0
threshold = math.cos(math.pi - m)
if args.easy_margin:
cond = mx.symbol.Activation(data=cos_t, act_type='relu')
else:
cond_v = cos_t - threshold
cond = mx.symbol.Activation(data=cond_v, act_type='relu')
body = cos_t * cos_t
body = 1.0 - body
sin_t = mx.sym.sqrt(body)
new_zy = cos_t * cos_m
b = sin_t * sin_m
new_zy = new_zy - b
new_zy = new_zy * s
if args.easy_margin:
zy_keep = zy
else:
zy_keep = zy - s * mm
new_zy = mx.sym.where(cond, new_zy, zy_keep)
else:
# set c2c as cosm^2 in data.py
cos_m = mx.sym.sqrt(c2c_label)
sin_m = 1.0 - c2c_label
sin_m = mx.sym.sqrt(sin_m)
body = cos_t * cos_t
body = 1.0 - body
sin_t = mx.sym.sqrt(body)
new_zy = cos_t * cos_m
b = sin_t * sin_m
new_zy = new_zy - b
new_zy = new_zy * s
if args.margin_verbose > 0:
new_cos_t = new_zy / s
margin_symbols.append(mx.symbol.mean(new_cos_t))
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7 + body
elif args.loss_type == 5:
s = args.margin_s
m = args.margin_m
assert s > 0.0
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
if args.margin_a != 1.0 or args.margin_m != 0.0 or args.margin_b != 0.0:
if args.margin_a == 1.0 and args.margin_m == 0.0:
s_m = s * args.margin_b
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=s_m, off_value=0.0)
fc7 = fc7 - gt_one_hot
else:
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
t = mx.sym.arccos(cos_t)
if args.margin_a != 1.0:
t = t * args.margin_a
if args.margin_m > 0.0:
t = t + args.margin_m
body = mx.sym.cos(t)
if args.margin_b > 0.0:
body = body - args.margin_b
new_zy = body * s
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7 + body
elif args.loss_type == 6:
s = args.margin_s
m = args.margin_m
assert s > 0.0
assert m >= 0.0
assert m < (math.pi / 2)
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
t = mx.sym.arccos(cos_t)
if args.margin_verbose > 0:
margin_symbols.append(mx.symbol.mean(t))
t_min = mx.sym.min(t)
ta = mx.sym.broadcast_div(t_min, t)
a1 = args.margin_a
r1 = ta - a1
r1 = mx.symbol.Activation(data=r1, act_type='relu')
r1 = r1 + a1
r2 = mx.symbol.zeros(shape=(args.per_batch_size,))
cond = t - 1.0
cond = mx.symbol.Activation(data=cond, act_type='relu')
r = mx.sym.where(cond, r2, r1)
var_m = r * m
t = t + var_m
body = mx.sym.cos(t)
new_zy = body * s
if args.margin_verbose > 0:
# new_cos_t = new_zy/s
# margin_symbols.append(mx.symbol.mean(new_cos_t))
margin_symbols.append(mx.symbol.mean(t))
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7 + body
elif args.loss_type == 7:
s = args.margin_s
m = args.margin_m
assert s > 0.0
assert m >= 0.0
assert m < (math.pi / 2)
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * s
fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.num_classes,
name='fc7')
zy = mx.sym.pick(fc7, gt_label, axis=1)
cos_t = zy / s
t = mx.sym.arccos(cos_t)
if args.margin_verbose > 0:
margin_symbols.append(mx.symbol.mean(t))
var_m = mx.sym.random.uniform(low=args.margin_b, high=args.margin_m, shape=(1,))
t = mx.sym.broadcast_add(t, var_m)
body = mx.sym.cos(t)
new_zy = body * s
if args.margin_verbose > 0:
# new_cos_t = new_zy/s
# margin_symbols.append(mx.symbol.mean(new_cos_t))
margin_symbols.append(mx.symbol.mean(t))
diff = new_zy - zy
diff = mx.sym.expand_dims(diff, 1)
gt_one_hot = mx.sym.one_hot(gt_label, depth=args.num_classes, on_value=1.0, off_value=0.0)
body = mx.sym.broadcast_mul(gt_one_hot, diff)
fc7 = fc7 + body
elif args.loss_type == 10: # marginal loss
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
params = [1.2, 0.3, 1.0]
n1 = mx.sym.expand_dims(nembedding, axis=1) # N,1,C
n2 = mx.sym.expand_dims(nembedding, axis=0) # 1,N,C
body = mx.sym.broadcast_sub(n1, n2) # N,N,C
body = body * body
body = mx.sym.sum(body, axis=2) # N,N
# body = mx.sym.sqrt(body)
body = body - params[0]
mask = mx.sym.Variable('extra')
body = body * mask
body = body + params[1]
# body = mx.sym.maximum(body, 0.0)
body = mx.symbol.Activation(data=body, act_type='relu')
body = mx.sym.sum(body)
body = body / (args.per_batch_size * args.per_batch_size - args.per_batch_size)
extra_loss = mx.symbol.MakeLoss(body, grad_scale=params[2])
elif args.loss_type == 11: # npair loss
params = [0.9, 0.2]
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
nembedding = mx.sym.transpose(nembedding)
nembedding = mx.symbol.reshape(nembedding, (args.emb_size, args.per_identities, args.images_per_identity))
nembedding = mx.sym.transpose(nembedding, axes=(2, 1, 0)) # 2*id*512
# nembedding = mx.symbol.reshape(nembedding, (args.emb_size, args.images_per_identity, args.per_identities))
# nembedding = mx.sym.transpose(nembedding, axes=(1,2,0)) #2*id*512
n1 = mx.symbol.slice_axis(nembedding, axis=0, begin=0, end=1)
n2 = mx.symbol.slice_axis(nembedding, axis=0, begin=1, end=2)
# n1 = []
# n2 = []
# for i in xrange(args.per_identities):
# _n1 = mx.symbol.slice_axis(nembedding, axis=0, begin=2*i, end=2*i+1)
# _n2 = mx.symbol.slice_axis(nembedding, axis=0, begin=2*i+1, end=2*i+2)
# n1.append(_n1)
# n2.append(_n2)
# n1 = mx.sym.concat(*n1, dim=0)
# n2 = mx.sym.concat(*n2, dim=0)
# rembeddings = mx.symbol.reshape(nembedding, (args.images_per_identity, args.per_identities, 512))
# n1 = mx.symbol.slice_axis(rembeddings, axis=0, begin=0, end=1)
# n2 = mx.symbol.slice_axis(rembeddings, axis=0, begin=1, end=2)
n1 = mx.symbol.reshape(n1, (args.per_identities, args.emb_size))
n2 = mx.symbol.reshape(n2, (args.per_identities, args.emb_size))
cosine_matrix = mx.symbol.dot(lhs=n1, rhs=n2, transpose_b=True) # id*id, id=N of N-pair
data_extra = mx.sym.Variable('extra')
data_extra = mx.sym.slice_axis(data_extra, axis=0, begin=0, end=args.per_identities)
mask = cosine_matrix * data_extra
# body = mx.sym.mean(mask)
fii = mx.sym.sum_axis(mask, axis=1)
fij_fii = mx.sym.broadcast_sub(cosine_matrix, fii)
fij_fii = mx.sym.exp(fij_fii)
row = mx.sym.sum_axis(fij_fii, axis=1)
row = mx.sym.log(row)
body = mx.sym.mean(row)
extra_loss = mx.sym.MakeLoss(body)
elif args.loss_type == 12: # triplet loss
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
anchor = mx.symbol.slice_axis(nembedding, axis=0, begin=0, end=args.per_batch_size // 3)
positive = mx.symbol.slice_axis(nembedding, axis=0, begin=args.per_batch_size // 3,
end=2 * args.per_batch_size // 3)
negative = mx.symbol.slice_axis(nembedding, axis=0, begin=2 * args.per_batch_size // 3, end=args.per_batch_size)
ap = anchor - positive
an = anchor - negative
ap = ap * ap
an = an * an
ap = mx.symbol.sum(ap, axis=1, keepdims=1) # (T,1)
an = mx.symbol.sum(an, axis=1, keepdims=1) # (T,1)
triplet_loss = mx.symbol.Activation(data=(ap - an + args.triplet_alpha), act_type='relu')
triplet_loss = mx.symbol.mean(triplet_loss)
# triplet_loss = mx.symbol.sum(triplet_loss)/(args.per_batch_size//3)
extra_loss = mx.symbol.MakeLoss(triplet_loss)
elif args.loss_type == 13: # triplet loss with angular margin
m = args.margin_m
sin_m = math.sin(m)
cos_m = math.cos(m)
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
anchor = mx.symbol.slice_axis(nembedding, axis=0, begin=0, end=args.per_batch_size // 3)
positive = mx.symbol.slice_axis(nembedding, axis=0, begin=args.per_batch_size // 3,
end=2 * args.per_batch_size // 3)
negative = mx.symbol.slice_axis(nembedding, axis=0, begin=2 * args.per_batch_size // 3, end=args.per_batch_size)
ap = anchor * positive
an = anchor * negative
ap = mx.symbol.sum(ap, axis=1, keepdims=1) # (T,1)
an = mx.symbol.sum(an, axis=1, keepdims=1) # (T,1)
ap = mx.symbol.arccos(ap)
an = mx.symbol.arccos(an)
triplet_loss = mx.symbol.Activation(data=(ap - an + args.margin_m), act_type='relu')
# body = ap*ap
# body = 1.0-body
# body = mx.symbol.sqrt(body)
# body = body*sin_m
# ap = ap*cos_m
# ap = ap-body
# triplet_loss = mx.symbol.Activation(data = (an-ap), act_type='relu')
triplet_loss = mx.symbol.mean(triplet_loss)
extra_loss = mx.symbol.MakeLoss(triplet_loss)
elif args.loss_type == 9: # coco loss
centroids = []
for i in xrange(args.per_identities):
xs = mx.symbol.slice_axis(embedding, axis=0, begin=i * args.images_per_identity,
end=(i + 1) * args.images_per_identity)
mean = mx.symbol.mean(xs, axis=0, keepdims=True)
mean = mx.symbol.L2Normalization(mean, mode='instance')
centroids.append(mean)
centroids = mx.symbol.concat(*centroids, dim=0)
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n') * args.coco_scale
fc7 = mx.symbol.dot(nembedding, centroids, transpose_b=True) # (batchsize, per_identities)
# extra_loss = mx.symbol.softmax_cross_entropy(fc7, gt_label, name='softmax_ce')/args.per_batch_size
# extra_loss = mx.symbol.BlockGrad(extra_loss)
else:
# embedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*float(args.loss_type)
embedding = embedding * 5
_weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
_weight = mx.symbol.L2Normalization(_weight, mode='instance') * 2
fc7 = mx.sym.LSoftmax(data=embedding, label=gt_label, num_hidden=args.num_classes,
weight=_weight,
beta=args.beta, margin=args.margin, scale=args.scale,
beta_min=args.beta_min, verbose=100, name='fc7')
# fc7 = mx.sym.Custom(data=embedding, label=gt_label, weight=_weight, num_hidden=args.num_classes,
# beta=args.beta, margin=args.margin, scale=args.scale,
# op_type='ASoftmax', name='fc7')
if args.loss_type <= 1 and args.incay > 0.0:
params = [1.e-10]
sel = mx.symbol.argmax(data=fc7, axis=1)
sel = (sel == gt_label)
norm = embedding * embedding
norm = mx.symbol.sum(norm, axis=1)
norm = norm + params[0]
feature_incay = sel / norm
feature_incay = mx.symbol.mean(feature_incay) * args.incay
extra_loss = mx.symbol.MakeLoss(feature_incay)
# out = softmax
# l2_embedding = mx.symbol.L2Normalization(embedding)
# ce = mx.symbol.softmax_cross_entropy(fc7, gt_label, name='softmax_ce')/args.per_batch_size
# out = mx.symbol.Group([mx.symbol.BlockGrad(embedding), softmax, mx.symbol.BlockGrad(ce)])
out_list = [mx.symbol.BlockGrad(embedding)]
softmax = None
if args.loss_type < 10:
softmax = mx.symbol.SoftmaxOutput(data=fc7, label=gt_label, name='softmax', normalization='valid')
out_list.append(softmax)
if args.logits_verbose > 0:
logits = mx.symbol.softmax(data=fc7)
logits = mx.sym.pick(logits, gt_label, axis=1)
margin_symbols.append(logits)
# logit_max = mx.sym.max(logits)
# logit_min = mx.sym.min(logits)
# margin_symbols.append(logit_max)
# margin_symbols.append(logit_min)
if softmax is None:
out_list.append(mx.sym.BlockGrad(gt_label))
if extra_loss is not None:
out_list.append(extra_loss)
for _sym in margin_symbols:
_sym = mx.sym.BlockGrad(_sym)
out_list.append(_sym)
out = mx.symbol.Group(out_list)
return (out, arg_params, aux_params)
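# Descriptive note on the loss_type == 4 branch above (comment added for clarity,
# not original code): the additive angular margin replaces the target-class logit
# cos(t) with cos(t + m) = cos(t)*cos(m) - sin(t)*sin(m), scaled by s; when
# easy_margin is off, samples with cos(t) <= cos(pi - m) instead fall back to
# cos(t) - m*sin(m) so the penalised logit stays monotonically decreasing in t.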
def train_net(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd) > 0:
for i in xrange(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx) == 0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
prefix = args.prefix
prefix_dir = os.path.dirname(prefix)
if not os.path.exists(prefix_dir):
os.makedirs(prefix_dir)
end_epoch = args.end_epoch
args.ctx_num = len(ctx)
args.num_layers = int(args.network[1:])
print('num_layers', args.num_layers)
if args.per_batch_size == 0:
args.per_batch_size = 128
if args.loss_type == 10:
args.per_batch_size = 256
args.batch_size = args.per_batch_size * args.ctx_num
args.rescale_threshold = 0
args.image_channel = 3
ppatch = [int(x) for x in args.patch.split('_')]
assert len(ppatch) == 5
os.environ['BETA'] = str(args.beta)
data_dir_list = args.data_dir.split(',')
if args.loss_type != 12 and args.loss_type != 13:
assert len(data_dir_list) == 1
data_dir = data_dir_list[0]
args.use_val = False
path_imgrec = None
path_imglist = None
val_rec = None
prop = face_image.load_property(data_dir)
args.num_classes = prop.num_classes
image_size = prop.image_size
args.image_h = image_size[0]
args.image_w = image_size[1]
print('image_size', image_size)
assert (args.num_classes > 0)
print('num_classes', args.num_classes)
args.coco_scale = 0.5 * math.log(float(args.num_classes - 1)) + 3
# path_imglist = "/raid5data/dplearn/MS-Celeb-Aligned/lst2"
path_imgrec = os.path.join(data_dir, "train.rec")
val_rec = os.path.join(data_dir, "val.rec")
if os.path.exists(val_rec) and args.loss_type < 10:
args.use_val = True
else:
val_rec = None
# args.use_val = False
if args.loss_type == 1 and args.num_classes > 20000:
args.beta_freeze = 5000
args.gamma = 0.06
if args.loss_type < 9:
assert args.images_per_identity == 0
else:
if args.images_per_identity == 0:
if args.loss_type == 11:
args.images_per_identity = 2
elif args.loss_type == 10 or args.loss_type == 9:
args.images_per_identity = 16
elif args.loss_type == 12 or args.loss_type == 13:
args.images_per_identity = 5
assert args.per_batch_size % 3 == 0
assert args.images_per_identity >= 2
args.per_identities = int(args.per_batch_size / args.images_per_identity)
print('Called with argument:', args)
data_shape = (args.image_channel, image_size[0], image_size[1])
mean = None
begin_epoch = 0
base_lr = args.lr
base_wd = args.wd
base_mom = args.mom
if len(args.pretrained) == 0:
arg_params = None
aux_params = None
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
else:
vec = args.pretrained.split(',')
print('loading', vec)
_, arg_params, aux_params = mx.model.load_checkpoint(vec[0], int(vec[1]))
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
data_extra = None
hard_mining = False
triplet_params = None
coco_mode = False
if args.loss_type == 10:
hard_mining = True
_shape = (args.batch_size, args.per_batch_size)
data_extra = np.full(_shape, -1.0, dtype=np.float32)
c = 0
while c < args.batch_size:
a = 0
while a < args.per_batch_size:
b = a + args.images_per_identity
data_extra[(c + a):(c + b), a:b] = 1.0
# print(c+a, c+b, a, b)
a = b
c += args.per_batch_size
elif args.loss_type == 11:
data_extra = np.zeros((args.batch_size, args.per_identities), dtype=np.float32)
c = 0
while c < args.batch_size:
for i in xrange(args.per_identities):
data_extra[c + i][i] = 1.0
c += args.per_batch_size
elif args.loss_type == 12 or args.loss_type == 13:
triplet_params = [args.triplet_bag_size, args.triplet_alpha, args.triplet_max_ap]
elif args.loss_type == 9:
coco_mode = True
label_name = 'softmax_label'
label_shape = (args.batch_size,)
if args.output_c2c:
label_shape = (args.batch_size, 2)
if data_extra is None:
model = mx.mod.Module(
context=ctx,
symbol=sym,
)
else:
data_names = ('data', 'extra')
# label_name = ''
model = mx.mod.Module(
context=ctx,
symbol=sym,
data_names=data_names,
label_names=(label_name,),
)
if args.use_val:
val_dataiter = FaceImageIter(
batch_size=args.batch_size,
data_shape=data_shape,
path_imgrec=val_rec,
# path_imglist = val_path,
shuffle=False,
rand_mirror=False,
mean=mean,
ctx_num=args.ctx_num,
data_extra=data_extra,
)
else:
val_dataiter = None
if len(data_dir_list) == 1 and args.loss_type != 12 and args.loss_type != 13:
train_dataiter = FaceImageIter(
batch_size=args.batch_size,
data_shape=data_shape,
path_imgrec=path_imgrec,
shuffle=True,
rand_mirror=args.rand_mirror,
mean=mean,
cutoff=args.cutoff,
c2c_threshold=args.c2c_threshold,
output_c2c=args.output_c2c,
c2c_mode=args.c2c_mode,
limit=args.train_limit,
ctx_num=args.ctx_num,
images_per_identity=args.images_per_identity,
data_extra=data_extra,
hard_mining=hard_mining,
triplet_params=triplet_params,
coco_mode=coco_mode,
mx_model=model,
label_name=label_name,
)
else:
iter_list = []
for _data_dir in data_dir_list:
_path_imgrec = os.path.join(_data_dir, "train.rec")
_dataiter = FaceImageIter(
batch_size=args.batch_size,
data_shape=data_shape,
path_imgrec=_path_imgrec,
shuffle=True,
rand_mirror=args.rand_mirror,
mean=mean,
cutoff=args.cutoff,
c2c_threshold=args.c2c_threshold,
output_c2c=args.output_c2c,
c2c_mode=args.c2c_mode,
limit=args.train_limit,
ctx_num=args.ctx_num,
images_per_identity=args.images_per_identity,
data_extra=data_extra,
hard_mining=hard_mining,
triplet_params=triplet_params,
coco_mode=coco_mode,
mx_model=model,
label_name=label_name,
)
iter_list.append(_dataiter)
iter_list.append(_dataiter)
train_dataiter = FaceImageIterList(iter_list)
if args.loss_type < 10:
_metric = AccMetric()
else:
_metric = LossValueMetric()
eval_metrics = [mx.metric.create(_metric)]
if args.network[0] == 'r':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) # resnet style
elif args.network[0] == 'i' or args.network[0] == 'x':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2) # inception
else:
initializer = mx.init.Xavier(rnd_type='uniform', factor_type="in", magnitude=2)
_rescale = 1.0 / args.ctx_num
if args.noise_sgd > 0.0:
print('use noise sgd')
opt = NoiseSGD(scale=args.noise_sgd, learning_rate=base_lr, momentum=base_mom, wd=base_wd,
rescale_grad=_rescale)
else:
opt = optimizer.SGD(learning_rate=base_lr, momentum=base_mom, wd=base_wd, rescale_grad=_rescale)
som = 20
if args.loss_type == 12 or args.loss_type == 13:
som = 2
_cb = mx.callback.Speedometer(args.batch_size, som)
ver_list = []
ver_name_list = []
for name in args.target.split(','):
path = os.path.join(data_dir, name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
ver_list.append(data_set)
ver_name_list.append(name)
print('ver', name)
def ver_test(nbatch):
results = []
for i in xrange(len(ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(ver_list[i], model, args.batch_size, 10,
data_extra, label_shape)
print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm))
# print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc1, std1))
print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc2, std2))
results.append(acc2)
return results
def val_test():
acc = AccMetric()
val_metric = mx.metric.create(acc)
val_metric.reset()
val_dataiter.reset()
for i, eval_batch in enumerate(val_dataiter):
model.forward(eval_batch, is_train=False)
model.update_metric(val_metric, eval_batch.label)
acc_value = val_metric.get_name_value()[0][1]
print('VACC: %f' % (acc_value))
highest_acc = [0.0, 0.0] # lfw and target
# for i in xrange(len(ver_list)):
# highest_acc.append(0.0)
global_step = [0]
save_step = [0]
if len(args.lr_steps) == 0:
lr_steps = [40000, 60000, 80000]
if args.loss_type >= 1 and args.loss_type <= 7:
lr_steps = [100000, 140000, 160000]
p = 512.0 / args.batch_size
for l in xrange(len(lr_steps)):
lr_steps[l] = int(lr_steps[l] * p)
else:
lr_steps = [int(x) for x in args.lr_steps.split(',')]
print('lr_steps', lr_steps)
def _batch_callback(param):
# global global_step
global_step[0] += 1
mbatch = global_step[0]
for _lr in lr_steps:
if mbatch == args.beta_freeze + _lr:
opt.lr *= 0.1
print('lr change to', opt.lr)
break
_cb(param)
if mbatch % 1000 == 0:
print('lr-batch-epoch:', opt.lr, param.nbatch, param.epoch)
if mbatch >= 0 and mbatch % args.verbose == 0:
acc_list = ver_test(mbatch)
save_step[0] += 1
msave = save_step[0]
do_save = False
if len(acc_list) > 0:
lfw_score = acc_list[0]
if lfw_score > highest_acc[0]:
highest_acc[0] = lfw_score
if lfw_score >= 0.998:
do_save = True
if acc_list[-1] >= highest_acc[-1]:
highest_acc[-1] = acc_list[-1]
if lfw_score >= 0.99:
do_save = True
if args.ckpt == 0:
do_save = False
elif args.ckpt > 1:
do_save = True
# for i in xrange(len(acc_list)):
# acc = acc_list[i]
# if acc>=highest_acc[i]:
# highest_acc[i] = acc
# if lfw_score>=0.99:
# do_save = True
# if args.loss_type==1 and mbatch>lr_steps[-1] and mbatch%10000==0:
# do_save = True
if do_save:
print('saving', msave)
if val_dataiter is not None:
val_test()
arg, aux = model.get_params()
mx.model.save_checkpoint(prefix, msave, model.symbol, arg, aux)
# if acc>=highest_acc[0]:
# lfw_npy = "%s-lfw-%04d" % (prefix, msave)
# X = np.concatenate(embeddings_list, axis=0)
# print('saving lfw npy', X.shape)
# np.save(lfw_npy, X)
print('[%d]Accuracy-Highest: %1.5f' % (mbatch, highest_acc[-1]))
if mbatch <= args.beta_freeze:
_beta = args.beta
else:
move = max(0, mbatch - args.beta_freeze)
_beta = max(args.beta_min, args.beta * math.pow(1 + args.gamma * move, -1.0 * args.power))
# print('beta', _beta)
os.environ['BETA'] = str(_beta)
if args.max_steps > 0 and mbatch > args.max_steps:
sys.exit(0)
# epoch_cb = mx.callback.do_checkpoint(prefix, 1)
epoch_cb = None
# def _epoch_callback(epoch, sym, arg_params, aux_params):
# print('epoch-end', epoch)
model.fit(train_dataiter,
begin_epoch=begin_epoch,
num_epoch=end_epoch,
eval_data=val_dataiter,
eval_metric=eval_metrics,
kvstore='device',
optimizer=opt,
# optimizer_params = optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
allow_missing=True,
batch_end_callback=_batch_callback,
epoch_end_callback=epoch_cb)
def main():
# time.sleep(3600*6.5)
global args
args = parse_args()
train_net(args)
if __name__ == '__main__':
main()
|
python
|
n= str(input("digite um valor ")).upper().strip()
d=n.split()
j= ''.join(d)
inver= ''
for i in range(len(j)-1,-1,-1):
inver+=j[i]
if n==inver:
print("o valor é um palindromo")
else:
print("o valor não é um palindromo")
|
python
|
import itertools
import random
S = " "
def main():
# init
gophers_count = 100
windmills_count = 18
factors = [17, 13, 11, 7, 5, 3, 2]
seed = 1951
a, b, c = [], [], []
random.seed(seed)
# generate input data for each night
for f in factors:
windmills = [f] * windmills_count
state = night_check(windmills, gophers_count)
calc = sum(state)
# a = b + c
a.append(calc)
b.append(calc // f)
c.append(calc % f)
print(windmills)
print(state)
print("%d = %d * %d + %d\n" %(a[-1], f, b[-1], c[-1]))
# check k and l from following equations, we search for lowest k and l
# f1 * k + a1 = f2 * l + a2, ex.:
# 17 * k + a1 = 13 * l + a2
# 13 * k + a1 = 11 * l + a2 ...
# later we store results in k array
f_range = range(len(factors))
kl = [[0 for i in f_range] for j in f_range]
for i, j in itertools.product(f_range, f_range):
f1 = factors[i]
f2 = factors[j]
# a = b + c
a1, a2 = a[i], a[j]
b1, b2 = b[i], b[j]
c1, c2 = c[i], c[j]
lowest_common = 0
k = 0
l = 0
while True:
g1 = f1 * (k + b1) + c1
g2 = f2 * (l + b2) + c2
lowest_common = max(g1, g2)
if g1 == g2:
kl[i][j] = str([k, l, lowest_common])
break
elif g1 < g2:
step = (g2 - g1) // f1
k += max(step, 1)
elif g2 < g1:
step = (g1 - g2) // f2
l += max(step, 1)
if g1 > gophers_count or g2 > gophers_count:
print("Error didn't find common")
break
print_array(kl)
def night_check(windmills, gophers_count):
result = [0] * len(windmills)
for i in range(gophers_count):
index = random.randint(0, len(windmills) - 1)
result[index] = (result[index] + 1) % windmills[index]
return result
def print_array(arr):
for row in arr:
s = S.join([str(elem) for elem in row])
print(s)
if __name__ == "__main__":
main()
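# Worked example with illustrative remainders (not taken from an actual seeded run):
# for f1 = 17 with remainder a1 = 3 and f2 = 13 with remainder a2 = 6, the loop above
# searches for the smallest k, l with 17*k + 3 == 13*l + 6; k = 4, l = 5 gives the
# common count 71, which is what would be stored in kl[i][j] as [4, 5, 71].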
|
python
|
from tensorflow.keras.layers import LSTM, Dense, TimeDistributed, Masking, BatchNormalization, Dropout, Input, \
    Bidirectional, ConvLSTM2D
from tensorflow.keras.models import Model
from lsct.models.cnn_1d import CNN1D
from cnn_lstm.attention_with_context import Attention
def create_cnn_lstm_model(clip_length, feature_length=4096, cnn_filters=(32, 64), pooling_sizes=(4, 4),
lstm_filters=(32, 64), mlp_filters=(64, 32, 8), using_dropout=True, using_bidirectional=False,
using_cnn=True, using_attention=False, dropout_rate=0.1):
"""
Create CNN-LSTM model for VQA
:param clip_length: clip length
:param feature_length: feature length
:param cnn_filters: filters in 1D CNN
:param pooling_sizes: pooling sizes in 1D CNN
:param lstm_filters: filters in LSTM
:param mlp_filters: filters in the MLP head
:param using_dropout: flag to use dropout or not
:param using_bidirectional: flag to use bidirectional LSTM or not
:param using_cnn: flag to use 1D CNN or not
:param using_attention: flag to use an attention layer on the final LSTM outputs or not
:param dropout_rate: dropout rate
:return: CNN-LSTM model
"""
if using_cnn:
cnn_model = CNN1D(filters=cnn_filters, pooling_sizes=pooling_sizes, using_dropout=using_dropout,
dropout_rate=dropout_rate)
input_shape = (None, clip_length, feature_length)
else:
input_shape = (None, clip_length)
inputs = Input(shape=input_shape)
if using_cnn:
x = TimeDistributed(cnn_model)(inputs)
else:
x = inputs
x = Masking(mask_value=0.)(x)
for i, lstm_filter in enumerate(lstm_filters):
if i < len(lstm_filters) - 1:
if using_bidirectional:
x = Bidirectional(LSTM(lstm_filter, return_sequences=True))(x)
else:
x = LSTM(lstm_filter, return_sequences=True)(x)
else:
if using_attention:
if using_bidirectional:
x = Bidirectional(LSTM(lstm_filter, return_sequences=True))(x)
else:
x = LSTM(lstm_filter, return_sequences=True)(x)
else:
if using_bidirectional:
x = Bidirectional(LSTM(lstm_filter))(x)
else:
x = LSTM(lstm_filter)(x)
if using_attention:
x = Attention()(x)
for mlp_filter in mlp_filters:
x = Dense(mlp_filter)(x)
if using_dropout:
x = Dropout(dropout_rate)(x)
outputs = Dense(1, activation='linear')(x)
model = Model(inputs=inputs, outputs=outputs)
model.summary()
return model
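# A minimal instantiation sketch (not part of the original module); clip_length=16
# and feature_length=4096 are illustrative values, and with using_cnn=True the model
# expects inputs shaped (batch, num_clips, clip_length, feature_length).
if __name__ == "__main__":
    model = create_cnn_lstm_model(clip_length=16, feature_length=4096,
                                  using_dropout=True, using_bidirectional=False,
                                  using_cnn=True, using_attention=False)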
|
python
|
"""CFNgin entrypoint."""
import logging
import os
import re
import sys
from yaml.constructor import ConstructorError
from runway._logging import PrefixAdaptor
from runway.util import MutableMap, SafeHaven, cached_property
from .actions import build, destroy, diff
from .config import render_parse_load as load_config
from .context import Context as CFNginContext
from .environment import parse_environment
from .providers.aws.default import ProviderBuilder
# explicitly name logger so it's not redundant
LOGGER = logging.getLogger("runway.cfngin")
class CFNgin(object):
"""Control CFNgin.
Attributes:
EXCLUDE_REGEX (str): Regex used to exclude YAML files when searching
for config files.
EXCLUDE_LIST (str): Global list of YAML file names to exclude when
searching for config files.
concurrency (int): Max number of CFNgin stacks that can be deployed
concurrently. If the value is ``0``, will be constrained based on
the underlying graph.
interactive (bool): Whether or not to prompt the user before taking
action.
parameters (MutableMap): Combination of the parameters provided when
initializing the class and any environment files that are found.
recreate_failed (bool): Destroy and re-create stacks that are stuck in
a failed state from an initial deployment when updating.
region (str): The AWS region where CFNgin is currently being executed.
sys_path (str): Working directory.
tail (bool): Whether or not to display all CloudFormation events in the
terminal.
"""
EXCLUDE_REGEX = r"runway(\..*)?\.(yml|yaml)"
EXCLUDE_LIST = ["buildspec.yml", "docker-compose.yml"]
def __init__(self, ctx, parameters=None, sys_path=None):
"""Instantiate class.
Args:
ctx (runway.context.Context): Runway context object.
parameters (Optional[Dict[str, Any]]): Parameters from Runway.
sys_path (Optional[str]): Working directory.
"""
self.__ctx = ctx
self._env_file_name = None
self.concurrency = ctx.env.max_concurrent_cfngin_stacks
self.interactive = ctx.is_interactive
self.parameters = MutableMap()
self.recreate_failed = ctx.is_noninteractive
self.region = ctx.env_region
self.sys_path = sys_path or os.getcwd()
self.tail = bool(ctx.env.debug or ctx.env.verbose)
self.parameters.update(self.env_file)
if parameters:
LOGGER.debug("adding Runway parameters to CFNgin parameters")
self.parameters.update(parameters)
self._inject_common_parameters()
@cached_property
def env_file(self):
"""Contents of a CFNgin environment file.
Returns:
MutableMap
"""
result = {}
supported_names = [
"{}.env".format(self.__ctx.env_name),
"{}-{}.env".format(self.__ctx.env_name, self.region),
]
for _, file_name in enumerate(supported_names):
file_path = os.path.join(self.sys_path, file_name)
if os.path.isfile(file_path):
LOGGER.info("found environment file: %s", file_path)
self._env_file_name = file_path
with open(file_path, "r") as file_:
result.update(parse_environment(file_.read()))
return MutableMap(**result)
def deploy(self, force=False, sys_path=None):
"""Run the CFNgin deploy action.
Args:
force (bool): Explicitly enable the action even if an environment
file is not found.
sys_path (Optional[str]): Explicitly define a path to work in.
If not provided, ``self.sys_path`` is used.
"""
if self.should_skip(force):
return
if not sys_path:
sys_path = self.sys_path
config_file_names = self.find_config_files(sys_path=sys_path)
with SafeHaven(
environ=self.__ctx.env_vars, sys_modules_exclude=["awacs", "troposphere"]
):
for config_name in config_file_names:
logger = PrefixAdaptor(os.path.basename(config_name), LOGGER)
logger.notice("deploy (in progress)")
with SafeHaven(
argv=["stacker", "build", config_name],
sys_modules_exclude=["awacs", "troposphere"],
):
ctx = self.load(config_name)
action = build.Action(
context=ctx,
provider_builder=self._get_provider_builder(
ctx.config.service_role
),
)
action.execute(concurrency=self.concurrency, tail=self.tail)
logger.success("deploy (complete)")
def destroy(self, force=False, sys_path=None):
"""Run the CFNgin destroy action.
Args:
force (bool): Explicitly enable the action even if an environment
file is not found.
sys_path (Optional[str]): Explicitly define a path to work in.
If not provided, ``self.sys_path`` is used.
"""
if self.should_skip(force):
return
if not sys_path:
sys_path = self.sys_path
config_file_names = self.find_config_files(sys_path=sys_path)
# destroy should run in reverse to handle dependencies
config_file_names.reverse()
with SafeHaven(environ=self.__ctx.env_vars):
for config_name in config_file_names:
logger = PrefixAdaptor(os.path.basename(config_name), LOGGER)
logger.notice("destroy (in progress)")
with SafeHaven(argv=["stacker", "destroy", config_name]):
ctx = self.load(config_name)
action = destroy.Action(
context=ctx,
provider_builder=self._get_provider_builder(
ctx.config.service_role
),
)
action.execute(
concurrency=self.concurrency, force=True, tail=self.tail
)
logger.success("destroy (complete)")
def load(self, config_path):
"""Load a CFNgin config into a context object.
Args:
config_path (str): Valid path to a CFNgin config file.
Returns:
:class:`runway.cfngin.context.Context`
"""
LOGGER.debug("loading CFNgin config: %s", os.path.basename(config_path))
try:
config = self._get_config(config_path)
return self._get_context(config, config_path)
except ConstructorError as err:
if err.problem.startswith(
"could not determine a constructor " "for the tag '!"
):
LOGGER.error(
'"%s" is located in the module\'s root directory '
"and appears to be a CloudFormation template; "
"please move CloudFormation templates to a subdirectory",
config_path,
)
sys.exit(1)
raise
def plan(self, force=False, sys_path=None):
"""Run the CFNgin plan action.
Args:
force (bool): Explicitly enable the action even if an environment
file is not found.
sys_path (Optional[str]): Explicitly define a path to work in.
If not provided, ``self.sys_path`` is used.
"""
if self.should_skip(force):
return
if not sys_path:
sys_path = self.sys_path
config_file_names = self.find_config_files(sys_path=sys_path)
with SafeHaven(environ=self.__ctx.env_vars):
for config_name in config_file_names:
logger = PrefixAdaptor(os.path.basename(config_name), LOGGER)
logger.notice("plan (in progress)")
with SafeHaven(argv=["stacker", "diff", config_name]):
ctx = self.load(config_name)
action = diff.Action(
context=ctx,
provider_builder=self._get_provider_builder(
ctx.config.service_role
),
)
action.execute()
logger.success("plan (complete)")
def should_skip(self, force=False):
"""Determine if action should be taken or not.
Args:
force (bool): If ``True``, will always return ``False`` meaning
the action should not be skipped.
Returns:
bool: Skip action or not.
"""
if force or self.env_file:
return False
LOGGER.info("skipped; no parameters and environment file not found")
return True
def _get_config(self, file_path, validate=True):
"""Initialize a CFNgin config object from a file.
Args:
file_path (str): Path to the config file to load.
validate (bool): Validate the loaded config.
Returns:
:class:`runway.cfngin.config.Config`
"""
with open(file_path, "r") as file_:
raw_config = file_.read()
return load_config(raw_config, self.parameters, validate)
def _get_context(self, config, config_path):
"""Initialize a CFNgin context object.
Args:
config (:class:`runway.cfngin.config.Config): CFNgin config object.
config_path (str): Path to the config file that was provided.
Returns:
:class:`runway.cfngin.context.Context`
"""
return CFNginContext(
boto3_credentials=self.__ctx.boto3_credentials,
config=config,
config_path=config_path,
environment=self.parameters,
force_stacks=[], # placeholder
region=self.region,
stack_names=[], # placeholder
)
def _get_provider_builder(self, service_role=None):
"""Initialize provider builder.
Args:
service_role (Optional[str]): CloudFormation service role.
Returns:
ProviderBuilder
"""
if self.interactive:
LOGGER.verbose("using interactive AWS provider mode")
else:
LOGGER.verbose("using default AWS provider mode")
return ProviderBuilder(
interactive=self.interactive,
recreate_failed=self.recreate_failed,
region=self.region,
service_role=service_role,
)
def _inject_common_parameters(self):
"""Add common parameters if they don't already exist.
Adding these commonly used parameters will remove the need to add
lookup support (mainly for environment variable lookups) in places
such as ``cfngin_bucket``.
Injected Parameters
~~~~~~~~~~~~~~~~~~~
**environment (str)**
Taken from the ``DEPLOY_ENVIRONMENT`` environment variable. This
            will be the current Runway environment being processed.
**region (str)**
Taken from the ``AWS_REGION`` environment variable. This will be
the current region being deployed to.
"""
if not self.parameters.get("environment"):
self.parameters["environment"] = self.__ctx.env_name
if not self.parameters.get("region"):
self.parameters["region"] = self.region
@classmethod
def find_config_files(cls, exclude=None, sys_path=None):
"""Find CFNgin config files.
Args:
exclude (Optional[List[str]]): List of file names to exclude. This
list is appended to the global exclude list.
sys_path (Optional[str]): Explicitly define a path to search for
config files.
Returns:
List[str]: Path to config files that were found.
"""
if not sys_path:
sys_path = os.getcwd()
elif os.path.isfile(sys_path):
return [sys_path]
exclude = exclude or []
result = []
exclude.extend(cls.EXCLUDE_LIST)
for root, _dirs, files in os.walk(sys_path):
for name in files:
if re.match(cls.EXCLUDE_REGEX, name) or (
name in exclude or name.startswith(".")
):
# Hidden files (e.g. .gitlab-ci.yml), Runway configs,
# and docker-compose files definitely aren't stacker
# config files
continue
if os.path.splitext(name)[-1] in [".yaml", ".yml"]:
result.append(os.path.join(root, name))
break # only need top level files
result.sort()
return result
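# --- Illustrative sketch (not part of Runway) ---
# A minimal, standalone demonstration of the filtering performed by
# ``find_config_files`` above: Runway configs, docker-compose files, and
# hidden files are skipped, and only ``.yml``/``.yaml`` files are treated as
# CFNgin configs. The candidate file names below are hypothetical.
if __name__ == "__main__":
    import os
    import re

    _exclude_regex = r"runway(\..*)?\.(yml|yaml)"  # mirrors EXCLUDE_REGEX above
    _exclude_list = ["buildspec.yml", "docker-compose.yml"]  # mirrors EXCLUDE_LIST
    _candidates = [
        "runway.yml",          # Runway config -> skipped by the regex
        ".gitlab-ci.yml",      # hidden file -> skipped
        "docker-compose.yml",  # skipped by the exclude list
        "networking.yaml",     # CFNgin config -> kept
        "app.yml",             # CFNgin config -> kept
        "README.md",           # not YAML -> skipped
    ]
    _kept = sorted(
        name
        for name in _candidates
        if not re.match(_exclude_regex, name)
        and name not in _exclude_list
        and not name.startswith(".")
        and os.path.splitext(name)[-1] in [".yaml", ".yml"]
    )
    print(_kept)  # expected: ['app.yml', 'networking.yaml']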
|
python
|
x3 = ('foo'
('bar'<caret>
|
python
|
import numpy as np
from pymgt import *
from pymgt.metrics import *
from pymgt.ppmt_utils import friedman_index
def test_():
ndata = 1000
np.random.seed(1)
x = np.random.uniform(0.0, 1.0, size=ndata)
metrics = [
("friedman", FRIEDMAN_METRIC, False),
("kstest", KS_METRIC, False),
("anderson", ANDERSON_METRIC, False),
("shapiro", SHAPIRO_METRIC, True),
("jarque", JARQUE_METRIC, True),
]
for (name, metric, maximising) in metrics:
assert metric.name == name
assert metric.maximising == maximising
pi1 = metric(x)
pi2, test = metric.compute_test_best(x, pi1/2.0 if maximising else pi1*2.0)
assert pi1 == pi2
assert test
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (c) {% now 'utc', '%Y' %}, {{ cookiecutter.author }}
# All rights reserved.
#
import unittest
from cnct import ConnectClient
from connect_processor.app.cancel import Cancel
from unittest.mock import patch, MagicMock
from tests.test_util import TestUtils
client = ConnectClient('Key', use_specs=False)
class TestCancel(unittest.TestCase):
# //////////////////////
# CANCEL UNIT TESTS
# /////////////////////
@patch('connect_processor.app.utils.utils.Utils.approve_fulfillment_request',
MagicMock(return_value=TestUtils.get_response("purchase_subscription_response.json")))
@patch('connect_processor.app.utils.utils.Utils._get_template_by_product',
MagicMock(return_value="TL-###-###-###"))
def test_cancel_pass(self):
request = TestUtils.get_response("create_purchase_request_body.json")
response = TestUtils.get_response("purchase_subscription_response.json")
result = Cancel.process_request(request, client)
self.assertDictEqual(result, response)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains functions used to convert osm data to json and save to MongoDB.
"""
import xml.etree.cElementTree as ET
import json
#some post on stackoverflow
def elementtree_to_dict(element):
""" Function used to recursively convert a element tree object to a dictionary
Args:
element (:obj:): cElementTree object.
Returns:
dict: A elementtree object in a JSON formated dict .
"""
node = dict()
node['xml_tag_type'] = element.tag
text = getattr(element, 'text', None)
if text is not None:
node['text'] = text
node.update(element.items()) # element's attributes
child_nodes = {}
for child in element: # element's children
child_nodes.setdefault(child.get('k'), []).append( elementtree_to_dict(child))
# convert all single-element lists into non-lists
for key, value in child_nodes.items():
#print key, value
if len(value) == 1:
child_nodes[key] = value[0]
node.update(child_nodes.items())
return node
def convert_to_json(osm):
""" Convert a osm file to a json and save to a file.
Args:
osm (string): Path to the osm file.
"""
NODE_TAG = 'node'
WAY_TAG = 'way'
context = ET.iterparse(osm, events=("start",))
with open("miami_osm.json", "a") as jsonfile:
for event, elem in context:
if elem.tag == NODE_TAG or elem.tag == WAY_TAG:
jsonfile.write(json.dumps(elementtree_to_dict(elem)))
jsonfile.write('\n')
def save_to_mongo():
""" Insert a JSON formated OSM in MongoDB.
"""
import pymongo
from pymongo import MongoClient
client = MongoClient("mongodb://localhost:27017")
db = client.osm
with open('miami_osm.json') as f:
for line in f:
db.miami.insert_one(json.loads(line))
client.close()
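# A small, hedged usage sketch (not from the original project): run
# ``elementtree_to_dict`` on an in-memory OSM-like fragment to show the shape
# of the resulting dictionary. The XML snippet below is invented for
# illustration only.
if __name__ == "__main__":
    sample = '<node id="1" lat="25.77" lon="-80.19"><tag k="amenity" v="cafe"/></node>'
    element = ET.fromstring(sample)
    print(json.dumps(elementtree_to_dict(element), indent=2))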
|
python
|
class Solution:
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
productions = []
n = len(nums)
if n == 0:
return None
# def recursiveProductExceptSelf(idx=0, previous_value=1):
# nonlocal productions
# if idx == n-1:
# productions = [previous_value]
# return nums[idx]
# mutply_after_values = recursiveProductExceptSelf(idx+1, previous_value=nums[idx]*previous_value)
# except_self = previous_value*mutply_after_values
# productions = [except_self] + productions
# # productions += [previous_value*recursiveProductExceptSelf(nums[1:], value, previous_value)]
# return nums[idx]*mutply_after_values
# recursiveProductExceptSelf()
productions = [nums[0]]
for i in range(n-1):
productions += [nums[i+1] * productions[i]]
# print("front production", productions)
p = 1
for i in range(n-1, 0, -1):
productions[i] = productions[i-1] * p
p *= nums[i]
productions[0] = p
return productions
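# Brief usage check (added for illustration, not part of the original
# solution): the product-except-self of [1, 2, 3, 4] is [24, 12, 8, 6].
if __name__ == "__main__":
    print(Solution().productExceptSelf([1, 2, 3, 4]))  # expected [24, 12, 8, 6]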
|
python
|
# Generated by Django 3.0.4 on 2020-03-22 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0006_auto_20200322_1440'),
]
operations = [
migrations.AddField(
model_name='violationreport',
name='content_id',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='violationreport',
name='flagged_type',
field=models.TextField(default=0),
preserve_default=False,
),
]
|
python
|
# import the necessary packages
from sklearn.preprocessing import LabelBinarizer
import numpy as np
class CifarGenerator:
"""
Generator class responsible for supplying data to the model.
Attributes
----------
x: np ndarray
array of images
y: np ndarray
array of class labels
    num_images: int
        number of images in the dataset
    lb: LabelBinarizer
        label binarizer used to convert the integer class labels into one-hot vectors
"""
def __init__(self, x, y):
# initialize the data
self.x = x
self.y = y
# initialize the instance variables
self.num_images = self.x.shape[0]
# convert the labels from integers into vectors
self.lb = LabelBinarizer()
self.y = self.lb.fit_transform(self.y)
def generator(self, passes=np.inf):
# initialize a variable to keep a count on the epochs
epochs = 0
# loop through the dataset indefinitely
while(epochs < passes):
# loop through the dataset
for idx in range(0, self.num_images):
# yield the current data point
yield self.x[idx], self.y[idx]
# increment the epoch count
epochs += 1
class CifarPreprocessor:
def __init__(self, preprocessors):
# initialize the instance variables
self.preprocessors = preprocessors
def preprocess(self, img, lbl):
# loop through the preprocessors and preprocess the image
for p in self.preprocessors:
img = p.preprocess(img)
# return the processed data
return img, lbl
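# A hedged, self-contained sketch of how these classes could be wired
# together. The tiny random dataset and the identity "preprocessor" below are
# placeholders invented for illustration; real usage would pass CIFAR images
# and actual preprocessor objects.
if __name__ == "__main__":
    class _IdentityPreprocessor:
        def preprocess(self, image):
            return image

    x = np.random.rand(4, 32, 32, 3)      # 4 fake RGB images
    y = np.array([0, 1, 2, 1])            # 4 fake class labels
    gen = CifarGenerator(x, y)
    prep = CifarPreprocessor([_IdentityPreprocessor()])
    image, label = next(gen.generator())  # first (image, one-hot label) pair
    image, label = prep.preprocess(image, label)
    print(image.shape, label)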
|
python
|
"""Functions to assist with remote logging of InVEST usage."""
import logging
import urllib
import urllib2
import json
import Pyro4
LOGGER = logging.getLogger('natcap.invest.remote_logging')
Pyro4.config.SERIALIZER = 'marshal' # lets us pass null bytes in strings
_ENDPOINTS_INDEX_URL = (
'http://data.naturalcapitalproject.org/server_registry/'
'invest_usage_logger_v2')
class LoggingServer(object):
"""RPC server for logging invest runs and getting database summaries."""
_LOG_FIELD_NAMES = [
'model_name',
'invest_release',
'time',
'ip_address',
'bounding_box_union',
'bounding_box_intersection',
'node_hash',
'system_full_platform_string',
'system_preferred_encoding',
'system_default_language',
'session_id',
]
_EXIT_LOG_FIELD_NAMES = [
'session_id',
'time',
'ip_address',
'status',
]
@Pyro4.expose
def log_invest_run(self, data, mode):
"""Log some parameters of an InVEST run.
        The run metadata is forwarded to the remote usage-logging service
        whose endpoints are listed at `_ENDPOINTS_INDEX_URL`. The mode
        specifies whether this is a run log or an exit status notification,
        and the corresponding endpoint is used in each case.
        Parameters:
            data (dict): a flat dictionary with data about the InVEST run
                where the keys of the dictionary are at least
                self._LOG_FIELD_NAMES
            mode (string): one of 'log' or 'exit'. 'log' posts to the
                'START' endpoint, while 'exit' posts to the 'FINISH'
                endpoint.
Returns:
None
"""
endpoints = json.loads(urllib.urlopen(
_ENDPOINTS_INDEX_URL).read().strip())
try:
if mode == 'log':
url = endpoints['START']
elif mode == 'exit':
url = endpoints['FINISH']
else:
raise ValueError(
"Unknown mode '%s', expected 'log' or 'exit'" % mode)
# Add info about the client's IP
data_copy = data.copy()
if Pyro4.current_context.client is not None:
data_copy['ip_address'] = (
Pyro4.current_context.client.sock.getpeername()[0])
else:
data_copy['ip_address'] = 'local'
urllib2.urlopen(
urllib2.Request(url, urllib.urlencode(data_copy)))
except:
# print something locally for our log and raise back to client
LOGGER.exception("log_invest_run failed")
raise
extra_fields = set(data_copy).difference(self._LOG_FIELD_NAMES)
if len(extra_fields) > 0:
LOGGER.warn(
"Warning there were extra fields %s passed to logger. "
" Expected: %s Received: %s", sorted(extra_fields),
sorted(self._LOG_FIELD_NAMES), sorted(data_copy))
def execute(args):
"""Function to start a remote procedure call server.
Parameters:
args['hostname'] (string): network interface to bind to
args['port'] (int): TCP port to bind to
Returns:
never
"""
daemon = Pyro4.Daemon(args['hostname'], args['port'])
uri = daemon.register(
LoggingServer(),
'natcap.invest.remote_logging')
LOGGER.info("natcap.invest.usage_logger ready. Object uri = %s", uri)
daemon.requestLoop()
|
python
|
from dolfin.fem.problem import LinearVariationalProblem
from dolfin.cpp.fem import LinearVariationalSolver
from fenics_utils.formulation.cfd import IncompressibleNsIpcs
from fenics_utils.formulation.cfd import AdvectionDiffusionScalar
from fenics_utils.solvers.linear import LinearSolver
class AdvectionDiffusionScalarNS:
def __init__(self, V, Q, D, dt_ns, mu, rho, f_ns, eps, f_ad, bcu, bcp,
bcc=(), solvers_parameters=None, dt_ad=None):
self.V = V
self.Q = Q
self.D = D
self.dt_ns = dt_ns
self.mu = mu
self.rho = rho
self.f_ns = f_ns
self.eps = eps
self.f_ad = f_ad
self.bcu = bcu
self.bcp = bcp
self.bcc = bcc
self.solvers_parameters = solvers_parameters or self._set_default_solvers_parameters()
self.dt_ad = dt_ad if dt_ad is not None else dt_ns
# initialize empty variables
self.ns_formulation = None
self.ad_formulation = None
def _set_default_solvers_parameters(self):
return [{'linear_solver': 'bicgstab', 'preconditioner': 'hypre_amg'},
{'linear_solver': 'bicgstab', 'preconditioner': 'hypre_amg'},
{'linear_solver': 'cg', 'preconditioner': 'sor'},
{'linear_solver': 'bicgstab', 'preconditioner': 'hypre_amg'}]
def set(self):
(a1, L1), (a2, L2), (a3, L3), (a4, L4) = self.set_eqs()
u, _, p, _ = self.ns_formulation.get_functions()
c, _ = self.ad_formulation.get_functions()
solvers = [
LinearSolver(a1, L1, u, self.bcu),
LinearSolver(a2, L2, p, self.bcp),
LinearSolver(a3, L3, u, None),
]
problem_ad = LinearVariationalProblem(a4, L4, c, self.bcc)
solvers.append(LinearVariationalSolver(problem_ad))
# update solver parameters
for solver, solver_parameters in zip(solvers, self.solvers_parameters):
solver.parameters.update(solver_parameters)
return solvers
def set_eqs(self):
"""Set equations for all the variational problems.
Useful if the user wants only the equations.
"""
# NS formulation
self.ns_formulation = IncompressibleNsIpcs(self.V, self.Q, self.dt_ns,
self.mu, self.rho, self.f_ns)
a1, L1 = self.ns_formulation.formulate_step1()
a2, L2 = self.ns_formulation.formulate_step2()
a3, L3 = self.ns_formulation.formulate_step3()
u, _, p, _ = self.ns_formulation.get_functions()
# advection-diffusion formulation
self.ad_formulation = AdvectionDiffusionScalar(self.D, self.dt_ad,
self.eps, u, self.f_ad)
a4, L4 = self.ad_formulation.formulate()
return [(a1, L1), (a2, L2), (a3, L3), (a4, L4)]
def get_functions(self):
if self.ns_formulation is None or self.ad_formulation is None:
return None
return self.ns_formulation.get_functions() + self.ad_formulation.get_functions()
|
python
|
from __future__ import print_function
import boto3
import logging
import time
current_session = boto3.session.Session()
current_region = current_session.region_name
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#create a client connection to ec2
ec2_client = boto3.client('ec2', current_region)
# used to evaluate if any returned structures are empty
def is_empty(any_structure):
if any_structure:
return False
else:
return True
def get_instances_to_backup(ec2, server_name):
# This function creates a dict of ec2 instances that have
# a ScheduledBackup tag set to "true"
#print("Looking up instance to backup")
try:
if server_name:
print("Looking up server to image.")
response = ec2_client.describe_instances(
Filters=[{'Name':'tag:Name', 'Values': [server_name]}
])
else:
print("Finding serves with ScheduledBackup tag to image.")
response = ec2_client.describe_instances(
Filters=[{'Name':'instance-state-name', 'Values': ['running']},
{'Name': 'tag:ScheduledBackup','Values':['true']}
])
#see if the response is empty
if is_empty(response["Reservations"]):
raise Exception('No instances were returned. Please make sure that there is a tag ScheduledBackup with a value of true')
else:
return response
except Exception as e:
# Print to the console if there is an error
print("get_instances_to_backup error: " + str(e))
exit()
def create_image(ec2, image_type):
print("Started creating image")
timestamp = time.strftime('-' + image_type + '-%H%M%b%d%Y')
try:
for reservation in ec2["Reservations"]:
for instance in reservation["Instances"]:
instance_id = instance["InstanceId"]
image_name = instance_id + timestamp
                # default to the instance id if the instance has no Name tag
                server_name = instance_id
                # Add the ServerName Tag to the server
                for tag in instance["Tags"]:
                    if tag["Key"] == "Name":
                        server_name = tag["Value"]
                        image_name = server_name + "-" + image_name
# for debug purposes and test loop
# print(image_name)
#create the image
response = ec2_client.create_image(InstanceId=instance_id,Name=image_name)
print("Created image with id: " + response["ImageId"])
create_tags_for_image(response, server_name, timestamp, instance_id)
except Exception as e:
print("create_image error: " + str(e))
def create_tags_for_image(image, server_name, timestamp, instance_id):
#this function adds a Name tag to the created image
ec2 = boto3.resource('ec2')
ec2_image = ec2.Image(image["ImageId"])
name_tag = server_name + timestamp
print("Adding Name tag " + name_tag + " to image")
    ec2_image.create_tags(
        Tags=[
            {
                'Key': 'Name',
                'Value': name_tag
            },
            {
                'Key': 'instanceId',
                'Value': instance_id
            },
        ]
    )
def lambda_handler(event, context):
try:
if event["server_name"]:
print("Server name passed. Creating image of specific server.")
server_name = event["server_name"]
image_type = "called"
#exit()
except Exception as e:
server_name = ""
image_type = "auto"
print("Starting scheduled image creation.")
try:
#get a list of instances with tag ScheduledBackup set to "true"
instances = get_instances_to_backup(ec2_client, server_name)
if instances == "Fail":
raise Exception("No instances were returned.")
# create images
create_image(instances, image_type)
return True
except Exception as e:
print("lambda_handler error: "+ str(e))
return False
lambda_handler("", "")
|
python
|
"""!
@brief Phase oscillatory network for pattern recognition based on modified Kuramoto model.
@details Implementation based on paper @cite article::nnet::syncpr::1.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import cmath
import numpy
from pyclustering.nnet import solve_type, initial_type, conn_type,conn_represent
from pyclustering.nnet.sync import sync_network, sync_dynamic, sync_visualizer
import pyclustering.core.syncpr_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class syncpr_dynamic(sync_dynamic):
"""!
@brief Represents output dynamic of syncpr (Sync for Pattern Recognition).
"""
def __init__(self, phase, time, ccore):
"""!
@brief Constructor of syncpr dynamic.
@param[in] phase (list): Dynamic of oscillators on each step of simulation. If ccore pointer is specified than it can be ignored.
@param[in] time (list): Simulation time.
@param[in] ccore (ctypes.pointer): Pointer to CCORE sync_dynamic instance in memory.
"""
super().__init__(phase, time, ccore)
class syncpr_visualizer(sync_visualizer):
"""!
@brief Visualizer of output dynamic of syncpr network (Sync for Pattern Recognition).
"""
@staticmethod
def show_pattern(syncpr_output_dynamic, image_height, image_width):
"""!
@brief Displays evolution of phase oscillators as set of patterns where the last one means final result of recognition.
@param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
@param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
@param[in] image_width (uint): Width of the pattern.
"""
number_pictures = len(syncpr_output_dynamic)
iteration_math_step = 1.0
if number_pictures > 50:
iteration_math_step = number_pictures / 50.0
number_pictures = 50
number_cols = int(numpy.ceil(number_pictures ** 0.5))
number_rows = int(numpy.ceil(number_pictures / number_cols))
real_index = 0, 0
double_indexer = True
if (number_cols == 1) or (number_rows == 1):
real_index = 0
double_indexer = False
(figure, axarr) = plt.subplots(number_rows, number_cols)
if (number_pictures > 1):
plt.setp([ax for ax in axarr], visible=False)
iteration_display = 0.0
for iteration in range(len(syncpr_output_dynamic)):
if iteration >= iteration_display:
iteration_display += iteration_math_step
ax_handle = axarr
if number_pictures > 1:
ax_handle = axarr[real_index]
syncpr_visualizer.__show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration)
if double_indexer is True:
real_index = real_index[0], real_index[1] + 1
if (real_index[1] >= number_cols):
real_index = real_index[0] + 1, 0
else:
real_index += 1
plt.show()
plt.close(figure)
@staticmethod
def animate_pattern_recognition(syncpr_output_dynamic, image_height, image_width, animation_velocity = 75, title = None, save_movie = None):
"""!
        @brief Shows animation of pattern recognition process that has been performed by the oscillatory network.
@param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
@param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
@param[in] image_width (uint): Width of the pattern.
@param[in] animation_velocity (uint): Interval between frames in milliseconds.
@param[in] title (string): Title of the animation that is displayed on a figure if it is specified.
@param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.
"""
figure = plt.figure()
def init_frame():
return frame_generation(0)
def frame_generation(index_dynamic):
figure.clf()
if (title is not None):
figure.suptitle(title, fontsize = 26, fontweight = 'bold')
ax1 = figure.add_subplot(121, projection='polar')
ax2 = figure.add_subplot(122)
dynamic = syncpr_output_dynamic.output[index_dynamic]
artist1, = ax1.plot(dynamic, [1.0] * len(dynamic), marker='o', color='blue', ls='')
artist2 = syncpr_visualizer.__show_pattern(ax2, syncpr_output_dynamic, image_height, image_width, index_dynamic)
return [artist1, artist2]
cluster_animation = animation.FuncAnimation(figure, frame_generation, len(syncpr_output_dynamic), interval = animation_velocity, init_func = init_frame, repeat_delay = 5000);
if (save_movie is not None):
# plt.rcParams['animation.ffmpeg_path'] = 'C:\\Users\\annoviko\\programs\\ffmpeg-win64-static\\bin\\ffmpeg.exe';
# ffmpeg_writer = animation.FFMpegWriter();
# cluster_animation.save(save_movie, writer = ffmpeg_writer, fps = 15);
cluster_animation.save(save_movie, writer='ffmpeg', fps=15, bitrate=1500)
else:
plt.show()
plt.close(figure)
@staticmethod
def __show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration):
"""!
@brief Draws pattern on specified ax.
@param[in] ax_handle (Axis): Axis where pattern should be drawn.
@param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
@param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
@param[in] image_width (uint): Width of the pattern.
@param[in] iteration (uint): Simulation iteration that should be used for extracting pattern.
@return (matplotlib.artist) Artist (pattern) that is rendered in the canvas.
"""
current_dynamic = syncpr_output_dynamic.output[iteration]
stage_picture = [(255, 255, 255)] * (image_height * image_width)
for index_phase in range(len(current_dynamic)):
phase = current_dynamic[index_phase]
pixel_color = math.floor( phase * (255 / (2 * math.pi)) )
stage_picture[index_phase] = (pixel_color, pixel_color, pixel_color)
stage = numpy.array(stage_picture, numpy.uint8)
stage = numpy.reshape(stage, (image_height, image_width) + ((3),)) # ((3),) it's size of RGB - third dimension.
image_cluster = Image.fromarray(stage)
artist = ax_handle.imshow(image_cluster, interpolation='none')
plt.setp(ax_handle, visible=True)
ax_handle.xaxis.set_ticklabels([])
ax_handle.yaxis.set_ticklabels([])
ax_handle.xaxis.set_ticks_position('none')
ax_handle.yaxis.set_ticks_position('none')
return artist
class syncpr(sync_network):
"""!
@brief Model of phase oscillatory network for pattern recognition that is based on the Kuramoto model.
@details The model uses second-order and third-order modes of the Fourier components.
Example:
@code
# Network size should be equal to size of pattern for learning.
net = syncpr(size_network, 0.3, 0.3);
# Train network using list of patterns (input images).
net.train(image_samples);
# Recognize image using 10 steps during 10 seconds of simulation.
sync_output_dynamic = net.simulate(10, 10, pattern, solve_type.RK4, True);
# Display output dynamic.
syncpr_visualizer.show_output_dynamic(sync_output_dynamic);
# Display evolution of recognition of the pattern.
syncpr_visualizer.show_pattern(sync_output_dynamic, image_height, image_width);
@endcode
"""
def __init__(self, num_osc, increase_strength1, increase_strength2, ccore = True):
"""!
@brief Constructor of oscillatory network for pattern recognition based on Kuramoto model.
@param[in] num_osc (uint): Number of oscillators in the network.
@param[in] increase_strength1 (double): Parameter for increasing strength of the second term of the Fourier component.
@param[in] increase_strength2 (double): Parameter for increasing strength of the third term of the Fourier component.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""
if (ccore is True) and ccore_library.workable():
self._ccore_network_pointer = wrapper.syncpr_create(num_osc, increase_strength1, increase_strength2)
else:
self._increase_strength1 = increase_strength1
self._increase_strength2 = increase_strength2
self._coupling = [[0.0 for i in range(num_osc)] for j in range(num_osc)]
super().__init__(num_osc, 1, 0, conn_type.ALL_TO_ALL, conn_represent.MATRIX, initial_type.RANDOM_GAUSSIAN, ccore)
def __del__(self):
"""!
@brief Default destructor of syncpr.
"""
if (self._ccore_network_pointer is not None):
wrapper.syncpr_destroy(self._ccore_network_pointer)
self._ccore_network_pointer = None
def __len__(self):
"""!
@brief Returns size of the network.
"""
if (self._ccore_network_pointer is not None):
return wrapper.syncpr_get_size(self._ccore_network_pointer)
else:
return self._num_osc
def train(self, samples):
"""!
@brief Trains syncpr network using Hebbian rule for adjusting strength of connections between oscillators during training.
@param[in] samples (list): list of patterns where each pattern is represented by list of features that are equal to [-1; 1].
"""
# Verify pattern for learning
for pattern in samples:
self.__validate_pattern(pattern)
if self._ccore_network_pointer is not None:
return wrapper.syncpr_train(self._ccore_network_pointer, samples)
length = len(self)
number_samples = len(samples)
for i in range(length):
for j in range(i + 1, len(self), 1):
# go through via all patterns
for p in range(number_samples):
value1 = samples[p][i]
value2 = samples[p][j]
self._coupling[i][j] += value1 * value2
self._coupling[i][j] /= length
self._coupling[j][i] = self._coupling[i][j]
def simulate(self, steps, time, pattern, solution = solve_type.RK4, collect_dynamic = True):
"""!
@brief Performs static simulation of syncpr oscillatory network.
@details In other words network performs pattern recognition during simulation.
        @param[in] steps (uint): Number of simulation steps.
@param[in] time (double): Time of simulation.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
@param[in] solution (solve_type): Type of solver that should be used for simulation.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' is True, then the dynamic for the whole simulation
                time is returned, otherwise only the last values (last step of simulation) of the dynamic are returned.
@see simulate_dynamic()
@see simulate_static()
"""
return self.simulate_static(steps, time, pattern, solution, collect_dynamic)
def simulate_dynamic(self, pattern, order = 0.998, solution = solve_type.RK4, collect_dynamic = False, step = 0.1, int_step = 0.01, threshold_changes = 0.0000001):
"""!
@brief Performs dynamic simulation of the network until stop condition is not reached.
@details In other words network performs pattern recognition during simulation.
                 The stop condition is defined by the input argument 'order', which represents memory order, but
                 the simulation can also be stopped if the convergence rate becomes low; its threshold is defined
                 by the argument 'threshold_changes'.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
@param[in] order (double): Order of process synchronization, distributed 0..1.
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
@param[in] step (double): Time step of one iteration of simulation.
@param[in] int_step (double): Integration step, should be less than step.
@param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.
        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' is True, then the dynamic for the whole simulation
                time is returned, otherwise only the last values (last step of simulation) of the dynamic are returned.
@see simulate()
@see simulate_static()
"""
self.__validate_pattern(pattern)
if self._ccore_network_pointer is not None:
ccore_instance_dynamic = wrapper.syncpr_simulate_dynamic(self._ccore_network_pointer, pattern, order, solution, collect_dynamic, step)
return syncpr_dynamic(None, None, ccore_instance_dynamic)
for i in range(0, len(pattern), 1):
if pattern[i] > 0.0:
self._phases[i] = 0.0
else:
self._phases[i] = math.pi / 2.0
# For statistics and integration
time_counter = 0
# Prevent infinite loop. It's possible when required state cannot be reached.
previous_order = 0
current_order = self.__calculate_memory_order(pattern)
# If requested input dynamics
dyn_phase = []
dyn_time = []
if collect_dynamic == True:
dyn_phase.append(self._phases)
dyn_time.append(0)
# Execute until sync state will be reached
while (current_order < order):
# update states of oscillators
self._phases = self._calculate_phases(solution, time_counter, step, int_step)
# update time
time_counter += step
# if requested input dynamic
if collect_dynamic == True:
dyn_phase.append(self._phases)
dyn_time.append(time_counter)
# update orders
previous_order = current_order
current_order = self.__calculate_memory_order(pattern)
# hang prevention
if abs(current_order - previous_order) < threshold_changes:
break
if collect_dynamic != True:
dyn_phase.append(self._phases)
dyn_time.append(time_counter)
output_sync_dynamic = syncpr_dynamic(dyn_phase, dyn_time, None)
return output_sync_dynamic
def simulate_static(self, steps, time, pattern, solution = solve_type.FAST, collect_dynamic = False):
"""!
@brief Performs static simulation of syncpr oscillatory network.
@details In other words network performs pattern recognition during simulation.
        @param[in] steps (uint): Number of simulation steps.
@param[in] time (double): Time of simulation.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
@param[in] solution (solve_type): Type of solution.
@param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.
        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' is True, then the dynamic for the whole simulation
                time is returned, otherwise only the last values (last step of simulation) of the dynamic are returned.
@see simulate()
@see simulate_dynamic()
"""
self.__validate_pattern(pattern)
if self._ccore_network_pointer is not None:
ccore_instance_dynamic = wrapper.syncpr_simulate_static(self._ccore_network_pointer, steps, time, pattern, solution, collect_dynamic)
return syncpr_dynamic(None, None, ccore_instance_dynamic)
for i in range(0, len(pattern), 1):
if pattern[i] > 0.0:
self._phases[i] = 0.0
else:
self._phases[i] = math.pi / 2.0
return super().simulate_static(steps, time, solution, collect_dynamic)
def memory_order(self, pattern):
"""!
@brief Calculates function of the memorized pattern.
        @details Throws an exception if the length of the pattern is not equal to the size of the network or if it contains features with values other than -1 or 1.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
@return (double) Order of memory for the specified pattern.
"""
self.__validate_pattern(pattern)
if self._ccore_network_pointer is not None:
return wrapper.syncpr_memory_order(self._ccore_network_pointer, pattern)
else:
return self.__calculate_memory_order(pattern)
def __calculate_memory_order(self, pattern):
"""!
@brief Calculates function of the memorized pattern without any pattern validation.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
@return (double) Order of memory for the specified pattern.
"""
memory_order = 0.0
for index in range(len(self)):
memory_order += pattern[index] * cmath.exp(1j * self._phases[index])
memory_order /= len(self)
return abs(memory_order)
def _phase_kuramoto(self, teta, t, argv):
"""!
@brief Returns result of phase calculation for specified oscillator in the network.
@param[in] teta (double): Phase of the oscillator that is differentiated.
@param[in] t (double): Current time of simulation.
@param[in] argv (tuple): Index of the oscillator in the list.
@return (double) New phase for specified oscillator (don't assign it here).
"""
index = argv
phase = 0.0
term = 0.0
for k in range(0, self._num_osc):
if k != index:
phase_delta = self._phases[k] - teta
phase += self._coupling[index][k] * math.sin(phase_delta)
term1 = self._increase_strength1 * math.sin(2.0 * phase_delta)
term2 = self._increase_strength2 * math.sin(3.0 * phase_delta)
term += (term1 - term2)
return phase + term / len(self)
def __validate_pattern(self, pattern):
"""!
@brief Validates pattern.
        @details Throws an exception if the length of the pattern is not equal to the size of the network or if it contains features with values other than -1 or 1.
@param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].
"""
if len(pattern) != len(self):
raise NameError("Length of the pattern ('%d') should be equal to size of the network." % len(pattern))
for feature in pattern:
if (feature != -1.0) and (feature != 1.0):
raise NameError("Patten feature ('%s') should be distributed in [-1; 1]." % feature)
|
python
|
from api.libs.base import CoreView
from cmdb.models import MachineRoom, DataCenter
from account.models import UserProfile
from django.db.utils import IntegrityError
class MachineRoomView(CoreView):
"""
    Machine room view class.
"""
login_required_action = ["get_list", "post_create", "post_delete", "post_change"]
superuser_required_action = ["post_create", "post_delete", "post_change"]
def get_list(self):
per_page = self.parameters("per_page")
if per_page:
machineroom_objs = self.page_split(MachineRoom.objects.all())
else:
machineroom_objs = MachineRoom.objects.all()
machineroom_list = []
for machineroom_obj in machineroom_objs:
machineroom_list.append(machineroom_obj.get_info())
self.response_data["data"] = machineroom_list
def post_create(self):
try:
name = self.parameters("name")
contact = self.parameters("contact")
memo = self.parameters("memo")
address = self.parameters("address")
admin_id = int(self.parameters("admin"))
datacenter_id = int(self.parameters("datacenter"))
admin_obj = UserProfile.objects.filter(id=admin_id).first()
datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first()
if admin_obj and admin_obj.user:
new_machineroom_obj = MachineRoom(name=name, contact=contact, memo=memo, admin=admin_obj.user, address=address, center=datacenter_obj)
else:
new_machineroom_obj = MachineRoom(name=name, contact=contact, memo=memo, address=address, center=datacenter_obj)
new_machineroom_obj.save()
self.response_data['data'] = new_machineroom_obj.get_info()
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
def post_delete(self):
machineroom_id = self.parameters("id")
machineroom_obj = MachineRoom.objects.filter(id=machineroom_id).first()
if machineroom_obj:
machineroom_obj.delete()
else:
self.response_data['status'] = False
self.status_code = 404
def post_change(self):
machineroom_id = self.parameters("id")
datacenter_id = self.parameters("datacenter_id")
name = self.parameters("name")
admin_id = self.parameters("admin_id")
contact = self.parameters("contact")
memo = self.parameters("memo")
address = self.parameters("address")
try:
machineroom_obj = MachineRoom.objects.filter(id=machineroom_id).first()
if machineroom_obj:
machineroom_obj.name = name
admin_obj = UserProfile.objects.filter(id=admin_id).first()
datacenter_obj = DataCenter.objects.filter(id=datacenter_id).first()
machineroom_obj.admin = admin_obj.user if admin_obj and hasattr(admin_obj, "user") else None
machineroom_obj.contact = contact
machineroom_obj.memo = memo
machineroom_obj.address = address
machineroom_obj.center = datacenter_obj
machineroom_obj.save()
self.response_data['data'] = machineroom_obj.get_info()
else:
self.response_data['status'] = False
self.status_code = 404
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
|
python
|
#!"D:\STORE\0-Admin\Desktop\Chatbot-Osiris bkp\python.exe"
import sys
import subprocess
subprocess.call("start cmd /k echo "+sys.argv[1], shell=True)
|
python
|
from django import template
from django.template.loader import get_template
register = template.Library()
@register.filter(name='is_deploma')
def is_deploma(value):
return value['year'] == '2' and value['course'] == 'BTech'
@register.filter(name='is_first_year')
def is_first_year(value):
return value['year'] == '1' and value['course'] == 'BTech'
@register.filter(name='is_mca')
def is_mca(value):
return value['course'] == 'MCA'
@register.filter(name='is_btech_first_year')
def is_btech_first_year(instance):
return instance.applyYear == '1' and instance.course == 'BTech'
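# Quick illustrative checks of the filters above (hypothetical input dicts,
# added only to show the expected shapes; real usage is inside templates,
# e.g. ``{% if student|is_mca %}``).
if __name__ == "__main__":
    print(is_deploma({'year': '2', 'course': 'BTech'}))     # True
    print(is_first_year({'year': '1', 'course': 'BTech'}))  # True
    print(is_mca({'course': 'MCA'}))                        # True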
|
python
|
import discord
from discord.ext import commands
import sqlite3
from helper import utils
class SettingsCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(help= 'Used to set the prefix of the bot in this server, use default to default to the global prefix', usage='prefix <prefix> | default')
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def prefix(self, ctx, *, prefix = None):
try:
await utils.auto_register(ctx.author.id)
conn = sqlite3.connect('./Data/Database/settings.db')
c = conn.cursor()
rows = c.execute('SELECT * FROM prefix WHERE guildId = "{}"'.format(ctx.guild.id)).fetchall()
if prefix == None:
if len(rows) == 0:
pr = '+'
else :
pr = rows[0][1]
await ctx.send(embed= discord.Embed(color= 0xf1c40f, description= 'The Current Prefix is **{}**'.format(pr)))
elif prefix == 'default':
c.execute('DELETE FROM prefix WHERE guildId = "{}"'.format(ctx.guild.id))
conn.commit()
await ctx.send(embed= discord.Embed(color= 0x008000, description= 'Prefix changed to **default prefix (+)**'))
else:
if len(prefix) > 3:
return await ctx.send(embed= discord.Embed(color= 0xFF0000, description= 'Prefix exceeded the 3 character limit'))
if len(rows) == 0:
c.execute('INSERT INTO prefix (guildId, prefix) VALUES ("{0}", "{1}")'.format(ctx.guild.id, prefix))
conn.commit()
await ctx.send(embed= discord.Embed(color= 0x008000,description= 'Prefix set to **{}**'.format(prefix)))
else:
c.execute('UPDATE prefix SET prefix = "{1}" WHERE guildId = "{0}"'.format(ctx.guild.id, prefix))
conn.commit()
await ctx.send(embed= discord.Embed(color= 0x008000,description= 'Prefix changed to **{}**'.format(prefix)))
except:
return
@prefix.error
    async def prefix_error(self, ctx, error):
if isinstance(error, commands.errors.MissingPermissions):
await ctx.send(embed= discord.Embed(color= 0xFF0000, description= 'You do not have **ADMINISTRATOR** permission!'))
def setup(bot):
bot.add_cog(SettingsCog(bot))
|
python
|
# coding: utf-8
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import networkx as nx
import itertools
import pandas
from networkx.readwrite import json_graph
from scipy import spatial
import re
__author__ = "Adrien Guille, Pavel Soriano"
__email__ = "[email protected]"
class Corpus:
def __init__(self,
source_file_path,
language=None,
n_gram=1,
vectorization='tfidf',
max_relative_frequency=1.,
min_absolute_frequency=0,
max_features=2000,
sample=None):
self._source_file_path = source_file_path
self._language = language
self._n_gram = n_gram
self._vectorization = vectorization
self._max_relative_frequency = max_relative_frequency
self._min_absolute_frequency = min_absolute_frequency
self.max_features = max_features
self.data_frame = pandas.read_csv(source_file_path, sep='\t', encoding='utf-8')
if sample:
self.data_frame = self.data_frame.sample(frac=0.8)
        self.data_frame = self.data_frame.fillna(' ')
self.size = self.data_frame.count(0)[0]
stop_words = []
if language is not None:
stop_words = stopwords.words(language)
if vectorization == 'tfidf':
vectorizer = TfidfVectorizer(ngram_range=(1, n_gram),
max_df=max_relative_frequency,
min_df=min_absolute_frequency,
max_features=self.max_features,
stop_words=stop_words)
elif vectorization == 'tf':
vectorizer = CountVectorizer(ngram_range=(1, n_gram),
max_df=max_relative_frequency,
min_df=min_absolute_frequency,
max_features=self.max_features,
stop_words=stop_words)
else:
raise ValueError('Unknown vectorization type: %s' % vectorization)
self.sklearn_vector_space = vectorizer.fit_transform(self.data_frame['text'].tolist())
self.gensim_vector_space = None
vocab = vectorizer.get_feature_names()
self.vocabulary = dict([(i, s) for i, s in enumerate(vocab)])
def export(self, file_path):
self.data_frame.to_csv(path_or_buf=file_path, sep='\t', encoding='utf-8')
def full_text(self, doc_id):
return self.data_frame.iloc[doc_id]['text']
def title(self, doc_id):
return self.data_frame.iloc[doc_id]['title']
def date(self, doc_id):
return self.data_frame.iloc[doc_id]['date']
def author(self, doc_id):
aut_str = str(self.data_frame.iloc[doc_id]['author'])
return aut_str.split(', ')
def affiliation(self, doc_id):
aff_str = str(self.data_frame.iloc[doc_id]['affiliation'])
return aff_str.split(', ')
def documents_by_author(self, author, date=None):
ids = []
potential_ids = range(self.size)
if date:
potential_ids = self.doc_ids(date)
for i in potential_ids:
if self.is_author(author, i):
ids.append(i)
return ids
def all_authors(self):
author_list = []
for doc_id in range(self.size):
author_list.extend(self.author(doc_id))
return list(set(author_list))
def is_author(self, author, doc_id):
return author in self.author(doc_id)
def docs_for_word(self, word_id):
ids = []
for i in range(self.size):
vector = self.vector_for_document(i)
if vector[word_id] > 0:
ids.append(i)
return ids
def doc_ids(self, date):
return self.data_frame[self.data_frame['date'] == date].index.tolist()
def vector_for_document(self, doc_id):
vector = self.sklearn_vector_space[doc_id]
cx = vector.tocoo()
weights = [0.0] * len(self.vocabulary)
for row, word_id, weight in itertools.zip_longest(cx.row, cx.col, cx.data):
weights[word_id] = weight
return weights
def word_for_id(self, word_id):
return self.vocabulary.get(word_id)
def id_for_word(self, word):
for i, s in self.vocabulary.items():
if s == word:
return i
return -1
def similar_documents(self, doc_id, num_docs):
doc_weights = self.vector_for_document(doc_id)
similarities = []
for a_doc_id in range(self.size):
if a_doc_id != doc_id:
similarity = 1.0 - spatial.distance.cosine(doc_weights, self.vector_for_document(a_doc_id))
similarities.append((a_doc_id, similarity))
similarities.sort(key=lambda x: x[1], reverse=True)
return similarities[:num_docs]
def collaboration_network(self, doc_ids=None, nx_format=False):
nx_graph = nx.Graph(name='')
for doc_id in doc_ids:
authors = self.author(doc_id)
for author in authors:
nx_graph.add_node(author)
for i in range(0, len(authors)):
for j in range(i+1, len(authors)):
nx_graph.add_edge(authors[i], authors[j])
bb = nx.betweenness_centrality(nx_graph)
nx.set_node_attributes(nx_graph, 'betweenness', bb)
if nx_format:
return nx_graph
else:
return json_graph.node_link_data(nx_graph)
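# A hedged, self-contained example of building a tiny Corpus from a temporary
# TSV file. The documents, column values, and file name are invented for
# illustration; ``language=None`` is used so no NLTK stop-word download is
# needed.
if __name__ == "__main__":
    import os
    import tempfile

    rows = ["id\ttitle\ttext\tdate\tauthor\taffiliation",
            "0\tDoc A\tgraph based topic models\t2016\tAlice\tLab X",
            "1\tDoc B\ttopic models for text mining\t2017\tBob, Carol\tLab Y"]
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False,
                                     encoding="utf-8") as tmp:
        tmp.write("\n".join(rows))
        path = tmp.name
    corpus = Corpus(path, language=None, vectorization="tf")
    print(corpus.size, corpus.title(0), corpus.author(1))
    os.remove(path)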
|
python
|
import RPi.GPIO as GPIO
import time
import threading
import os
import sys
# constants
SLOWFLASHTIMES = [2,2]
FASTFLASHTIMES = [0.5,0.5]
# class for managing LED
class LEDControl():
class LEDStates():
#states of LED
OFF = 0
ON = 1
FLASH = 2
def __init__(self, gpioPin):
#init gpio
self.gpioPin = gpioPin
# setup gpio pin as output
GPIO.setup(self.gpioPin, GPIO.OUT)
#set led to off
self.off()
def set(self, ledValue):
#Sets the value of the LED [True / False]
self.ledValue = ledValue
GPIO.output(self.gpioPin, self.ledValue)
def get(self):
#Gets the value of the led
return self.ledValue
def on(self):
#Turns the LED on
self.state = self.LEDStates.ON
self.set(True)
def off(self):
#Turns the LED off
self.state = self.LEDStates.OFF
self.set(False)
def flash(self, timeOn, timeOff):
#if the led is already flashing, set it to off and wait for it to stop
if self.state == self.LEDStates.FLASH:
self.off()
self.flashthread.join()
# flash the LED on a thread
self.state = self.LEDStates.FLASH
self.flashthread = threading.Thread(target=self.__flashLED, args=(timeOn, timeOff))
self.flashthread.start()
def __flashLED(self, timeOn, timeOff):
        # loops until the LED state is changed from FLASH (i.e. to on or off)
while self.state == self.LEDStates.FLASH:
if self.get() == True:
self.set(False)
time.sleep(timeOff)
else:
self.set(True)
time.sleep(timeOn)
def toggle(self):
#Toggles the LED, if its on, turns it off and vice versa
        if self.get() == True: self.off()
else: self.on()
if __name__ == "__main__":
try:
#set gpio mode
GPIO.setmode(GPIO.BCM)
#create LED
led = LEDControl(17)
while(True):
led.on()
time.sleep(1)
led.off()
time.sleep(1)
except KeyboardInterrupt:
print "User Cancelled (Ctrl C)"
except:
print "Unexpected error - ", sys.exc_info()[0], sys.exc_info()[1]
raise
finally:
#turn off led
led.off()
#cleanup gpio
GPIO.cleanup()
print "Stopped"
|
python
|
# REQUIRES: bindings_python
# RUN: %PYTHON% %s | FileCheck %s
import circt
from circt.dialects import rtl
from mlir.ir import *
from mlir.dialects import builtin
with Context() as ctx, Location.unknown():
circt.register_dialects(ctx)
i32 = IntegerType.get_signless(32)
m = builtin.ModuleOp()
with InsertionPoint(m.body):
# CHECK: rtl.module @MyWidget(%my_input: i32) -> (%my_output: i32)
# CHECK: rtl.output %my_input : i32
op = rtl.RTLModuleOp(
name='MyWidget',
input_ports=[('my_input', i32)],
output_ports=[('my_output', i32)],
body_builder=lambda module: rtl.OutputOp(
[module.entry_block.arguments[0]])
)
# CHECK: rtl.module @swap(%a: i32, %b: i32) -> (%{{.+}}: i32, %{{.+}}: i32)
# CHECK: rtl.output %b, %a : i32, i32
@rtl.RTLModuleOp.from_py_func(i32, i32)
def swap(a, b):
return b, a
# CHECK: rtl.module @top(%a: i32, %b: i32) -> (%{{.+}}: i32, %{{.+}}: i32)
# CHECK: %[[a0:.+]], %[[b0:.+]] = rtl.instance "" @swap(%a, %b)
# CHECK: %[[a1:.+]], %[[b1:.+]] = rtl.instance "" @swap(%[[a0]], %[[b0]])
# CHECK: rtl.output %[[a1:.+]], %[[b1:.+]] : i32, i32
@rtl.RTLModuleOp.from_py_func(i32, i32)
def top(a, b):
a, b = swap(a, b)
a, b = swap(a, b)
return a, b
m.print()
|
python
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for FITS driver.
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import pytest
from osgeo import gdal
pytestmark = pytest.mark.require_driver('FITS')
@pytest.mark.parametrize(
'filename',
['byte', 'int16', 'uint16', 'int32', 'uint32', 'float32', 'float64']
)
def test_fits(filename):
driver = gdal.GetDriverByName('FITS')
ds = gdal.Open('../gcore/data/' + filename + '.tif')
driver.CreateCopy('tmp/' + filename + '.fits', ds, options=['PAGESIZE=2,2'])
ds2 = gdal.Open('tmp/' + filename + '.fits')
assert ds2.GetRasterBand(1).Checksum() == ds.GetRasterBand(1).Checksum()
assert ds2.GetRasterBand(1).DataType == ds.GetRasterBand(1).DataType
ds2 = None
driver.Delete('tmp/' + filename + '.fits')
def test_fits_metadata():
driver = gdal.GetDriverByName('FITS')
ds = gdal.Open('../gcore/data/byte.tif')
ds2 = driver.CreateCopy('tmp/byte.fits', ds)
md = {'TEST': 'test_value'}
ds2.SetMetadata(md)
ds2 = None
gdal.Unlink('tmp/byte.fits.aux.xml')
ds2 = gdal.Open('tmp/byte.fits')
md = ds2.GetMetadata()
ds2 = None
assert md['TEST'] == 'test_value'
ds2 = gdal.Open('tmp/byte.fits', gdal.GA_Update)
md = {'TEST2': 'test_value2'}
ds2.SetMetadata(md)
ds2 = None
gdal.Unlink('tmp/byte.fits.aux.xml')
ds2 = gdal.Open('tmp/byte.fits')
md = ds2.GetMetadata()
ds2 = None
assert md['TEST2'] == 'test_value2'
def test_fits_nodata():
driver = gdal.GetDriverByName('FITS')
ds = gdal.Open('../gcore/data/nodata_byte.tif')
ds2 = driver.CreateCopy('tmp/nodata_byte.fits', ds)
ds2 = None
gdal.Unlink('tmp/nodata_byte.fits.aux.xml')
ds2 = gdal.Open('tmp/nodata_byte.fits')
nd = ds2.GetRasterBand(1).GetNoDataValue()
ds2 = None
driver.Delete('tmp/nodata_byte.fits')
assert nd == 0
def test_fits_offscale():
driver = gdal.GetDriverByName('FITS')
ds = gdal.Open('../gdrivers/data/offscale_byte.tif')
ds2 = driver.CreateCopy('tmp/offscale_byte.fits', ds)
ds2 = None
gdal.Unlink('tmp/offscale_byte.fits.aux.xml')
ds2 = gdal.Open('tmp/offscale_byte.fits')
offset = ds2.GetRasterBand(1).GetOffset()
scale = ds2.GetRasterBand(1).GetScale()
ds2 = None
driver.Delete('tmp/offscale_byte.fits')
assert offset == -0.0039525691699605
assert scale == 1.00395256917
|
python
|
from xml.dom.minidom import Document
from django.shortcuts import redirect, render
def add_file(request):
if request.method == 'POST':
updated_file = request.FILES['document']
print(updated_file.name)
print(updated_file.size)
return render(request, 'base/add_file.html',{})
|
python
|
import os
import json
import importlib
from typing import Callable, TypeVar
AnyFunction = Callable
Handler = TypeVar('Handler')
def getClass(modulePath: str, className: str):
mod = importlib.import_module(modulePath)
return getattr(mod, className)
def getAction(fileName: str, actionName: str) -> Callable:
"""Load action from file inside shock folder
Args:
fileName (str): file that has the action.
actionName (str): action that will be extracted from file.
Returns:
action: the call result.
"""
modulefullpath = "shock."+fileName
module = __import__(modulefullpath)
action = getattr(module, fileName)
return getattr(action, actionName)
class InvalidAction(Exception):
def __init__(self, action: str) -> None:
self.action = action
def __str__(self):
return 'Invalid action %s requested.' % self.action
class Shock():
"""This class serves as an abstraction for the communication between Spark
and Kafka
Examples (py):
>>> shock = Shock(InterSCity)
Examples (kafka-consumer):
>>> newStream;{"stream": "mynicestream"}
>>> ingestion;{"stream": "mynicestream", "shock_action": "bestaction"}
>>> setup;{"stream": "mynicestream", "shock_action": "kafkaCast"}
>>> publish;{"stream": "mynicestream", "shock_action": "parquetSink"}
"""
def __init__(self, handler: Handler, environment="default") -> None:
"""Shock constructor.
Args:
handler (Handler): A Shock handler to be used.
Examples:
>>> sck = Shock(InterSCity)
"""
self.handler = handler(environment)
self.waitForActions()
def waitForActions(self) -> None:
"""Consume Kafka's msg
Expected Data:
"actionname;{"key1": "val1", "key2": "val2", "keyn": "valn"}"
"""
for pkg in self.handler.consumer:
self.newActionSignal()
msg = pkg.value.decode('ascii')
self.handleNewKafkaMsg(msg)
def handleNewKafkaMsg(self, msg: str) -> None:
"""Normalize Kafka message and send to be handled by the handler
Args:
msg (str): msg received, with at least one `;` char
Returns:
no return
"""
try:
splittedMsg = msg.split(";")
actionName = splittedMsg[0].strip()
args = json.loads(splittedMsg[1])
except:
raise InvalidAction(msg)
self.handler.handle(actionName, args)
def newActionSignal(self) -> None:
"""Alert handler about new action arrived.
Args:
no arguments
Returns:
no return
"""
self.handler.newActionSignal()
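# A hedged, self-contained demonstration of the "action;json" message format
# that ``handleNewKafkaMsg`` expects. The fake handler below is invented for
# illustration so the example runs without Kafka or Spark.
if __name__ == "__main__":
    class _FakeHandler:
        def __init__(self, environment):
            self.consumer = []  # no pending Kafka messages

        def newActionSignal(self):
            print("new action arrived")

        def handle(self, actionName, args):
            print("handling", actionName, "with", args)

    sck = Shock(_FakeHandler)
    sck.handleNewKafkaMsg('setup;{"stream": "mynicestream", "shock_action": "kafkaCast"}')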
|
python
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, A.A Suvorov
# All rights reserved.
# --------------------------------------------------------
"""Tests prototype.py"""
from patterns.creational.prototype import Bird
class TestPrototype:
def test_register(self, prototype, bird):
prototype.register('Bird', bird)
assert 'Bird' in prototype._objects
def test_unregister(self, prototype, bird):
prototype.register('Bird', bird)
prototype.unregister('Bird')
assert 'Bird' not in prototype._objects
def test_clone(self, prototype, bird):
prototype.register('Bird', bird)
duck = prototype.clone('Bird', {'name': 'Duck'})
assert isinstance(duck, Bird)
def test_get_attr(self, prototype, bird):
prototype.register('Bird', bird)
duck = prototype.clone('Bird', {'name': 'Duck'})
assert getattr(duck, 'name')
assert duck.name == 'Duck'
|
python
|
import numpy as np
from scipy.stats import iqr
from ..config import MAX_VAL_AFTER_NORMALIZATION
from ..utils.dataUtils import morphDilation, gaussianFilter, radial_profile, applyRelativeThr, getCoordsWithinSphere
from ..utils.genericException import GenericError
DEFAULT_PERCENTIL = 95
DEFAULT_BINARY_MASK_THR= 0.01
def targetNormalizationRegr(x, mask):
mask = morphDilation(mask, 1)
binary_mask = np.where(mask > DEFAULT_BINARY_MASK_THR, 1, 0)
    background = x[binary_mask < 1]
    background_median = np.median(background)
background_upper_percentil = np.percentile(background, DEFAULT_PERCENTIL)
x = np.clip(x, background_median, None)
x = x * binary_mask
target_inside_mask = x[binary_mask > 0]
target_inside_mask = target_inside_mask[target_inside_mask > background_median]
target_upper_percentil = np.percentile(target_inside_mask, DEFAULT_PERCENTIL)
target_iqr = target_upper_percentil - background_upper_percentil
if target_iqr <= 0:
raise ValueError("Bad iqr %.3f. Is your input masked?. Unmasked inputs required" % target_iqr)
x = x / target_iqr
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) #Just to prevent outliers
return x
def targetNormalizationLocscale(x, mask):
binary_mask = np.where(morphDilation(mask, 1) > DEFAULT_BINARY_MASK_THR, 1, 0)
    background = x[binary_mask < 1]
    background_median = np.median(background)
background_upper_percentil = np.percentile(background, DEFAULT_PERCENTIL)
x = np.clip(x, background_median, None)
x = x * mask
target_inside_mask = x[binary_mask > 0]
target_inside_mask = target_inside_mask[target_inside_mask > background_median]
target_upper_percentil = np.percentile(target_inside_mask, DEFAULT_PERCENTIL)
target_iqr = target_upper_percentil - background_upper_percentil
if target_iqr <= 0:
raise ValueError("Bad iqr %.3f. Is your input masked?. Unmasked inputs required" % target_iqr)
x = x / target_iqr
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) #Just to prevent outliers
return x
def targetNormalization_2(x, y, mask):
inside_x = x[morphDilation(mask, 1)>= DEFAULT_BINARY_MASK_THR]
mean_x, std_x = np.mean(inside_x), np.std(inside_x)
inside_y= y[mask>= DEFAULT_BINARY_MASK_THR]
mean_y, std_y = np.mean(inside_y), np.std(inside_y)
y= ((y-mean_y)/std_y)*std_x + mean_x
return y
def targetNormalizationClassif(x):
x= np.clip(x, np.percentile(x,0.1), np.percentile(x,99.9))
x_norm= minMaxNormalization(x)
return x_norm
def inputNormalizationWithMask(x, mask):
mask = morphDilation(mask, 3)
mask= applyRelativeThr(mask, DEFAULT_BINARY_MASK_THR)
median_val = np.median( x[mask>0] )
iqr_val = iqr(x[mask > 0], rng=(10,DEFAULT_PERCENTIL))
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalizationWithMask_2(x, mask):
mask = morphDilation(mask, 3)
mask= applyRelativeThr(mask, DEFAULT_BINARY_MASK_THR)
selection= (mask>0) & (x>0)
median_val = np.median( x[selection ] )
iqr_val = iqr(x[selection], rng=(10,DEFAULT_PERCENTIL))
# iqr_val= x[selection].max()- x[selection].min()
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalizationWithMask_3(x, mask):  # This might be too tight for general purposes
mask = np.where(morphDilation(mask, 1) > DEFAULT_BINARY_MASK_THR, 1, 0)
selection= (mask>0) & (x>0)
median_val = np.median( x[selection ] )
iqr_val = iqr(x[selection], rng=(10,DEFAULT_PERCENTIL))
# iqr_val= x[selection].max()- x[selection].min()
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalization_classification(x):
    x_min = x.min()
x_range= x.max()-x_min
midPoint= x_min+ x_range*.5
conditionSplit= x<midPoint
x_min= np.percentile(x[conditionSplit], 5)
x_max = np.percentile(x[~ conditionSplit], DEFAULT_PERCENTIL)
if not np.isclose(x_min, x_max):
x= x/(x_max-x_min)
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization(x):
x= robustNormalization(x )
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization_2(x):
from skimage.filters import threshold_otsu
otsu_thr= threshold_otsu(x)
out_mean= np.mean(x[x<otsu_thr])
inner_range= iqr(x[x>=otsu_thr], rng= (10, DEFAULT_PERCENTIL) )
if inner_range==0:
raise NormalizationError("warning, bad iqr %.3f. Is your input masked?. Unmasked inputs required" % inner_range)
x=(x- out_mean)/inner_range
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization_3(x, noise_stats=None):
    '''
    Performs input normalization using the typical cryo-EM scheme of normalizing according to the noise:
    the noise mean is set to 0 and the noise std to 0.1.
    :param x: input volume
    :param noise_stats: tuple (mean_noise, std_noise) with the noise statistics of the input volume.
                        If None, the statistics are guessed automatically.
    :return: normalized input
    '''
    if noise_stats is None:
        meanInNoise, stdInNoise = _guessNoiseStats_radialProfile(x)
        print("Noise stats: mean=%f std=%f" % (meanInNoise, stdInNoise))
    else:
        meanInNoise, stdInNoise = noise_stats
    x_norm = (x - meanInNoise) / (stdInNoise * 10)  # Desired noise distribution: mean=0 and std=0.1
assert not np.any(np.isnan(x_norm)), "Error normalizing input. Some nans were generated in the volume. Try an alternative normalization option"
return x_norm
def _guessNoiseStats_radialProfile(x):
from .dataUtils import resizeVol
from scipy import ndimage
from scipy.signal import argrelextrema
#First part is a set of heuristics to identify the circular noise around the protein
    x_gauss = gaussianFilter(resizeVol(x, (100, 100, 100)), 0.1)  # Resize to speed up and filter to reduce the noise level.
win_size=5
win_mean = ndimage.uniform_filter(x_gauss, win_size)
win_sqr_mean = ndimage.uniform_filter(x_gauss ** 2, win_size) #This is a good estimation of the protein region
win_var = win_sqr_mean - win_mean ** 2
interestingCurve= radial_profile(win_var)-radial_profile(win_mean)
energyCurve= radial_profile(win_sqr_mean)
# import matplotlib.pyplot as plt
# f= plt.figure()
# plt.plot(radial_profile(win_mean),label='win_mean')
# plt.plot(radial_profile(win_sqr_mean),label='win_sqr_mean')
# plt.plot(radial_profile(win_var),label='win_var')
# plt.plot(radial_profile(win_var)-radial_profile(win_mean),label='win_var_minus_win_mean')
# plt.legend()
# f.show()
# #plt.show()
#
# from devel_code.trainNet.dataManager import plot_vol_and_target
# plot_vol_and_target(x, x_gauss, win_sqr_mean)
candidateMinima= argrelextrema(interestingCurve, np.less)[0]
if len(candidateMinima)>0:
toLookIndex= np.min(candidateMinima)
if interestingCurve[toLookIndex]>=0:
toLookIndex = np.min(np.argmin(interestingCurve)) # Noise border will be at index > toLookIndex
else:
toLookIndex = np.min(np.argmin(interestingCurve)) # Noise border will be at index > toLookIndex
if toLookIndex>50: #Radial noise, the most typical, has 50 over 100 voxels radius
candidateNoiseDist = x_gauss.shape[0] // 2
print("Automatic radial noise detection may have failed. No suitable min index found. Guessing radial noise of radius %s%%"%(candidateNoiseDist))
else:
maxInterestingIdx= np.min(np.argmax(interestingCurve[toLookIndex:51])).astype(np.int32)+toLookIndex
if ( energyCurve[maxInterestingIdx]> interestingCurve[maxInterestingIdx] and interestingCurve[maxInterestingIdx]>0) : #np.isclose(maxInterestingIdx, maxWinMean, rtol=1e-2):
raise NormalizationError("Warning, the input might be hollow structure. Automatic masking might fail. Aborting...")
try:
toLookIndex2= np.min(np.where(interestingCurve[toLookIndex:]>0))+toLookIndex
try:
toLookIndex3 = np.min(np.where(interestingCurve[toLookIndex2:] <= 0)) + toLookIndex2
candidateNoiseDist = round((toLookIndex2 + toLookIndex3) * 0.5)
grad_1 = np.mean(np.diff(interestingCurve[-10:]))
grad_2 = np.mean(np.diff(interestingCurve[-25:-10]))
if grad_1 > 1e-8 and grad_2 >1e-8 and grad_1 > 3 * grad_2:
candidateNoiseDist = np.sqrt(3 * (x_gauss.shape[0] // 2) ** 2)
except ValueError:
candidateNoiseDist = x_gauss.shape[0] // 2
print("Automatic radial noise detection may have failed. No trend change found. Guessing radial noise of radius %s %%" % (candidateNoiseDist))
except ValueError:
candidateNoiseDist = x_gauss.shape[0] // 2
print("The input might be a fiber, assuming no masking till radius %d %%"%candidateNoiseDist)
print("Automatic radial noise detected beyond %s %% of volume side" % (candidateNoiseDist))
noiseAndProt_regionIdxs= getCoordsWithinSphere(x_gauss, maxDist=candidateNoiseDist)
protein_region= applyRelativeThr(morphDilation(win_sqr_mean, size=5), r_thr=0.05, robust=False)
noise_regionMask= np.zeros_like(x_gauss)
noise_regionMask[noiseAndProt_regionIdxs]=1
noise_regionMask-=protein_region
# from devel_code.trainNet.dataManager import plot_vol_and_target
# plot_vol_and_target(beforeProtein_mask, protein_region, noise_regionMask)
noise_regionMask= (resizeVol(noise_regionMask, x.shape) >0.5)
interestingPart= x[noise_regionMask]
    meanInNoise = np.mean(interestingPart)
    stdInNoise = np.std(interestingPart)
    return meanInNoise, stdInNoise
def sigmoid(x, factor=1, offset=0):
return 1. / (1 + np.exp(-factor*x)) - offset
def minMaxNormalization(x):
return (x- x.min())/(x.max()-x.min() )
def normalNormalization(x):
x=(x- x.mean())/(x.std() )
return x
def robustNormalization(img, iqrRange=(10, DEFAULT_PERCENTIL), raiseInsteadWarn=True, ignoreExtrema=False):
if ignoreExtrema:
iqr_val= iqr(img[(img>img.min()) & (img<img.max())], rng= iqrRange )
else:
iqr_val= iqr(img, rng= iqrRange )
if iqr_val==0:
if raiseInsteadWarn:
raise NormalizationError("Error, bad iqr %.3f. Is your input masked?. Unmasked inputs required" % iqr_val)
else:
iqr_val = iqr(img+ np.random.normal(img.mean(), img.std()*1e-4), rng=iqrRange)
if iqr_val == 0:
print("warning, bad iqr", iqr_val)
iqr_val= (np.max(img)-np.min(img)) + 1e-12
newImg=(img- np.median(img))/iqr_val
return newImg
def trimmedNormalization(x, percentil=95, binarizeThr=None):
x= x.astype(np.float32)
try:
x= np.clip(x, 0, np.percentile(x[x>0], percentil))
x= (x-x.min())/(x.max()-x.min())
except IndexError:
pass
if binarizeThr:
x[x >= binarizeThr]=1
x[x < binarizeThr] = 0
return x
class NormalizationError(GenericError):
pass
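# --- Hedged usage sketch (added, not part of the original module) ---
# Typical use of the normalizers above on a synthetic volume. The module is
# normally imported as part of its package (note the relative imports), so the
# import path below is an assumption.
# import numpy as np
# from mypackage.normalization import inputNormalization
#
# volume = np.random.normal(loc=0.0, scale=1.0, size=(64, 64, 64)).astype(np.float32)
# normalized = inputNormalization(volume)   # robust (median/IQR) scaling followed by clipping
# print(normalized.min(), np.median(normalized), normalized.max())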
|
python
|
import pytest
import requests
from log_config import log_, pformat
from core import config
from tests.utils.utils import get_server_api, client
### - - - - - - - - - - - - - - - - - - - - - - - ###
### LOGINS
### - - - - - - - - - - - - - - - - - - - - - - - ###
def client_anonymous_login(
as_test = True,
only_access_token = False,
):
# server_api = get_server_api()
# log_.debug("=== client_anonymous_login / server_api : %s", server_api)
# url = f"{server_api}{config.API_V1_STR}/anonymous_login"
url = f"{config.API_V1_STR}/anonymous_login"
# log_.debug("=== url : %s", url)
# response = requests.get(
response = client.get(
url,
)
resp = response.json()
# log_.debug("=== client_anonymous_login / resp : \n%s", pformat( resp ))
if as_test :
assert response.status_code == 200
else :
if only_access_token :
return resp['tokens']['access_token']
else :
return resp
@pytest.mark.user
def test_client_anonymous_login():
client_anonymous_login()
def client_login(
as_test = True,
only_access_token = False
):
# server_api = get_server_api()
### get ano access token
response_ano_json = client_anonymous_login( as_test=False )
# log_.debug("=== client_login / response_ano_json : \n%s", pformat( response_ano_json ))
response_ano_access_token = response_ano_json['tokens']['access_token']
# log_.debug("=== client_login / response_ano_access_token : %s", response_ano_access_token )
### log test user
login_test = {
"user_login": {
"email": "[email protected]",
"password": "a-very-common-password"
}
}
# url = f"{server_api}{config.API_V1_STR}/login"
url = f"{config.API_V1_STR}/login"
# response = requests.post(
response = client.post(
url,
json = login_test,
headers = {
'accept': 'application/json',
'access_token' : response_ano_access_token
}
)
# log_.debug("=== client_login / response : \n%s", pformat(response.json() ) )
resp = response.json()
if as_test :
assert response.status_code == 200
assert resp['tokens']['access_token']
else :
if only_access_token :
return resp['tokens']['access_token']
else :
return resp
@pytest.mark.user
def test_client_login() :
client_login()
### - - - - - - - - - - - - - - - - - - - - - - - ###
### REGISTER - TO DO
### - - - - - - - - - - - - - - - - - - - - - - - ###
def client_register(
as_test = True
):
response_ano_json = client_anonymous_login( as_test=False )
response_ano_access_token = response_ano_json['tokens']['access_token']
register_test = {
"user_register": {
"name": "Elinor",
"surname": "Ostrom",
"email": "[email protected]",
"password": "a-very-common-password",
}
}
url = f"{config.API_V1_STR}/register"
response = client.post(
url,
        json = register_test,
headers = {
'accept': 'application/json',
'access_token' : response_ano_access_token
}
)
log_.debug("=== client_register / response : \n%s", pformat(response.json() ) )
resp = response.json()
if as_test :
log_.debug ('=== client_register / resp : \n%s', pformat(resp) )
assert response.status_code == 200
assert resp['tokens']['access_token']
else :
return resp
@pytest.mark.user
@pytest.mark.skip(reason='not developed yet')
def test_client_register():
client_register()
|
python
|
import glob
import os
import re
class JasmineTest:
def __init__(self, test_name, test_class_name, test_path, included_tags: list, excluded_tags: list):
self.test_name = test_name
self.test_class_name = test_class_name
self.test_path = test_path
self.included_tags = included_tags
self.excluded_tags = excluded_tags
def is_runnable(self):
# Return true if test matches include tags and does not match exclude tags
should_run = False
if self.is_included():
should_run = True
if self.is_excluded():
should_run = False
return should_run
def has_ptid(self):
        ptid_exists = re.search(r'PTID=\d*', self.test_name)  # Check for a ptid
return ptid_exists
def is_included(self):
# Return true if test matches all include tags
has_match = False
tags = self.get_included_tags()
if len(tags) > 0:
has_match = True
if len(self.included_tags) == 0:
has_match = True
return has_match
def get_included_tags(self):
# Return matching include tags
found_tags = []
for tag in self.included_tags:
if tag == '':
break
# Tags should always start with a '#', but lib should accept list entries with or without it
tag_stripped = tag.replace('#', '') # Strip '#' in case it is there
tag_fixed = '#' + tag_stripped
if re.search(tag_fixed, self.test_name): # Add '#' back to the tag entry and check for it
found_tags.append(tag_fixed)
return found_tags
def is_excluded(self):
# Return true if test has matching exclude tags
has_match = False
tags = self.get_excluded_tags()
if len(tags) > 0:
has_match = True
return has_match
def get_excluded_tags(self):
# Return matching exclude tags
found_tags = []
for tag in self.excluded_tags:
# Tags should always start with a '#', but lib should accept list entries with or without it
tag_stripped = tag.replace('#', '') # Strip '#' in case it is there
tag_fixed = '#' + tag_stripped
if re.search(tag_fixed, self.test_name): # Add '#' back to the tag entry and check for it
found_tags.append(tag_fixed)
return found_tags
class JasmineFile:
def __init__(self, file_name, file_path, included_tags: list, excluded_tags: list):
self.file_name = file_name
self.file_path = file_path
self.included_tags = included_tags
self.excluded_tags = excluded_tags
self.jasmine_tests = self.get_matching_tests()
def has_tests(self):
# Return true if it contains tests
tests_found = False
if len(self.jasmine_tests) > 0:
tests_found = True
return tests_found
def get_matching_tests(self):
# Return collection of test objects
jasmine_test_list = []
        with open(self.file_path) as test_file:
            lines = [line.rstrip('\n') for line in test_file]
default_test_class_name = self.file_name.replace('.', '_')
test_class_name = default_test_class_name
for line in lines:
it_blocks = re.findall(r'\bit\(.*\'', line)
if 'describe(' in line:
# Set current describe block for matching tests
test_class_name = re.search('(\')(.*)(\')', line).group(2)
if len(it_blocks) > 0:
try:
test_name = re.search('(\')(.*)(\')', line).group(2)
jasmine_test = JasmineTest(test_name, test_class_name, self.file_path, self.included_tags,
self.excluded_tags)
jasmine_test_list.append(jasmine_test)
except:
# If the regex does not match, ignore that line
pass
return jasmine_test_list
class JasmineManifest:
def __init__(self, test_globs: list, included_tags: list, excluded_tags: list):
# Setup constructor
self.test_globs = test_globs
self.included_tags = included_tags
self.excluded_tags = excluded_tags
self.jasmine_tests = self.get_all_tests()
def get_all_tests(self):
# For each test glob, find and return the list of matching files
test_files = []
for file_glob in self.test_globs:
print("Checking for matches with the following glob path: " + file_glob)
matching_files = glob.glob(file_glob)
amount_found = str(len(matching_files))
print("Count of matching files found: " + amount_found)
test_files = test_files + matching_files
# Make a collection of JasmineFile objects and call their get_matching_tests method
jasmine_file_list = self.get_jasmine_file_list(test_files)
# Append matching tests to a collection
jasmine_test_list = []
for jasmine_file in jasmine_file_list:
for each_test in jasmine_file.jasmine_tests:
jasmine_test_list.append(each_test)
# Return collection of JasmineTest objects
return jasmine_test_list
def get_jasmine_file_list(self, file_paths: list):
jasmine_file_list = []
for file_path in file_paths:
file_name = os.path.basename(file_path)
jasmine_file = JasmineFile(file_name, file_path, self.included_tags, self.excluded_tags)
jasmine_file_list.append(jasmine_file)
return jasmine_file_list
def is_runnable(self, test_name):
runnable = False
for each_test in self.get_all_runnable_tests():
tn = each_test.test_name.replace('\\', "").replace("'", "").replace('"', '')
tn1 = test_name.replace('\\', "").replace("'", "").replace('"', '')
if tn == tn1:
if each_test.is_runnable():
runnable = True
break
return runnable
def get_total_number_tests(self):
total = len(self.jasmine_tests)
return total
def get_all_runnable_tests(self):
# Return list of runnable tests
runnable_list = []
for each_test in self.jasmine_tests:
if each_test.is_runnable():
runnable_list.append(each_test)
return runnable_list
def get_total_number_runnable(self):
# Return total number of tests that should be ran
total = len(self.get_all_runnable_tests())
return total
def get_all_non_runnable_tests(self):
# return list of non runnable tests
non_runnable_list = []
for each_test in self.jasmine_tests:
if not each_test.is_runnable():
non_runnable_list.append(each_test)
return non_runnable_list
def get_total_number_not_runnable(self):
# Return total number of tests that should not be ran
total = len(self.get_all_non_runnable_tests())
return total
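# --- Hedged usage sketch (added, not part of the original module) ---
# How JasmineManifest might be driven. The glob path and tags are illustrative
# assumptions; a flat pattern is shown because glob.glob() is called without
# recursive=True in get_all_tests().
if __name__ == "__main__":
    manifest = JasmineManifest(
        test_globs=["./spec/*.spec.js"],
        included_tags=["smoke"],   # '#' prefix is optional
        excluded_tags=["flaky"],
    )
    print("total tests   :", manifest.get_total_number_tests())
    print("runnable tests:", manifest.get_total_number_runnable())
    print("excluded tests:", manifest.get_total_number_not_runnable())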
|
python
|
STATS = []
num_timeouts = 15
num_timeouts = 40
num_problems = 55
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Pyro4 configuration
DATABASE = 'ws.db'
OBJETO_PYRO = 'servidor1.configura'
DIRECCION_PYRO = '192.168.1.115'  # in our case the object address and the name server address are the same, since both run on the same machine
DIRECCION_PYRO_LOCAL = '192.168.1.115'
KEY = 'the_same_string_for_server_and_client'
# Web service (WS) configuration
WEBPATH = '/ws'
PROTOCOL = 'restjson'
DIRECCION_WS = '192.168.0.1'
PUERTO_WS = 80
# Caster (broadcast) configuration
DIRECCION_BLUETOOTH = "192.168.1.115:80"
MENSAJE = "Servidor de publicidad de restaurante"
PUERTO_DIFUSION = 5555
TIEMPO_ANUNCIOS = 30
|
python
|
from django.contrib import admin
from django.contrib.admin.utils import reverse_field_path
from django.db.models import Max, Min
from django.db.models.fields import DecimalField, FloatField, IntegerField
from .forms import RangeNumericForm, SingleNumericForm, SliderNumericForm
class NumericFilterModelAdmin(admin.ModelAdmin):
class Media:
css = {
'all': (
'js/nouislider.min.css',
'css/admin-numeric-filter.css',
)
}
js = (
'js/wNumb.min.js',
'js/nouislider.min.js',
'js/admin-numeric-filter.js',
)
class SingleNumericFilter(admin.FieldListFilter):
request = None
parameter_name = None
template = 'admin/filter_numeric_single.html'
def __init__(self, field, request, params, model, model_admin, field_path):
super().__init__(field, request, params, model, model_admin, field_path)
if not isinstance(field, (DecimalField, IntegerField, FloatField)):
raise TypeError('Class {} is not supported for {}.'.format(type(self.field), self.__class__.__name__))
self.request = request
if self.parameter_name is None:
self.parameter_name = self.field.name
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
def queryset(self, request, queryset):
if self.value():
return queryset.filter(**{self.parameter_name: self.value()})
def value(self):
return self.used_parameters.get(self.parameter_name, None)
def expected_parameters(self):
return [self.parameter_name]
def choices(self, changelist):
return ({
'request': self.request,
'parameter_name': self.parameter_name,
'form': SingleNumericForm(name=self.parameter_name, data={self.parameter_name: self.value()}),
}, )
class RangeNumericFilter(admin.FieldListFilter):
request = None
parameter_name = None
template = 'admin/filter_numeric_range.html'
def __init__(self, field, request, params, model, model_admin, field_path):
super().__init__(field, request, params, model, model_admin, field_path)
if not isinstance(field, (DecimalField, IntegerField, FloatField)):
raise TypeError('Class {} is not supported for {}.'.format(type(self.field), self.__class__.__name__))
self.request = request
if self.parameter_name is None:
self.parameter_name = self.field.name
if self.parameter_name + '_from' in params:
value = params.pop(self.parameter_name + '_from')
self.used_parameters[self.parameter_name + '_from'] = value
if self.parameter_name + '_to' in params:
value = params.pop(self.parameter_name + '_to')
self.used_parameters[self.parameter_name + '_to'] = value
def queryset(self, request, queryset):
filters = {}
value_from = self.used_parameters.get(self.parameter_name + '_from', None)
if value_from is not None and value_from != '':
filters.update({
self.parameter_name + '__gte': self.used_parameters.get(self.parameter_name + '_from', None),
})
value_to = self.used_parameters.get(self.parameter_name + '_to', None)
if value_to is not None and value_to != '':
filters.update({
self.parameter_name + '__lte': self.used_parameters.get(self.parameter_name + '_to', None),
})
return queryset.filter(**filters)
def expected_parameters(self):
return [
'{}_from'.format(self.parameter_name),
'{}_to'.format(self.parameter_name),
]
def choices(self, changelist):
return ({
'query_string': [],
'request': self.request,
'parameter_name': self.parameter_name,
'form': RangeNumericForm(name=self.parameter_name, data={
self.parameter_name + '_from': self.used_parameters.get(self.parameter_name + '_from', None),
self.parameter_name + '_to': self.used_parameters.get(self.parameter_name + '_to', None),
}),
}, )
class SliderNumericFilter(RangeNumericFilter):
MAX_DECIMALS = 7
MAX_STEP = 7
template = 'admin/filter_numeric_slider.html'
field = None
def __init__(self, field, request, params, model, model_admin, field_path):
super().__init__(field, request, params, model, model_admin, field_path)
self.field = field
parent_model, reverse_path = reverse_field_path(model, field_path)
if model == parent_model:
self.q = model_admin.get_queryset(request)
else:
self.q = parent_model._default_manager.all()
def choices(self, changelist):
min_value = self.q.all().aggregate(min=Min(self.parameter_name)).get('min', 0)
max_value = self.q.all().aggregate(max=Max(self.parameter_name)).get('max', 0)
if isinstance(self.field, IntegerField):
decimals = 0
step = 1
elif isinstance(self.field, FloatField):
values = self.q.all().values_list(self.parameter_name, flat=True)
max_precision = max(str(value)[::-1].find('.') for value in values)
decimals = self._get_decimals(max_precision)
step = self._get_min_step(max_precision)
elif isinstance(self.field, DecimalField):
step = self._get_min_step(self.field.decimal_places)
decimals = self._get_decimals(self.field.decimal_places)
return ({
'query_string': [],
'decimals': decimals,
'step': step,
'parameter_name': self.parameter_name,
'request': self.request,
'min': min_value,
'max': max_value,
'value_from': self.used_parameters.get(self.parameter_name + '_from', min_value),
'value_to': self.used_parameters.get(self.parameter_name + '_to', max_value),
'form': SliderNumericForm(name=self.parameter_name, data={
self.parameter_name + '_from': self.used_parameters.get(self.parameter_name + '_from', min_value),
self.parameter_name + '_to': self.used_parameters.get(self.parameter_name + '_to', max_value),
})
}, )
def _get_decimals(self, decimals):
if decimals >= self.MAX_DECIMALS:
return self.MAX_DECIMALS
return decimals
def _get_min_step(self, precision):
result_format = '{{:.{}f}}'.format(precision - 1)
return float(result_format.format(0) + '1')
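# --- Hedged usage sketch (added, not part of the original module) ---
# Registering the filters above in a ModelAdmin. The Product model and its
# 'price' field are assumptions used only for illustration.
# from .models import Product
#
# @admin.register(Product)
# class ProductAdmin(NumericFilterModelAdmin):
#     list_filter = (
#         ('price', SliderNumericFilter),   # slider between the min and max price
#     )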
|
python
|
# -*- coding: utf-8 -*-
# @Author: Liu Shaoweihua
# @Date: 2019-11-18
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
import copy
import json
import math
import collections
import tensorflow as tf
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int. The expected rank of `tensor`. If this is
            specified and `tensor` has a different rank, an exception will be
            thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
# add sequence mask for:
# 1. random shuffle lm modeling---xlnet with random shuffled input
# 2. left2right and right2left language modeling
# 3. conditional generation
def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs):
if seq_type == 'seq2seq':
if mask_sequence is not None:
seq_shape = get_shape_list(mask_sequence, expected_rank=2)
seq_len = seq_shape[1]
ones = tf.ones((1, seq_len, seq_len))
a_mask = tf.matrix_band_part(ones, -1, 0)
s_ex12 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 2)
s_ex13 = tf.expand_dims(tf.expand_dims(mask_sequence, 1), 3)
a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask
# generate mask of batch x seq_len x seq_len
a_mask = tf.reshape(a_mask, (-1, seq_len, seq_len))
out_mask = attention_mask * a_mask
else:
ones = tf.ones_like(attention_mask[:1])
mask = (tf.matrix_band_part(ones, -1, 0))
out_mask = attention_mask * mask
else:
out_mask = attention_mask
return out_mask
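# --- Hedged usage sketch (added, not part of the original module) ---
# gather_indexes() picks per-example positions out of a [batch, seq_len, width]
# tensor. The shapes below are illustrative; this assumes TF 1.x graph mode,
# matching the rest of this module.
# import numpy as np
# sequence = tf.constant(np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4))
# positions = tf.constant([[0, 2], [1, 1]], dtype=tf.int32)
# gathered = gather_indexes(sequence, positions)   # shape: [batch * positions, width] = [4, 4]
# with tf.Session() as sess:
#     print(sess.run(gathered))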
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Busana Apparel Group. All rights reserved.
#
# This product and its source code is protected by patents, copyright laws and
# international copyright treaties, as well as other intellectual property
# laws and treaties. The product is licensed, not sold.
#
# The source code and sample programs in this package or parts hereof
# as well as the documentation shall not be copied, modified or redistributed
# without permission, explicit or implied, of the author.
#
# This module is part of Centric PLM Integration Bridge and is released under
# the Apache-2.0 License: https://www.apache.org/licenses/LICENSE-2.0
import logging
import traceback
from common import consts
from common import modconfig
from core.objfactory import AbstractFactory
from core.startable import Startable, StartableManager
from core.msgobject import MessageFactory, MessageEvent, MessageCommand
from core.msghandler import MessageNotifier
from core.prochandler import CommandProcessor
from multiprocessing.pool import ThreadPool
from configparser import ConfigParser
from jproperties import Properties
from threading import get_ident
class DummyClass(object):
pass
class BaseExecutor(Startable):
def __init__(self, config=None, module_config=None, module=None, workers=4):
super(BaseExecutor, self).__init__(config=config)
self._collection = dict()
self._max_processes = workers
self._pool = None
self._module = module
self._module_config = module_config
self._props = None
def do_configure(self):
self._max_processes = self._max_processes if self._max_processes > 0 else 4
def do_start(self):
self._pool = ThreadPool(processes=self._max_processes)
def do_stop(self):
self._pool.terminate()
def get_module(self):
return self._module
def is_valid_module(self, message_obj):
return message_obj.MODULE == self._module
def get_properties(self):
return self._props
def set_properties(self, props):
self._props = props
def execute_module(self, message_obj):
pass
def set_module_configuration(self, module_config):
self._module_config = module_config
def get_module_configuration(self):
return self._module_config
def _get_klass_from_cache(self, class_name):
return None
def _register_klass_to_cache(self, class_name, mod):
return None
def _get_klass_module(self, msg_obj):
class_name = msg_obj if isinstance(msg_obj, str) \
else self._props.properties[msg_obj.get_module_id()]
components = class_name.split(".")
return components, ".".join(components[:-1]), class_name
def _get_klass(self, msg_obj):
components, import_modules, class_name = self._get_klass_module(msg_obj)
logging.debug("BaseExecutor.get_klass: {0} output {1} - {2}".format(msg_obj, components, import_modules))
mod = self._get_klass_from_cache(class_name)
if not mod:
try:
mod = __import__(import_modules)
for cmp in components[1:]:
mod = getattr(mod, cmp)
mod = mod if issubclass(mod, CommandProcessor) else None
self._register_klass_to_cache(class_name, mod) if mod else None
except Exception as ex:
logging.error(ex)
return mod
def _create_object(self, klass):
if not klass:
return None
if klass.__name__ not in self._collection:
self._collection[klass.__name__] = DummyClass()
parent = self._collection[klass.__name__]
module = object.__new__(klass)
module.__init__()
module.set_configuration(self.get_configuration())
module.set_module_configuration(self.get_module_configuration())
module.set_parent(parent)
module.configure()
logging.debug("BaseExecutor.create_object: {0} output {1}".format(klass, module))
return module
class ModuleExecutor(BaseExecutor):
def __init__(self, config=None, module=None, workers=4):
super(ModuleExecutor, self).__init__(config=config, module=module, workers=workers)
self._module_dict = dict()
def _get_klass_from_cache(self, class_name):
return self._module_dict[class_name] if class_name in self._module_dict else None
def _register_klass_to_cache(self, class_name, mod):
self._module_dict[class_name] = mod
def has_service(self, message_obj):
return None
class EventExecutor(ModuleExecutor):
def __init__(self, config=None, module=None, workers=4):
super(EventExecutor, self).__init__(config=config, module=module, workers=workers)
def is_valid_module(self, message_obj):
return super(EventExecutor, self).is_valid_module(message_obj) and isinstance(message_obj, MessageEvent)
def has_service(self, message_obj):
props = self.get_properties()
return props.has_section(message_obj.get_module_id())
def execute_module(self, message_obj):
if self.has_service(message_obj):
try:
props = self.get_properties()
section_props = props[message_obj.get_module_id()]
str_mod = None if message_obj.EVENT not in section_props else section_props[message_obj.EVENT]
list_mod = [str_item.split(":") for str_item in (str_mod.split(",") if str_mod else [])]
for str_mod, str_func in list_mod:
klass = self._get_klass(str_mod)
module = self._create_object(klass)
logging.debug("EventExecutor.execute_module: klass and module {0} - {1}".format(klass, module))
self.assign_event(module, str_func, message_obj)
except Exception as ex:
logging.error(ex)
else:
logging.error("Could not parse message correctly")
def assign_event(self, module, func, event):
logging.debug("Submitting event {0}.{1}:{2} params: {3}".format(event.MODULE, event.SUBMODULE,
event.EVENT, event.PARAMS))
        self._pool.apply_async(self.do_execute_event, (module, func, event))
@staticmethod
def do_execute_event(module, func, event):
try:
logging.debug("Processing {0} event on thread {1}".format(event.get_module_id(), get_ident()))
module.perform_notify(func, event)
except Exception:
logging.error(traceback.format_exc())
finally:
logging.debug("End processing {0} event on thread {1}".format(event.get_module_id(), get_ident()))
class CommandExecutor(ModuleExecutor):
def __init__(self, config=None, module=None, workers=4):
super(CommandExecutor, self).__init__(config=config, module=module, workers=workers)
def is_valid_module(self, message_obj):
return super(CommandExecutor, self).is_valid_module(message_obj) and isinstance(message_obj, MessageCommand)
def has_service(self, message_obj):
props = self.get_properties()
return message_obj.get_module_id() in props.keys()
def execute_module(self, message_obj):
if self.has_service(message_obj):
try:
klass = self._get_klass(message_obj)
module = self._create_object(klass)
logging.debug("CommandExecutor.execute_module: klass and module {0} - {1}".format(klass, module))
self.assign_task(module, message_obj)
except Exception as ex:
logging.error(ex)
else:
logging.error("Could not find service for {0}.{1}".format(message_obj.MODULE, message_obj.SUBMODULE))
def assign_task(self, module, command):
logging.debug("Submitting command {0}.{1}:{2} params: {3}".format(command.MODULE, command.SUBMODULE,
command.COMMAND, command.PARAMS))
self._pool.apply_async(self.do_execute, (module, command))
@staticmethod
def do_execute(module, command):
try:
logging.debug("Processing {0} command on thread {1}".format(command.get_module_id(), get_ident()))
module.perform_exec(command)
except Exception:
logging.error(traceback.format_exc())
finally:
logging.debug("End processing {0} command on thread {1}".format(command.get_module_id(), get_ident()))
class ExecutorFactory(AbstractFactory):
def __init__(self, config=None):
super(ExecutorFactory, self).__init__(config=config)
        self._command_props = None
        self._event_props = None
self._configured = False
def do_configure(self):
config_file = "{0}/{1}".format(consts.DEFAULT_SCRIPT_PATH, consts.DEFAULT_COMMAND_FILE)
self._command_props = Properties()
with open(config_file, "rb") as file_prop:
self._command_props.load(file_prop, "utf-8")
config_file = "{0}/{1}".format(consts.DEFAULT_SCRIPT_PATH, consts.DEFAULT_EVENT_FILE)
self._event_props = ConfigParser()
self._event_props.read(config_file)
self._configured = True
def generate(self, config, message_obj):
module_obj = None
self.do_configure() if not self._configured else None
if isinstance(message_obj, MessageEvent):
module_obj = EventExecutor(config=config, module=message_obj.MODULE)
module_obj.set_properties(self._event_props)
elif isinstance(message_obj, MessageCommand):
module_obj = CommandExecutor(config=config, module=message_obj.MODULE)
module_obj.set_properties(self._command_props)
return module_obj
class BaseExecutionManager(StartableManager):
def __init__(self, config):
super(BaseExecutionManager, self).__init__(config=config)
self._executor_factory = ExecutorFactory(config=config)
self._module_config = None
def get_valid_module(self, message_obj):
object_list = [obj for obj in self.get_objects() if isinstance(obj, BaseExecutor)]
for module_obj in object_list:
if module_obj.is_valid_module(message_obj):
return module_obj
return None
def _register_module_object(self, message_obj):
if not self._module_config:
self._module_config = modconfig.get_configuration()
module_object = self._executor_factory.generate(self.get_configuration(), message_obj)
module_object.set_configuration(self.get_configuration())
module_object.set_module_configuration(self._module_config)
self.add_object(module_object if module_object else None)
return module_object
class MessageExecutionManager(BaseExecutionManager):
def __init__(self, config):
super(MessageExecutionManager, self).__init__(config=config)
def register_listener(self, listener):
listener.set_on_message_received(self.on_handle_message) if isinstance(listener, MessageNotifier) else None
def on_handle_message(self, obj, message):
try:
message_object = MessageFactory.generate(message) if message else None
if (not message_object) or (not self.is_running()):
logging.error("Could not parse message correctly: {0}".format(message))
return
module_object = self.get_valid_module(message_object)
module_object = module_object if module_object else self._register_module_object(message_object)
module_object.execute_module(message_object) if module_object else None
except Exception as ex:
logging.exception(ex)
|
python
|
import cv2
import os
import glob
from matplotlib import pyplot as plt
import numpy as np
def readImages(path):
img_array = []
imgc = []
names = []
for filename in glob.glob(path):
names.append(filename)
names.sort()
for filename in names:
img = cv2.imread(filename, 0)
img1 = cv2.imread(filename)
img_array.append(img)
imgc.append(img1)
return img_array, imgc
def jacobian(pt):
dW = np.array([[pt[0], 0, pt[1], 0, 1, 0],
[0, pt[0], 0, pt[1], 0, 1]])
return dW
def affineMatrix(params):
M = np.array([[1 + params[0], params[2], params[4]],
[params[1], 1 + params[3], params[5]]])
return M
def extractWarpedROI(img, p_prev, rect):
M = affineMatrix(p_prev)
I = cv2.warpAffine(img, M, (0, 0), flags=cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP) # warped Image
I = I[rect[0, 1]:rect[1, 1], rect[0, 0]:rect[1, 0]] # selecting region of interest of warped image
return I
# def Correction(T,img):
# Tmean = np.mean(T)
# iMean = np.mean(img)
# if(abs(Tmean - imean)< 2)):
# return img
# elif((Tmean-imean)<10):
def zScore(Tmean, img, thresh):
Tmeanmat = np.zeros_like(img)
Tmeanmat[0:img.shape[0], 0:img.shape[1]] = Tmean
img_mean_matrix = np.zeros_like(img)
img_mean_matrix[0:img.shape[0], 0:img.shape[1]] = np.mean(img)
standardDevi = np.std(img)
ZscoreTemp = img - Tmeanmat
Zscore = ZscoreTemp/standardDevi
if np.mean(img) - Tmean < thresh:
imgshift = -(Zscore * standardDevi) + img_mean_matrix
else:
imgshift = (Zscore * standardDevi) + img_mean_matrix
return imgshift.astype(dtype=np.uint8)
def gammaCorrection(frame, gamma=1.0):
newPixel = np.zeros(256, np.uint8)
for i in range(256):
newPixel[i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
frame = newPixel[frame]
return frame
def affineLKtracker(T, img, rect, p_prev):
oIx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
oIy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
for i in range(1000):
H = np.zeros((6, 6))
I = extractWarpedROI(img, p_prev, rect) # Warped Image ROI
# I = gammaCorrection(I, 2) # Correcting image
I = zScore(np.mean(T), I, 20)
Ix = extractWarpedROI(oIx, p_prev, rect) # Warped gradient,Ix ROI
Iy = extractWarpedROI(oIy, p_prev, rect) # Warped gradient,Iy ROI
# uncomment to use double for loops
# R = np.zeros((6,1))
# error = (T.astype(int)-I.astype(int)) #computing T(x)- I(w(x,p))
# for j in range(T.shape[0]):
# for k in range(T.shape[1]):
# gradient = np.array([Ix[j,k],Iy[j,k]]).reshape(1,2) #compute warped gradient
# dW = jacobian([j,k]) #compute jacobian
# gradientDw = np.dot(gradient,dW) #compute steepest descent,D
# R += np.dot(gradientDw.T,error[j,k]) #compute transpose(D).(T(x)-I(w(x,p))),R
# H += np.dot(gradientDw.T,gradientDw) #compute hessian matrix
# dp = np.dot(np.linalg.inv(H),R) #get change in p
# uncomment to use meshgrid
error = (T.astype(int) - I.astype(int)).reshape(-1, 1) # computing T(x)- I(w(x,p))
R = np.zeros((T.shape[0] * T.shape[1], 6))
x, y = np.meshgrid(range(T.shape[1]), range(T.shape[0]))
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
        for idx in range(len(x)):
            dW = jacobian([x[idx][0], y[idx][0]])
            gradient = np.array([Ix[y[idx][0]][x[idx][0]], Iy[y[idx][0]][x[idx][0]]])
            R[idx] = np.dot(gradient, dW).reshape(1, -1)
H = R.T @ R
dp = np.linalg.inv(H) @ R.T @ error
# ----
p_prev = p_prev.reshape(6, 1) # change p_prev to a vector
p_prev[0:4] += 80* dp[0:4] # update change in p_prev
p_prev[4:6] += 200 * dp[4:6]
p_prev = p_prev.reshape(6, ) # convert p_prev back to array
if np.linalg.norm(dp) <= 0.1:
return p_prev
return p_prev
def main():
path = "./Data/Car4/img/*.jpg"
images, cimages = readImages(path)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('trackcarrobust.avi', fourcc, 5.0, (images[0].shape[1], images[0].shape[0]))
rect_roi = np.array([(69, 52), (175, 136)])
template = images[0][rect_roi[0][1]:rect_roi[1][1], rect_roi[0][0]:rect_roi[1][0]]
p_prev = np.zeros(6)
for i in range(1, len(images)):
It = images[i]
p_prev = affineLKtracker(template, It, rect_roi, p_prev)
M = np.vstack((affineMatrix(p_prev), [0, 0, 1])) # get new rect coordinates
x1, y1 = M.dot(np.append(rect_roi[0], 1))[0:2]
x2, y2 = M.dot(np.append(rect_roi[1], 1))[0:2]
img = cv2.rectangle(cimages[i], (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
cv2.imshow('image1', img)
out.write(img)
if cv2.waitKey(1) & 0xFF == ord('q'):
            break
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python
from __future__ import absolute_import
import sys
import argparse
import symstore
class CompressionNotSupported(Exception):
pass
def parse_args():
parser = argparse.ArgumentParser(
description="publish windows debugging files")
parser.add_argument("-d", "--delete",
metavar="TRANSACTION_ID",
help="delete transaction")
parser.add_argument("-z", "--compress",
action="store_true",
help="publish compressed files")
parser.add_argument("-p", "--product-name", default="",
help="name of the product")
parser.add_argument("-r", "--product-version", default="",
help="version of the product")
parser.add_argument("--version",
action="version",
version="symstore %s" % symstore.VERSION,
help="show program's version number and exit")
parser.add_argument("store_path", metavar="STORE_PATH",
type=str,
help="root directory of the symbol store")
parser.add_argument("files", metavar="FILE", type=str, nargs="*",
help="PDB or PE file(s) to publish")
return parser.parse_args()
def err_exit(error_msg):
sys.stderr.write("%s\n" % error_msg)
sys.exit(1)
def unknown_ext_err(file, file_extension):
if len(file_extension) > 0:
msg = "unknown file extension '%s'" % file_extension
else:
msg = "no file extension"
err_exit("%s: %s, can't figure out file format" % (file, msg))
def check_compression_support(compress_flag):
if not compress_flag:
        # compression not requested, no need to check
return
from symstore import cab
if not cab.compression_supported:
raise CompressionNotSupported()
def delete_action(sym_store, transaction_id):
try:
sym_store.delete_transaction(transaction_id)
except symstore.TransactionNotFound:
err_exit("no transaction with id '%s' found" % transaction_id)
def add_action(sym_store, files, product_name, product_version, compress):
try:
# error-out if no compression
check_compression_support(compress)
# create new add transaction, add all specified files
transaction = sym_store.new_transaction(product_name, product_version)
for file in files:
transaction.add_file(file, compress)
# commit the transaction to the store
sym_store.commit(transaction)
except symstore.UnknownFileExtension as e:
unknown_ext_err(file, e.file_extension)
except symstore.FileFormatError as e:
err_exit("%s: invalid %s file: %s" % (file, e.format_name, e))
except CompressionNotSupported:
err_exit("gcab module not available, compression not supported")
except symstore.FileNotFound as e:
err_exit("No such file: %s" % e.filename)
def main():
args = parse_args()
sym_store = symstore.Store(args.store_path)
if args.delete is not None:
delete_action(sym_store, args.delete)
return
# otherwise this is an 'add' action
add_action(sym_store, args.files, args.product_name,
args.product_version, args.compress)
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['StaticSiteArgs', 'StaticSite']
@pulumi.input_type
class StaticSiteArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
allow_config_file_updates: Optional[pulumi.Input[bool]] = None,
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']] = None,
identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuDescriptionArgs']] = None,
staging_environment_policy: Optional[pulumi.Input['StagingEnvironmentPolicy']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_properties: Optional[pulumi.Input['StaticSiteTemplateOptionsArgs']] = None):
"""
The set of arguments for constructing a StaticSite resource.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[bool] allow_config_file_updates: <code>false</code> if config file is locked for this static web app; otherwise, <code>true</code>.
:param pulumi.Input[str] branch: The target branch in the repository.
:param pulumi.Input['StaticSiteBuildPropertiesArgs'] build_properties: Build properties to configure on the repository.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: Managed service identity.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Name of the static site to create or update.
:param pulumi.Input[str] repository_token: A user's github repository token. This is used to setup the Github Actions workflow file and API secrets.
:param pulumi.Input[str] repository_url: URL for the repository of the static site.
:param pulumi.Input['SkuDescriptionArgs'] sku: Description of a SKU for a scalable resource.
:param pulumi.Input['StagingEnvironmentPolicy'] staging_environment_policy: State indicating whether staging environments are allowed or not allowed for a static web app.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['StaticSiteTemplateOptionsArgs'] template_properties: Template options for generating a new repository.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if allow_config_file_updates is not None:
pulumi.set(__self__, "allow_config_file_updates", allow_config_file_updates)
if branch is not None:
pulumi.set(__self__, "branch", branch)
if build_properties is not None:
pulumi.set(__self__, "build_properties", build_properties)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if repository_token is not None:
pulumi.set(__self__, "repository_token", repository_token)
if repository_url is not None:
pulumi.set(__self__, "repository_url", repository_url)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if staging_environment_policy is not None:
pulumi.set(__self__, "staging_environment_policy", staging_environment_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if template_properties is not None:
pulumi.set(__self__, "template_properties", template_properties)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="allowConfigFileUpdates")
def allow_config_file_updates(self) -> Optional[pulumi.Input[bool]]:
"""
<code>false</code> if config file is locked for this static web app; otherwise, <code>true</code>.
"""
return pulumi.get(self, "allow_config_file_updates")
@allow_config_file_updates.setter
def allow_config_file_updates(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_config_file_updates", value)
@property
@pulumi.getter
def branch(self) -> Optional[pulumi.Input[str]]:
"""
The target branch in the repository.
"""
return pulumi.get(self, "branch")
@branch.setter
def branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "branch", value)
@property
@pulumi.getter(name="buildProperties")
def build_properties(self) -> Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']]:
"""
Build properties to configure on the repository.
"""
return pulumi.get(self, "build_properties")
@build_properties.setter
def build_properties(self, value: Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']]):
pulumi.set(self, "build_properties", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the static site to create or update.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="repositoryToken")
def repository_token(self) -> Optional[pulumi.Input[str]]:
"""
A user's github repository token. This is used to setup the Github Actions workflow file and API secrets.
"""
return pulumi.get(self, "repository_token")
@repository_token.setter
def repository_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_token", value)
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> Optional[pulumi.Input[str]]:
"""
URL for the repository of the static site.
"""
return pulumi.get(self, "repository_url")
@repository_url.setter
def repository_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_url", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuDescriptionArgs']]:
"""
Description of a SKU for a scalable resource.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuDescriptionArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="stagingEnvironmentPolicy")
def staging_environment_policy(self) -> Optional[pulumi.Input['StagingEnvironmentPolicy']]:
"""
State indicating whether staging environments are allowed or not allowed for a static web app.
"""
return pulumi.get(self, "staging_environment_policy")
@staging_environment_policy.setter
def staging_environment_policy(self, value: Optional[pulumi.Input['StagingEnvironmentPolicy']]):
pulumi.set(self, "staging_environment_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="templateProperties")
def template_properties(self) -> Optional[pulumi.Input['StaticSiteTemplateOptionsArgs']]:
"""
Template options for generating a new repository.
"""
return pulumi.get(self, "template_properties")
@template_properties.setter
def template_properties(self, value: Optional[pulumi.Input['StaticSiteTemplateOptionsArgs']]):
pulumi.set(self, "template_properties", value)
class StaticSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_config_file_updates: Optional[pulumi.Input[bool]] = None,
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuDescriptionArgs']]] = None,
staging_environment_policy: Optional[pulumi.Input['StagingEnvironmentPolicy']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteTemplateOptionsArgs']]] = None,
__props__=None):
"""
Static Site ARM resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_config_file_updates: <code>false</code> if config file is locked for this static web app; otherwise, <code>true</code>.
:param pulumi.Input[str] branch: The target branch in the repository.
:param pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']] build_properties: Build properties to configure on the repository.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Managed service identity.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Name of the static site to create or update.
        :param pulumi.Input[str] repository_token: A user's GitHub repository token. This is used to set up the GitHub Actions workflow file and API secrets.
:param pulumi.Input[str] repository_url: URL for the repository of the static site.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[pulumi.InputType['SkuDescriptionArgs']] sku: Description of a SKU for a scalable resource.
:param pulumi.Input['StagingEnvironmentPolicy'] staging_environment_policy: State indicating whether staging environments are allowed or not allowed for a static web app.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['StaticSiteTemplateOptionsArgs']] template_properties: Template options for generating a new repository.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StaticSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Static Site ARM resource.
:param str resource_name: The name of the resource.
:param StaticSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StaticSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_config_file_updates: Optional[pulumi.Input[bool]] = None,
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuDescriptionArgs']]] = None,
staging_environment_policy: Optional[pulumi.Input['StagingEnvironmentPolicy']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteTemplateOptionsArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StaticSiteArgs.__new__(StaticSiteArgs)
__props__.__dict__["allow_config_file_updates"] = allow_config_file_updates
__props__.__dict__["branch"] = branch
__props__.__dict__["build_properties"] = build_properties
__props__.__dict__["identity"] = identity
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["repository_token"] = repository_token
__props__.__dict__["repository_url"] = repository_url
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["staging_environment_policy"] = staging_environment_policy
__props__.__dict__["tags"] = tags
__props__.__dict__["template_properties"] = template_properties
__props__.__dict__["content_distribution_endpoint"] = None
__props__.__dict__["custom_domains"] = None
__props__.__dict__["default_hostname"] = None
__props__.__dict__["key_vault_reference_identity"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provider"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_provided_function_apps"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20210201:StaticSite"), pulumi.Alias(type_="azure-native:web:StaticSite"), pulumi.Alias(type_="azure-nextgen:web:StaticSite"), pulumi.Alias(type_="azure-native:web/v20190801:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20190801:StaticSite"), pulumi.Alias(type_="azure-native:web/v20200601:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20200601:StaticSite"), pulumi.Alias(type_="azure-native:web/v20200901:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20200901:StaticSite"), pulumi.Alias(type_="azure-native:web/v20201001:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20201001:StaticSite"), pulumi.Alias(type_="azure-native:web/v20201201:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20201201:StaticSite"), pulumi.Alias(type_="azure-native:web/v20210101:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20210101:StaticSite"), pulumi.Alias(type_="azure-native:web/v20210115:StaticSite"), pulumi.Alias(type_="azure-nextgen:web/v20210115:StaticSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StaticSite, __self__).__init__(
'azure-native:web/v20210201:StaticSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StaticSite':
"""
Get an existing StaticSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StaticSiteArgs.__new__(StaticSiteArgs)
__props__.__dict__["allow_config_file_updates"] = None
__props__.__dict__["branch"] = None
__props__.__dict__["build_properties"] = None
__props__.__dict__["content_distribution_endpoint"] = None
__props__.__dict__["custom_domains"] = None
__props__.__dict__["default_hostname"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["key_vault_reference_identity"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provider"] = None
__props__.__dict__["repository_token"] = None
__props__.__dict__["repository_url"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["staging_environment_policy"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["template_properties"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_provided_function_apps"] = None
return StaticSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowConfigFileUpdates")
def allow_config_file_updates(self) -> pulumi.Output[Optional[bool]]:
"""
<code>false</code> if config file is locked for this static web app; otherwise, <code>true</code>.
"""
return pulumi.get(self, "allow_config_file_updates")
@property
@pulumi.getter
def branch(self) -> pulumi.Output[Optional[str]]:
"""
The target branch in the repository.
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="buildProperties")
def build_properties(self) -> pulumi.Output[Optional['outputs.StaticSiteBuildPropertiesResponse']]:
"""
Build properties to configure on the repository.
"""
return pulumi.get(self, "build_properties")
@property
@pulumi.getter(name="contentDistributionEndpoint")
def content_distribution_endpoint(self) -> pulumi.Output[str]:
"""
The content distribution endpoint for the static site.
"""
return pulumi.get(self, "content_distribution_endpoint")
@property
@pulumi.getter(name="customDomains")
def custom_domains(self) -> pulumi.Output[Sequence[str]]:
"""
The custom domains associated with this static site.
"""
return pulumi.get(self, "custom_domains")
@property
@pulumi.getter(name="defaultHostname")
def default_hostname(self) -> pulumi.Output[str]:
"""
The default autogenerated hostname for the static site.
"""
return pulumi.get(self, "default_hostname")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="keyVaultReferenceIdentity")
def key_vault_reference_identity(self) -> pulumi.Output[str]:
"""
Identity to use for Key Vault Reference authentication.
"""
return pulumi.get(self, "key_vault_reference_identity")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.ResponseMessageEnvelopeRemotePrivateEndpointConnectionResponse']]:
"""
Private endpoint connections
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter
def provider(self) -> pulumi.Output[str]:
"""
The provider that submitted the last deployment to the primary environment of the static site.
"""
return pulumi.get(self, "provider")
@property
@pulumi.getter(name="repositoryToken")
def repository_token(self) -> pulumi.Output[Optional[str]]:
"""
        A user's GitHub repository token. This is used to set up the GitHub Actions workflow file and API secrets.
"""
return pulumi.get(self, "repository_token")
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> pulumi.Output[Optional[str]]:
"""
URL for the repository of the static site.
"""
return pulumi.get(self, "repository_url")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:
"""
Description of a SKU for a scalable resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="stagingEnvironmentPolicy")
def staging_environment_policy(self) -> pulumi.Output[Optional[str]]:
"""
State indicating whether staging environments are allowed or not allowed for a static web app.
"""
return pulumi.get(self, "staging_environment_policy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="templateProperties")
def template_properties(self) -> pulumi.Output[Optional['outputs.StaticSiteTemplateOptionsResponse']]:
"""
Template options for generating a new repository.
"""
return pulumi.get(self, "template_properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userProvidedFunctionApps")
def user_provided_function_apps(self) -> pulumi.Output[Sequence['outputs.StaticSiteUserProvidedFunctionAppResponse']]:
"""
User provided function apps registered with the static site
"""
return pulumi.get(self, "user_provided_function_apps")
|
python
|
__version__ = "v0.1.0"
|
python
|
# flake8: noqa
from .load_catalog import load_catalog
|
python
|
import ctypes
class MemoryBuffer:
def __init__(self, ptr):
self.start_addr = ptr
self.position = ptr
def tell(self):
return self.position
def read(self, count):
if count == 0:
return b''
        if count < 0:
            raise ValueError("Can't read a negative number of bytes")
data = ctypes.string_at(self.position, count)
self.position += len(data)
return data
    def seek(self, count, whence=0):
        if whence == 0:
            # Absolute positioning: values smaller than the start address are
            # treated as offsets from the start, larger values as raw addresses.
            if count < self.start_addr:
                self.position = self.start_addr + count
            else:
                self.position = count
        elif whence == 1:
            # Relative positioning from the current address.
            self.position += count
        else:
            raise ValueError('Unsupported whence value: %s' % whence)
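# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal example of wrapping a ctypes buffer in MemoryBuffer and reading
# from it. The buffer contents below are hypothetical.
if __name__ == "__main__":
    raw = ctypes.create_string_buffer(b"hello memory buffer")
    buf = MemoryBuffer(ctypes.addressof(raw))
    print(buf.read(5))   # b'hello'
    buf.seek(6)          # small value, so it is treated as an offset from the start
    print(buf.read(6))   # b'memory'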
|
python
|
# coding: utf-8
from graph import UndirectedGraph, Vertex
from reporter import Reporter
from typing import Callable, List, Set
import random
PivotChoice = Callable[[UndirectedGraph, Set[Vertex]], Vertex]
def visit(graph: UndirectedGraph, reporter: Reporter, pivot_choice_X: bool,
candidates: Set[Vertex], excluded: Set[Vertex],
clique: List[Vertex]) -> None:
assert all(graph.degree(v) > 0 for v in candidates)
assert all(graph.degree(v) > 0 for v in excluded)
assert candidates.isdisjoint(excluded)
assert len(candidates) >= 1
if len(candidates) == 1:
# Same logic as below, stripped down for this common case
for v in candidates:
neighbours = graph.adjacencies[v]
assert neighbours
if excluded.isdisjoint(neighbours):
reporter.record(clique + [v])
return
# Quickly handle locally unconnected candidates while finding pivot
remaining_candidates = []
seen_local_degree = 0
for v in candidates:
neighbours = graph.adjacencies[v]
local_degree = len(candidates.intersection(neighbours))
if local_degree == 0:
# Same logic as below, stripped down
if neighbours.isdisjoint(excluded):
reporter.record(clique + [v])
else:
if seen_local_degree < local_degree:
seen_local_degree = local_degree
pivot = v
remaining_candidates.append(v)
if seen_local_degree == 0:
return
if pivot_choice_X:
for v in excluded:
neighbours = graph.adjacencies[v]
local_degree = len(candidates.intersection(neighbours))
if seen_local_degree < local_degree:
seen_local_degree = local_degree
pivot = v
for v in remaining_candidates:
neighbours = graph.adjacencies[v]
assert neighbours
if pivot not in neighbours:
candidates.remove(v)
if neighbouring_candidates := candidates.intersection(neighbours):
neighbouring_excluded = excluded.intersection(neighbours)
visit(graph=graph,
reporter=reporter,
pivot_choice_X=pivot_choice_X,
candidates=neighbouring_candidates,
excluded=neighbouring_excluded,
clique=clique + [v])
elif excluded.isdisjoint(neighbours):
reporter.record(clique + [v])
excluded.add(v)
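# --- Usage sketch (added illustration, not part of the original module) ---
# The real UndirectedGraph and Reporter classes live in graph.py / reporter.py;
# their constructors are not shown here, so this sketch uses hypothetical,
# minimal stand-ins exposing only what visit() relies on: an `adjacencies`
# mapping, a `degree()` method and a `record()` callback.
if __name__ == "__main__":
    class _DemoGraph:
        def __init__(self, adjacencies):
            self.adjacencies = adjacencies
        def degree(self, v):
            return len(self.adjacencies[v])

    class _DemoReporter:
        def __init__(self):
            self.cliques = []
        def record(self, clique):
            self.cliques.append(sorted(clique))

    # triangle 0-1-2 with a pendant vertex 3 attached to 2;
    # the maximal cliques are {0, 1, 2} and {2, 3}
    demo_graph = _DemoGraph({
        0: {1, 2},
        1: {0, 2},
        2: {0, 1, 3},
        3: {2},
    })
    demo_reporter = _DemoReporter()
    visit(graph=demo_graph,
          reporter=demo_reporter,
          pivot_choice_X=False,
          candidates={0, 1, 2, 3},
          excluded=set(),
          clique=[])
    print(demo_reporter.cliques)  # [[0, 1, 2], [2, 3]] (order may vary)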
|
python
|
class Solution(object):
def reorderLogFiles(self, logs):
"""
:type logs: List[str]
:rtype: List[str]
"""
lls = {}
dls = []
for log in logs:
_, v = log.split(" ", 1)
if v.split(" ", 1)[0].isalpha():
lls[log] = v
else:
dls.append(log)
sort_lls = [k for k, _ in sorted(
lls.items(), key=lambda x: x[1] + x[0])]
return sort_lls + dls
def test_reorder_log_files():
s = Solution()
assert ["g1 act car", "a8 act zoo", "ab1 off key dog",
"a1 9 2 3 1", "zo4 4 7"] == s.reorderLogFiles(
["a1 9 2 3 1", "g1 act car", "zo4 4 7", "ab1 off key dog",
"a8 act zoo"])
assert ["7e apw c y", "m azv x f", "8 hyyq z p",
"8 ksif m u", "c otdk cl", "2 y xyr fc", "27 85717 7",
"52 314 99", "d 046099 0", "6 3272401"] == s.reorderLogFiles(
["27 85717 7", "2 y xyr fc", "52 314 99", "d 046099 0",
"m azv x f", "7e apw c y", "8 hyyq z p",
"6 3272401", "c otdk cl", "8 ksif m u"])
assert ["a2 act car", "g1 act car", "a8 act zoo", "ab1 off key dog",
"a1 9 2 3 1", "zo4 4 7"] == s.reorderLogFiles(
["a1 9 2 3 1", "g1 act car", "zo4 4 7",
"ab1 off key dog", "a8 act zoo", "a2 act car"])
|
python
|
from flask import render_template,request,redirect,url_for, abort
from . import main
from flask_login import login_required,current_user
from .forms import PitchesForm,CommentsForm,UpdateProfile
from ..models import Pitches,Comments,User
from .. import photos, db
from datetime import datetime
# from ..requests import get_sources_by_cat,get_all_articles,get_headline_articles
# from ..models import Source,Article
# @main.template_filter('datetimeformat')
# def datetimeformat(value,format='%B'):
# return value.strftime(format)
@main.route('/')
def home():
'''
    View function for the root page that returns pitches grouped by category
'''
# message = "Hello World"
title="Pitches"
interview = Pitches.query.filter_by(category='Interview-Pitch').all()
product = Pitches.query.filter_by(category='Product-Pitch').all()
promotion = Pitches.query.filter_by(category='Promotion-Pitch').all()
business = Pitches.query.filter_by(category='Business-Pitch').all()
# pitches = Pitches.get_all_pitches()
# promotion = Pitches.query.filter_by(category='Promotion-Pitch').all()
# pitches = Pitches.query.all()
# pitches = Pitches.query.order_by('-id').all()
message= 'Welcome to the Pitches'
# return "Hello, World"
return render_template('home.html',title=title,message=message,interview=interview,product=product,promotion=promotion,business=business)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/pitch/new',methods = ['GET','POST'])
@login_required
def new_pitch():
'''
View pitch function that returns the pitch page and data
'''
form = PitchesForm()
if form.validate_on_submit() and form.category.data != 'Select':
body = form.body.data
category = form.category.data
new_pitch = Pitches(body=body,category=category,user_id=current_user.id)
new_pitch.save_pitch()
return redirect(url_for('main.home'))
return render_template('new_pitch.html', pitch_form = form)
@main.route('/pitch/<int:id>',methods = ['GET', 'POST'])
@login_required
def comment(id):
comments_form = CommentsForm()
# pitch = Pitches.query.get(pitch_id)
pitches = Pitches.query.filter_by(id=id).first()
print(pitches)
if comments_form.validate_on_submit():
comment = comments_form.comment.data
new_comment = Comments(the_comment=comment,pitches_id=pitches.id, user_id = current_user.id)
db.session.add(new_comment)
db.session.commit()
    comments_list = Comments.query.filter_by(pitches_id=id).order_by(Comments.id.desc()).all()
print(comments_list)
return render_template('comments.html', comments_form=comments_form,comments_list=comments_list)
# @main.route('/pitch/<int:id>/comments',methods = ['GET', 'POST'])
# @login_required
# def com_list(id):
#
# # comments_form = CommentsForm()
# # pitch = Pitches.query.get(pitch_id)
# pitches = Pitches.query.filter_by(id=id).first()
#
# # if comments_form.validate_on_submit():
# # comment = comments_form.comment.data
# #
# # new_comment = Comments(the_comment=comment,pitches_id=pitches.id, user_id = current_user.id)
# # new_comment.save_comment()
#
# # return redirect(url_for('main.home'))
# comments_list = Comments.query.filter_by(pitches_id=pitches.id).all()
#
# return render_template('com_list.html',comments_list=comments_list)
# @main.route('/comment/new',methods = ['GET', 'POST'])
# @login_required
# def comment():
#
# comments_form = CommentsForm()
# # pitch = Pitches.query.get(pitch_id)
# # pitches = Pitches.query.filter_by(id=id).first()
#
# if comments_form.validate_on_submit():
# comment = comments_form.comment.data
#
# new_comment = Comments(the_comment=comment, user_id = current_user.id)
# new_comment.save_comment()
#
# return redirect(url_for('main.home'))
# # comments_list = Comments.query.filter_by(pitches_id=pitches.id).all()
#
# return render_template('comments.html', comments_form=comments_form)
|
python
|
from hashlib import sha256
import json
from typing import Protocol
from flask import Flask, request
from flask_cors import CORS
from Crypto.Cipher import AES
from script import decrypt, encrypt
from get_data import get_all_data
from flask_talisman import Talisman
from addData import readJson,writeJson
import json
from ml import predML
import numpy as np
from datetime import datetime
# creating a flask app
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
Talisman(app,content_security_policy=None)
@app.route("/authenticate", methods=["GET", "POST"])
def authenticate():
return {"status": "ok"}
@app.route("/getData", methods=["GET", "POST"])
def get_data():
# try:
if True:
param = request.get_json()
print(param)
# data = get_all_data(param['username']) # Returns dictionary {'message':...,'hash':...}
op_ = get_all_data()[-1]["con"]
dec_key = param["key"]
print(op_)
op = ""
for i in range(len(op_)):
if op_[i] == "'":
op += '"'
else:
op += op_[i]
data = json.loads(op) # Returns dictionary {'message':...,'hash':...}
print('data',data,type(data),'key',dec_key)
dec_msg = decrypt(data['message'],dec_key)
if True or sha256(encrypt(dec_msg,dec_key)).hexdigest() == data['hash']:
update_data(json.loads(dec_msg))
response = {'status':'ok','message':dec_msg,'covidstat':getMLPrediction(dec_msg)}
print('RESPONSE : ',response)
return response
else:
return {'status':'error','log':'checksum failed'}
# except:
# return {'status':'error','log':'server error or invalid key'}
@app.route("/knock")
def knock():
return """
<h1>Hey, I am a messenger from the server</h1>
<p>Don't worry, I am up and running</p>
"""
@app.route("/updateInfo",methods=["GET","POST"])
def updateInfo():
param = request.get_json()
print(param)
di = {}
with open('./databases/user_info.json','r') as f:
di = json.load(f)
for i in param.keys():
di[i] = param[i]
with open('./databases/user_info.json','w') as f:
json.dump(di,f)
return {"status":"ok"}
def update_data(record):
jsD = readJson()
if 'data' not in jsD.keys():
jsD['data'] = []
jsD['data'].append(record)
writeJson(jsD)
def getMLPrediction(info):
di = {}
info = json.loads(info)
with open('./databases/user_info.json','r') as f:
di = json.load(f)
dataPoint = []
dataPoint.append(int(di['gender']))
dataPoint.append(0) # race_White
dataPoint.append(0) # race_AA
dataPoint.append(0) # race_Other
dataPoint.append(0) # ethnicity_Hispanic
dataPoint.append(int(di['age']))
dataPoint.append(4) # patientClass
dataPoint.append(0) # encountered type
dataPoint.append(999) # reason for visit
dataPoint.append(int(info["bpsys"]))
dataPoint.append(int(info["bpdia"]))
dataPoint.append(float(info["temp"]))
dataPoint.append(int(info["hrate"]))
dataPoint.append(22) # RR
dataPoint.append(float(info["spo2"]))
dataPoint.append(float(di["BMI"]))
dataPoint.append(1.85) # BSA
dataPoint.append(datetime.now().month-1)
print(np.array(dataPoint))
try:
return predML(dataPoint)
except:
return 0
if __name__ == "__main__":
# run the app
app.run(debug=True, port=5000)
|
python
|
import pygame
import utils
import constants
import os, sys
# -----------------------------------------------------------
# class Environment
# -----------------------------------------------------------
class Environment:
def __init__(self, zone_name, map_name):
        if zone_name is None or map_name is None:
            raise ValueError("zone_name and map_name must not be None")
        if len(zone_name) == 0 or len(map_name) == 0:
            raise ValueError("zone_name and map_name must not be empty")
self.zone_name = zone_name
self.map_name = map_name
# ----
self.init_pygame()
# ----
self.zone_description = ""
self.obstacles = Obstacles(self.zone_name, self.map_name)
self.walkables = Walkables(self.zone_name, self.map_name)
# self.persistents = PersistentObjects(self.zone_name, self.map_name)
# ----
self.all_sprites = pygame.sprite.Group()
self.keep_looping = True
def read_data(self):
self.walkables.read_data()
self.obstacles.read_data()
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def get_walkable_tile(self, x, y):
# for a_tile in self.obstacles:
# if a_tile.x == x:
# if a_tile.y == y:
# return a_tile
for a_tile in self.walkables:
if a_tile.x == x:
if a_tile.y == y:
return a_tile
return None
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
# else:
# print("I don't recognize this event.key in handle_events: {}".format(event.key))
def update_classes(self, all_sprites):
all_sprites = self.walkables.update_classes(all_sprites)
# all_sprites = self.persistents.update_classes(all_sprites)
all_sprites = self.obstacles.update_classes(all_sprites)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.all_sprites = self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
    def main(self):
        while self.keep_looping:
            # tick inside the loop so the frame rate is actually limited
            self.clock.tick(constants.FRAME_RATE)
            self.handle_events()
            self.draw()
        self.goodbye()
        self.myquit()
def goodbye(self):
print("Goodbye!")
def myquit(self):
pygame.quit()
def debug_print(self):
s = "zone_name: {}\nzone_description: {}"
s = s.format(self.zone_name, self.zone_description)
print(s)
self.obstacles.debug_print()
self.walkables.debug_print()
# -----------------------------------------------------------
# class Walkable
# -----------------------------------------------------------
"""
As you can see, class Walkable uses inheritance. We do this so that
we can add this class--which is now a subclass of the pygame.sprite.Sprite
class and so, now, is itself a Sprite--to a pygame.sprite.Group.
If none of that makes any sense to you, don't worry!
I would recommend that you start using inheritance and,
as you see how it works, you will come
to understand it. And, please, ask questions! Ask me, ask on
Stack Overflow (https://stackoverflow.com/) or even Twitter.
"""
class Walkable(pygame.sprite.Sprite):
def __init__(self, mydict):
super().__init__()
self.x = mydict["x"]
self.y = mydict["y"]
self.kind = mydict["kind"]
self.image_filename = mydict["image"]
self.image_path = utils.get_filepath(self.image_filename)
self.image = None
self.rect = None
self.comment = ""
# ----
try:
self.image = pygame.image.load(self.image_path).convert_alpha()
except:
s = "Couldn't open: {}".format(self.image_filename)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
def _collide(self, dx=0, dy=0, obstacles=None):
for a_tile in obstacles:
if a_tile.x == self.x + dx and a_tile.y == self.y + dy:
return True
return False
def move(self, dx=0, dy=0, obstacles=None):
if not self._collide(dx, dy, obstacles):
self.x += dx
self.y += dy
# self.rect = self.rect.move(self.x * TILESIZE, self.y * TILESIZE)
self.rect = self.rect.move(dx * constants.TILESIZE, dy * constants.TILESIZE)
# print("Player has moved. x,y: {},{}; dx={}, dy={}".format(self.x, self.y, dx, dy))
def debug_print(self):
s = "x,y: ({},{}); kind: {}, image: {}, comment: {}"
s = s.format(self.x, self.y, self.kind, self.image_filename, self.comment)
print(s)
# -----------------------------------------------------------
# class Walkables
# -----------------------------------------------------------
class Walkables:
def __init__(self, zone_name, map_name):
        if zone_name is None or len(zone_name) == 0:
            raise ValueError("zone_name must be a non-empty string")
        if map_name is None or len(map_name) == 0:
            raise ValueError("map_name must be a non-empty string")
self.zone_name = zone_name
self.map_name = map_name
# ----
self.all_sprites = pygame.sprite.Group()
self.init_pygame()
self.loop_index = 0
# self.walkables = self.read_data()
self.walkables = []
self.keep_looping = True
# if self.walkables is None:
# raise ValueError("Doh!")
def read_data(self):
self._load_map()
def _load_map(self):
filename = "{}_walkables.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
mytiles = [i[3:] for i in mytiles[2:]]
# ------------------------------------------------------------------
filepath = os.path.join("data", "master_files", "tiles.txt")
file_tiles = utils.read_data_file(filepath, num_of_fields=4)
# ------------------------------------------------------------------
self.walkables = []
for col, tiles in enumerate(mytiles):
tile_list = tiles.split(";")
tile_list = [i.strip() for i in tile_list if len(i.strip()) > 0]
for row, tile in enumerate(tile_list):
# print("tile: {}".format(tile))
if not tile == "..":
mydict = utils.get_dictionary(file_tiles, tile)
if mydict is None:
s = "tile: {}\n".format(tile)
raise ValueError(s)
mydict["x"] = row
mydict["y"] = col
mywalk = Walkable(mydict)
self.walkables.append(mywalk)
if tile == "..":
pass
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
else:
print("I don't recognize this event.key in handle_events: {}".format(event.key))
def update_classes(self, all_sprites):
if len(self.walkables) == 0: raise ValueError("Error")
for elem in self.walkables:
all_sprites.add(elem)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
while self.keep_looping == True:
self.handle_events()
self.draw()
def __len__(self):
return len(self.walkables)
def __getitem__(self, item):
return self.walkables[item]
def __next__(self):
if self.loop_index >= len(self.walkables):
self.loop_index = 0
raise StopIteration
else:
this_value = self.walkables[self.loop_index]
self.loop_index += 1
return this_value
def __iter__(self):
return self
def debug_print(self):
print("Number of grasses: {}".format(len(self.walkables)))
if len(self.walkables) == 0:
s = "Error! There are no grasses to print."
raise ValueError(s)
for grass in self.walkables:
grass.debug_print()
# -----------------------------------------------------------
# class Obstacle
# -----------------------------------------------------------
class Obstacle(pygame.sprite.Sprite):
def __init__(self, mydict):
super().__init__()
self.x = mydict["x"]
self.y = mydict["y"]
self.kind = mydict["kind"]
# self.trigger = mydict["trigger"]
# self.image_filename = mydict["image_filename"]
self.image_filename = mydict["image"]
self.image_path = utils.get_filepath(self.image_filename)
self.image = None
self.rect = None
# ----
try:
self.image = pygame.image.load(self.image_path).convert_alpha()
except Exception as e:
print(e)
s = "Couldn't open: {}".format(self.image_path)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
def read_data(self, zone_name, map_name):
filepath = os.path.join("data", "zones", zone_name, map_name, "actions.txt")
mylines = utils.read_data_file(filepath, 8)
if mylines is None or len(mylines) == 0:
raise ValueError("Error!")
# ----
target_dict = {}
for elem in mylines:
if elem["name"] == self.name:
target_dict = elem
if len(target_dict) == 0:
s = "The name {} was not found in {}".format(self.name, target_dict)
raise ValueError(s)
self.command = target_dict["command"]
if not self.command in constants.MAP_COMMANDS:
raise ValueError("Error! {} is not in {}".format(target_dict["command"], constants.MAP_COMMANDS))
self.image_display = target_dict["image_display"]
self.data = target_dict["data"]
self.inventory_condition = target_dict["inventory_condition"]
if self.inventory_condition == "none":
self.inventory_condition = None
# Need to be able to check that the player has successfully
# completed a conversation.
# Perhaps also check to see that the conversation is in the
# events file.
self.game_condition = target_dict["game_condition"].lower().strip()
if self.game_condition == "none":
self.game_condition = None
self.dialog_text = target_dict["dialog_text"]
self.comment = target_dict["comment"]
self.completed = False
# ----
self.image_display = self.image_display.replace(" ", "_")
if self.image_display.find(".png") == -1:
self.image_display = "{}.png".format(self.image_display)
filepath = utils.get_filepath(self.image_display)
if filepath is None:
s = "I wasn't able to find a path for the file: {}".format(self.image_display)
raise ValueError(s)
try:
self.image = pygame.image.load(filepath).convert_alpha()
except Exception as e:
print(e)
s = "Couldn't open: {}".format(filepath)
raise ValueError(s)
self.image = pygame.transform.scale(self.image, (constants.TILESIZE, constants.TILESIZE))
self.rect = self.image.get_rect()
self.rect = self.rect.move(self.x * constants.TILESIZE, self.y * constants.TILESIZE)
def debug_print(self):
s = "(x,y): {},{}; kind:{}, image_filename: {}, rect: {}"
s = s.format(self.x, self.y, self.kind, self.image_filename, self.rect)
print(s)
# -----------------------------------------------------------
# class Obstacles
# -----------------------------------------------------------
class Obstacles:
def __init__(self, zone_name, map_name):
self.zone_name = zone_name
self.map_name = map_name
self.init_pygame()
self.obstacles = []
self.loop_index = 0
self.keep_looping = True
self.all_sprites = pygame.sprite.Group()
def read_data(self):
self._load_map()
def _load_map(self):
# Load in the map
filename = "{}_obstacles.txt".format(self.map_name)
filepath = os.path.join("data", "zones", self.zone_name, self.map_name, filename)
print("Reading in obstacle map ...")
print("zone: {}, map: {}".format(self.zone_name, self.map_name))
print("filepath for obstacle file: {}".format(filepath))
with open(filepath, "r") as f:
mytiles = f.readlines()
mytiles = [i.strip() for i in mytiles if len(i.strip()) > 0]
mytiles = [i[3:] for i in mytiles[2:]]
# ------------------------------------------------------------------
filepath = os.path.join("data", "master_files", "tiles.txt")
file_tiles = utils.read_data_file(filepath, num_of_fields=4)
# ------------------------------------------------------------------
self.obstacles = []
for col, tiles in enumerate(mytiles):
list_tiles = tiles.split(";")
list_tiles = [i.strip() for i in list_tiles if len(i.strip()) > 0]
for row, tile in enumerate(list_tiles):
if not tile == "..":
tile_dict = utils.get_dictionary(file_tiles, tile)
if tile_dict is None:
raise ValueError("tile: {}".format(tile))
tile_dict["x"] = row
tile_dict["y"] = col
my_obstacle = Obstacle(tile_dict)
self.obstacles.append(my_obstacle)
elif tile == "..":
pass
else:
s = "Error! I don't recognize this: {}".format(tile)
raise ValueError(s)
def init_pygame(self):
pygame.init()
self.BG_COLOR = constants.BG_COLOR
self.clock = pygame.time.Clock()
pygame.display.set_caption("Enter {}".format(constants.TITLE))
self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
self.font = pygame.font.Font(None, 40)
# self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def collision(self, x, y):
        if self.obstacles is None:
            raise ValueError("Error: obstacles have not been loaded")
        if len(self.obstacles) == 0:
            raise ValueError("Error: the obstacle list is empty")
for a_tile in self.obstacles:
if a_tile.kind == "empty":
continue
if a_tile.x == x:
# print("tile y: {}, player y: {}".format(a_tile.y, y))
if a_tile.y == y:
print("tile x,y: ({},{}), player x,y: ({},{})".format(a_tile.x, a_tile.y, x, y))
return True
return False
def collision_is_close(self, x, y):
        if self.obstacles is None:
            raise ValueError("Error: obstacles have not been loaded")
        if len(self.obstacles) == 0:
            raise ValueError("Error: the obstacle list is empty")
# ----
for a_tile in self.obstacles:
if a_tile.kind == "empty":
continue
if utils.points_are_close(a_tile.x, x) == True:
# print("tile y: {}, player y: {}".format(a_tile.y, y))
if utils.points_are_close(a_tile.y, y) == True:
print("tile x,y: ({},{}), player x,y: ({},{})".format(a_tile.x, a_tile.y, x, y))
return True
return False
# --------------------------------------------------------
def handle_events(self):
# catch all events here
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keep_looping = False
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.keep_looping = False
return True
else:
print("I don't recognize this event.key in handle_events: {}".format(event.key))
# ------------------------------------------------------
def update_classes(self, all_sprites):
for elem in self.obstacles:
all_sprites.add(elem)
return all_sprites
def draw(self):
self.screen.fill(self.BG_COLOR)
self.update_classes(self.all_sprites)
# ----
self.all_sprites.update()
self.all_sprites.draw(self.screen)
# ----
pygame.display.flip()
def main(self):
while self.keep_looping == True:
self.handle_events()
self.draw()
# --------------------------------------------------------
def __len__(self):
return len(self.obstacles)
def __getitem__(self, item):
return self.obstacles[item]
def __next__(self):
if self.loop_index >= len(self.obstacles):
self.loop_index = 0
raise StopIteration
else:
this_value = self.obstacles[self.loop_index]
self.loop_index += 1
return this_value
def __iter__(self):
return self
def debug_print(self):
for elem in self.obstacles:
elem.debug_print()
# **************************************************
zone_name = "testing"
map_name = "map00"
def debug_walkables():
mywalkables = Walkables(zone_name, map_name)
mywalkables.read_data()
# mywalkables.debug_print()
mywalkables.main()
def debug_obstacles():
myobstacles = Obstacles(zone_name, map_name)
myobstacles.read_data()
# myobstacles.debug_print()
myobstacles.main()
def debug_environment():
myenv = Environment(zone_name, map_name)
myenv.read_data()
myenv.main()
if __name__ == "__main__":
# debug_walkables()
# debug_obstacles()
debug_environment()
|
python
|
# Generated by Django 3.1.8 on 2021-06-24 16:23
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('inventory_api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='products',
name='Assigned_Employee',
field=models.ManyToManyField(to='inventory_api.Employees'),
),
migrations.AlterField(
model_name='employees',
name='role',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.RESTRICT, to='inventory_api.role'),
),
migrations.AlterField(
model_name='employees',
name='user',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('priority', models.IntegerField(default=5, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)])),
('product', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='inventory_api.type')),
],
),
migrations.CreateModel(
name='AssignedProducts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('checked_out', models.BooleanField(default=True)),
('date_returned', models.DateField(blank=True, default=None, null=True)),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory_api.employees')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory_api.products')),
],
),
]
|
python
|
import pathlib
import pyopenephys
import numpy as np
"""
The Open Ephys Record Node saves Neuropixels data in binary format according to the following the directory structure:
(https://open-ephys.github.io/gui-docs/User-Manual/Recording-data/Binary-format.html)
Record Node 102
-- experiment1 (equivalent to a Session)
-- recording1
-- recording2
-- continuous
-- Neuropix-PXI-100.0 (probe0 ap)
-- Neuropix-PXI-100.1 (probe0 lf)
-- Neuropix-PXI-100.2 (probe1 ap)
-- Neuropix-PXI-100.3 (probe1 lf)
...
-- events
-- spikes
-- structure.oebin
-- experiment 2
...
-- settings.xml
-- settings2.xml
...
"""
class OpenEphys:
def __init__(self, experiment_dir):
self.sess_dir = pathlib.Path(experiment_dir)
openephys_file = pyopenephys.File(self.sess_dir.parent) # this is on the Record Node level
# extract the "recordings" for this session
self.experiment = next(experiment for experiment in openephys_file.experiments
if pathlib.Path(experiment.absolute_foldername) == self.sess_dir)
self.recording_time = self.experiment.datetime
# extract probe data
self.probes = self.load_probe_data()
def load_probe_data(self):
"""
Loop through all Open Ephys "processors", identify the processor for
the Neuropixels probe(s), extract probe info
Loop through all recordings, associate recordings to
the matching probes, extract recording info
Yielding multiple "Probe" objects, each containing meta information
and timeseries data associated with each probe
"""
probes = {}
for processor in self.experiment.settings['SIGNALCHAIN']['PROCESSOR']:
if processor['@pluginName'] in ('Neuropix-PXI', 'Neuropix-3a'):
if (processor['@pluginName'] == 'Neuropix-3a'
or 'NP_PROBE' not in processor['EDITOR']):
probe = Probe(processor)
probes[probe.probe_SN] = probe
else:
for probe_index in range(len(processor['EDITOR']['NP_PROBE'])):
probe = Probe(processor, probe_index)
probes[probe.probe_SN] = probe
for probe_index, probe_SN in enumerate(probes):
probe = probes[probe_SN]
for rec in self.experiment.recordings:
for continuous_info, analog_signal in zip(rec._oebin['continuous'],
rec.analog_signals):
if continuous_info['source_processor_id'] != probe.processor_id:
continue
if continuous_info['source_processor_sub_idx'] == probe_index * 2: # ap data
assert continuous_info['sample_rate'] == analog_signal.sample_rate == 30000
continuous_type = 'ap'
probe.recording_info['recording_count'] += 1
probe.recording_info['recording_datetimes'].append(
rec.datetime)
probe.recording_info['recording_durations'].append(
float(rec.duration))
probe.recording_info['recording_files'].append(
rec.absolute_foldername / 'continuous' / continuous_info['folder_name'])
elif continuous_info['source_processor_sub_idx'] == probe_index * 2 + 1: # lfp data
assert continuous_info['sample_rate'] == analog_signal.sample_rate == 2500
continuous_type = 'lfp'
meta = getattr(probe, continuous_type + '_meta')
if not meta:
meta.update(**continuous_info,
channels_ids=analog_signal.channel_ids,
channels_names=analog_signal.channel_names,
channels_gains=analog_signal.gains)
signal = getattr(probe, continuous_type + '_analog_signals')
signal.append(analog_signal)
return probes
class Probe:
def __init__(self, processor, probe_index=0):
self.processor_id = int(processor['@NodeId'])
if processor['@pluginName'] == 'Neuropix-3a' or 'NP_PROBE' not in processor['EDITOR']:
self.probe_info = processor['EDITOR']['PROBE']
self.probe_SN = self.probe_info['@probe_serial_number']
self.probe_model = {
"Neuropix-PXI": "neuropixels 1.0 - 3B",
"Neuropix-3a": "neuropixels 1.0 - 3A"}[processor['@pluginName']]
else:
self.probe_info = processor['EDITOR']['NP_PROBE'][probe_index]
self.probe_SN = self.probe_info['@probe_serial_number']
self.probe_model = self.probe_info['@probe_name']
self.ap_meta = {}
self.lfp_meta = {}
self.ap_analog_signals = []
self.lfp_analog_signals = []
self.recording_info = {'recording_count': 0,
'recording_datetimes': [],
'recording_durations': [],
'recording_files': []}
self._ap_timeseries = None
self._ap_timestamps = None
self._lfp_timeseries = None
self._lfp_timestamps = None
@property
def ap_timeseries(self):
"""
AP data concatenated across recordings. Shape: (sample x channel)
Data are stored as int16 - to convert to microvolts,
multiply with self.ap_meta['channels_gains']
"""
if self._ap_timeseries is None:
self._ap_timeseries = np.hstack([s.signal for s in self.ap_analog_signals]).T
return self._ap_timeseries
@property
def ap_timestamps(self):
if self._ap_timestamps is None:
self._ap_timestamps = np.hstack([s.times for s in self.ap_analog_signals])
return self._ap_timestamps
@property
def lfp_timeseries(self):
"""
LFP data concatenated across recordings. Shape: (sample x channel)
Data are stored as int16 - to convert to microvolts,
multiply with self.lfp_meta['channels_gains']
"""
if self._lfp_timeseries is None:
self._lfp_timeseries = np.hstack([s.signal for s in self.lfp_analog_signals]).T
return self._lfp_timeseries
@property
def lfp_timestamps(self):
if self._lfp_timestamps is None:
self._lfp_timestamps = np.hstack([s.times for s in self.lfp_analog_signals])
return self._lfp_timestamps
def extract_spike_waveforms(self, spikes, channel_ind, n_wf=500, wf_win=(-32, 32)):
"""
:param spikes: spike times (in second) to extract waveforms
:param channel_ind: channel indices (of meta['channels_ids']) to extract waveforms
:param n_wf: number of spikes per unit to extract the waveforms
:param wf_win: number of sample pre and post a spike
:return: waveforms (sample x channel x spike)
"""
channel_bit_volts = np.array(self.ap_meta['channels_gains'])[channel_ind]
# ignore spikes at the beginning or end of raw data
spikes = spikes[np.logical_and(spikes > (-wf_win[0] / self.ap_meta['sample_rate']),
spikes < (self.ap_timestamps.max() - wf_win[-1]
/ self.ap_meta['sample_rate']))]
# select a randomized set of "n_wf" spikes
np.random.shuffle(spikes)
spikes = spikes[:n_wf]
# extract waveforms
if len(spikes) > 0:
spike_indices = np.searchsorted(self.ap_timestamps, spikes, side="left")
# waveform at each spike: (sample x channel x spike)
spike_wfs = np.dstack([
self.ap_timeseries[int(spk + wf_win[0]):int(spk + wf_win[-1]), channel_ind]
* channel_bit_volts
for spk in spike_indices])
return spike_wfs
else: # if no spike found, return NaN of size (sample x channel x 1)
return np.full((len(range(*wf_win)), len(channel_ind), 1), np.nan)
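# --- Usage sketch (added illustration, not part of the original module) ---
# Assuming a session recorded with the directory layout shown in the docstring
# at the top of this file; the experiment path below is hypothetical and must
# point at an actual "Record Node .../experimentN" folder.
if __name__ == "__main__":
    session = OpenEphys("/data/Record Node 102/experiment1")
    for probe_sn, probe in session.probes.items():
        print(probe_sn, probe.probe_model,
              probe.recording_info['recording_count'], "recording(s),",
              sum(probe.recording_info['recording_durations']), "seconds total")
        # probe.ap_timeseries / probe.lfp_timeseries are loaded lazily on first access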
|
python
|
import yaml
import xdg
from dataclasses import dataclass, field
from typing import Dict
from pathlib import Path
@dataclass
class Configuration:
path: str = field(default=xdg.XDG_DATA_HOME / "quest/tasks.yaml")
trello: Dict = field(default_factory=dict)
taskfile: Path = field(default=xdg.XDG_DATA_HOME / "quest/taskfile")
def __post_init__(self):
self.path = Path(self.path).expanduser()
if self.taskfile:
self.taskfile = Path(self.taskfile).expanduser()
def load_user_configuration() -> Configuration:
config_path = xdg.XDG_CONFIG_HOME / "quest.yaml"
if config_path.exists():
with open(config_path, "r") as f:
            config = yaml.safe_load(f)
return Configuration(**config)
else:
return Configuration()
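# --- Usage sketch (added illustration, not part of the original module) ---
# load_user_configuration() reads $XDG_CONFIG_HOME/quest.yaml when it exists
# and falls back to the dataclass defaults otherwise. The YAML keys mirror the
# Configuration fields; the values in the commented sample are hypothetical.
#
#   # quest.yaml
#   path: ~/tasks/tasks.yaml
#   taskfile: ~/tasks/taskfile
#   trello:
#     board: my-board
if __name__ == "__main__":
    cfg = load_user_configuration()
    print(cfg.path, cfg.taskfile, cfg.trello)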
|
python
|
import os
def modifyFile(_f):
    # ' - 副本' is the suffix Windows Explorer (Chinese locale) appends to a
    # duplicated file (" - Copy"); rename such duplicated .js files to .ts
    _new_f = _f.replace(' - 副本.js', '.ts')
    os.rename(_f, _new_f)
def getFile(_d):
dirs = os.listdir(_d)
for k in dirs:
fpth = _d + "/" + k
f = os.path.isfile(fpth)
        if not f:
getFile(fpth)
else:
i = k.rfind(".")
s = k[i:len(k)]
if( s == ".js"):
modifyFile(fpth)
getFile(".")
|
python
|
from time import sleep
import unittest,random,sys
sys.path.append('./models')
sys.path.append('./page_obj')
from models import function,myunit
from page_obj.loginPage import Login
class LoginTest(myunit.MyTest):
    'Account login tests'
def user_login_verify(self,username='',password=''):
Login(self.driver).user_login(username,password)
# def test_login1(self):
    #     'Wrong account number'
# self.user_login_verify(username='123',password='')
# po=Login(self.driver)
#
# # self.assertEqual(po.username_error_msg,'请输入正确的帐号!')
# sleep(1)
# print(po.username_error_msg)
# file_cur=function.insert_img(self.driver,'账号错误.png')
# print(file_cur)
# def test_login2(self):
    #     'Correct account number, wrong password'
# self.user_login_verify(username='137160564',password='123')
# po=Login(self.driver)
# sleep(1)
# function.insert_img(self.driver,'正确账号,错误密码.png')
#
# def test_login3(self):
    #     'Correct account number, empty password'
# self.user_login_verify(username='644326394',password='')
# po = Login(self.driver)
# sleep(1)
# function.insert_img(self.driver, '正确账号,密码输入为空.png')
#
def test_login4(self):
        'Correct account number, correct password'
self.user_login_verify(username='137160564', password='chaoheweijing')
# self.user_login_verify(username='137160564', password='fengmang3729')
po = Login(self.driver)
sleep(1)
# print(po.success_msg)
po.success_msg('137160564')
# self.assertEqual(po.success_msg)
function.insert_img(self.driver, '正确登录.png')
if __name__ == '__main__':
unittest.main()
|
python
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.testing.unit_test import unit_test
from bes.version.software_version import software_version as VC
class test_software_version(unit_test):
def test_compare(self):
self.assertEqual( -1, VC.compare('1.2.3', '1.2.4') )
self.assertEqual( 0, VC.compare('1.2.3', '1.2.3') )
self.assertEqual( 1, VC.compare('1.2.4', '1.2.3') )
self.assertEqual( -1, VC.compare('1.2.8', '1.2.9') )
self.assertEqual( -1, VC.compare('1.2.10', '1.2.11') )
self.assertEqual( -1, VC.compare('1.2.9', '1.2.10') )
self.assertEqual( -1, VC.compare('3.0.4', '3.3') )
self.assertEqual( 1, VC.compare('1:1.2.3', '1.2.4') )
self.assertEqual( -1, VC.compare('0:1.2.3', '1.2.4') )
self.assertEqual( -1, VC.compare('0:1.2.3', '0:1.2.3-1') )
self.assertEqual( 1, VC.compare('0:1.2.3-3', '0:1.2.3-2') )
self.assertEqual( 1, VC.compare('1.2.3', '1.2-3') )
self.assertEqual( -1, VC.compare('1.2-3', '1.2.3') )
def test_sort_versions(self):
self.assertEqual( [
'1.0.1',
'1.0.2',
'1.0.9',
'1.0.10',
'1.0.100',
], VC.sort_versions( [
'1.0.1',
'1.0.10',
'1.0.2',
'1.0.100',
'1.0.9',
] ) )
def test_sort_versions_reversed(self):
self.assertEqual( [
'1.0.100',
'1.0.10',
'1.0.9',
'1.0.2',
'1.0.1',
], VC.sort_versions( [
'1.0.1',
'1.0.10',
'1.0.2',
'1.0.100',
'1.0.9',
], reverse = True ) )
def test_change_version(self):
self.assertEqual( '1.0.1', VC.change_version('1.0.0', [ 0, 0, 1 ]) )
self.assertEqual( '1.0.0', VC.change_version('1.0.1', [ 0, 0, -1 ]) )
self.assertEqual( '2.0.0', VC.change_version('1.0.0', [ 1, 0, 0 ]) )
self.assertEqual( '2.3.4', VC.change_version('1.2.3', [ 1, 1, 1 ]) )
self.assertEqual( '2.3.4.0', VC.change_version('1.2.3.0', [ 1, 1, 1 ]) )
self.assertEqual( '2.2.3.0', VC.change_version('1.2.3.0', [ 1 ]) )
def test_version_range(self):
self.assertEqual( [ '1.0.1', '1.0.2', '1.0.3' ], VC.version_range('1.0.1', '1.0.3', [ 0, 0, 1 ]) )
self.assertEqual( [ '1.0.1' ], VC.version_range('1.0.1', '1.0.1', [ 0, 0, 1 ]) )
self.assertEqual( [ '1.0.1', '1.0.2' ], VC.version_range('1.0.1', '1.0.2', [ 0, 0, 1 ]) )
self.assertEqual( [ '1.0.8', '1.0.9', '1.0.10', '1.0.11' ], VC.version_range('1.0.8', '1.0.11', [ 0, 0, 1 ]) )
def test_bump_version(self):
self.assertEqual( '1.0.1', VC.bump_version('1.0.0', VC.REVISION) )
def test_bump_version_major_reset_lower(self):
self.assertEqual( '2.0.0', VC.bump_version('1.0.0', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.0.1', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.1.0', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.1.1', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.0.0', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.0.1', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.1.0', VC.MAJOR, reset_lower = True) )
self.assertEqual( '2.0.0', VC.bump_version('1.1.1', VC.MAJOR, reset_lower = True) )
def test_bump_version_minor_reset_lower(self):
self.assertEqual( '1.1.0', VC.bump_version('1.0.0', VC.MINOR, reset_lower = True) )
self.assertEqual( '1.1.0', VC.bump_version('1.0.1', VC.MINOR, reset_lower = True) )
self.assertEqual( '1.2.0', VC.bump_version('1.1.1', VC.MINOR, reset_lower = True) )
self.assertEqual( '1.1.0', VC.bump_version('1.0.0', VC.MINOR, reset_lower = True) )
self.assertEqual( '1.1.0', VC.bump_version('1.0.1', VC.MINOR, reset_lower = True) )
self.assertEqual( '1.2.0', VC.bump_version('1.1.1', VC.MINOR, reset_lower = True) )
def test_bump_version_revision_reset_lower(self):
self.assertEqual( '1.0.1', VC.bump_version('1.0.0', VC.REVISION, reset_lower = True) )
self.assertEqual( '1.1.1', VC.bump_version('1.1.0', VC.REVISION, reset_lower = True) )
self.assertEqual( '1.0.1', VC.bump_version('1.0.0', VC.REVISION, reset_lower = True) )
self.assertEqual( '1.1.1', VC.bump_version('1.1.0', VC.REVISION, reset_lower = True) )
def test_bump_version_major(self):
self.assertEqual( '2.0.0', VC.bump_version('1.0.0', VC.MAJOR) )
self.assertEqual( '2.0.1', VC.bump_version('1.0.1', VC.MAJOR) )
self.assertEqual( '2.1.0', VC.bump_version('1.1.0', VC.MAJOR) )
self.assertEqual( '2.1.1', VC.bump_version('1.1.1', VC.MAJOR) )
self.assertEqual( '2.0.0', VC.bump_version('1.0.0', VC.MAJOR) )
self.assertEqual( '2.0.1', VC.bump_version('1.0.1', VC.MAJOR) )
self.assertEqual( '2.1.0', VC.bump_version('1.1.0', VC.MAJOR) )
self.assertEqual( '2.1.1', VC.bump_version('1.1.1', VC.MAJOR) )
def test_bump_version_minor(self):
self.assertEqual( '1.1.0', VC.bump_version('1.0.0', VC.MINOR) )
self.assertEqual( '1.1.1', VC.bump_version('1.0.1', VC.MINOR) )
self.assertEqual( '1.2.1', VC.bump_version('1.1.1', VC.MINOR) )
self.assertEqual( '1.1.0', VC.bump_version('1.0.0', VC.MINOR) )
self.assertEqual( '1.1.1', VC.bump_version('1.0.1', VC.MINOR) )
self.assertEqual( '1.2.1', VC.bump_version('1.1.1', VC.MINOR) )
def test_bump_version_revision(self):
self.assertEqual( '1.0.1', VC.bump_version('1.0.0', VC.REVISION) )
self.assertEqual( '1.1.1', VC.bump_version('1.1.0', VC.REVISION) )
self.assertEqual( '1.0.1', VC.bump_version('1.0.0', VC.REVISION) )
self.assertEqual( '1.1.1', VC.bump_version('1.1.0', VC.REVISION) )
def test_bump_version_invalid_component(self):
with self.assertRaises(ValueError) as _:
VC.bump_version('1.0.0', 3)
def test_bump_version_major_two_components(self):
self.assertEqual( '2.0', VC.bump_version('1.0', VC.MAJOR) )
self.assertEqual( '2.1', VC.bump_version('1.1', VC.MAJOR) )
def test_bump_version_minor_two_components(self):
self.assertEqual( '1.1', VC.bump_version('1.0', VC.MINOR) )
self.assertEqual( '1.2', VC.bump_version('1.1', VC.MINOR) )
def test_bump_version_major_two_components_invalid_component(self):
with self.assertRaises(ValueError) as _:
VC.bump_version('1.0', VC.REVISION)
def test_bump_version_major_one_component(self):
self.assertEqual( '2', VC.bump_version('1', VC.MAJOR) )
self.assertEqual( '3', VC.bump_version('2', VC.MAJOR) )
def test_bump_version_major_one_component_invalid_component(self):
with self.assertRaises(ValueError) as _:
VC.bump_version('1', VC.MINOR)
with self.assertRaises(ValueError) as _:
VC.bump_version('1', VC.REVISION)
def test_change_component(self):
self.assertEqual( '1.2.3', VC.change_component('1.2.2', VC.REVISION, '3') )
self.assertEqual( '1.3.2', VC.change_component('1.2.2', VC.MINOR, '3') )
self.assertEqual( '3.2.2', VC.change_component('1.2.2', VC.MAJOR, '3') )
self.assertEqual( '1.2.3', VC.change_component('1.2.2', 'revision', '3') )
self.assertEqual( '1.3.2', VC.change_component('1.2.2', 'minor', '3') )
self.assertEqual( '3.2.2', VC.change_component('1.2.2', 'major', '3') )
self.assertEqual( '1.2.3', VC.change_component('1.2.2', 2, '3') )
self.assertEqual( '1.3.2', VC.change_component('1.2.2', 1, '3') )
self.assertEqual( '3.2.2', VC.change_component('1.2.2', 0, '3') )
self.assertEqual( '3-2-2', VC.change_component('1-2-2', 0, '3') )
with self.assertRaises(ValueError) as _:
VC.change_component('1.2.2', 4, '3')
def test_parse_version(self):
self.assertEqual( ( ( 1, 0, 1 ), '.' ), VC.parse_version('1.0.1') )
self.assertEqual( ( ( 1, 0, 2 ), '.' ), VC.parse_version('1.0.2') )
self.assertEqual( ( ( 1, 0, 2, 3 ), '.' ), VC.parse_version('1.0.2.3') )
self.assertEqual( ( ( 1, 0, 11 ), '.' ), VC.parse_version('1.0.11') )
self.assertEqual( ( ( 1, 0 ), '.' ), VC.parse_version('1.0') )
self.assertEqual( ( ( 1, ), '' ), VC.parse_version('1') )
if __name__ == "__main__":
unit_test.main()
|
python
|
# Generated by Django 3.1.6 on 2021-05-01 19:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ecom', '0007_contact'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AlterModelOptions(
name='contact',
options={'verbose_name_plural': 'Contact Us Entries'},
),
]
|
python
|
import sys
import math
import numpy as np
import pandas as pd
def topsis_fun(file,wght,impact):
try:
mat=pd.read_csv(file)
except FileNotFoundError:
raise Exception("File does not exist")
#print(mat)
row_count=mat.shape[0]
col_count=mat.shape[1]
if(len(wght)<col_count-1 and len(impact)<col_count-1):
raise Exception("less number of weights and impacts assigned")
if(len(wght)>col_count-1 and len(impact)>col_count-1):
raise Exception("more number of weights and impacts assigned")
if(len(wght)<col_count-1):
raise Exception("less number of weights assigned")
if(len(wght)>col_count-1):
raise Exception("more number of weights assigned")
if(len(impact)<col_count-1):
raise Exception("less number of impacts assigned")
if(len(impact)>col_count-1):
raise Exception("more number of impacts assigned")
    # Cast the criteria columns to float; the first column holds the alternative names.
    for col in mat.columns[1:]:
        mat[col] = mat[col].astype('float32')
#normalising
x=[]
for i in range(1,col_count):
s=0
for j in range(row_count):
s=s+float(mat.iloc[j][i]**2)
s=float(math.sqrt(s))
x.append(s)
for i in range(1,col_count):
for j in range(row_count):
if(float(x[i-1])==0.0):
raise Exception("Division by zero not possible.")
a=mat.iloc[j,i]/float(x[i-1])
mat.iloc[j,i]=a
for i in range(1,col_count):
for j in range(row_count):
a=mat.iloc[j,i]*wght[i-1]
mat.iloc[j,i]=a
#calculating ideal best and worst
best=[]
worst=[]
for i in range(1,col_count):
if impact[i-1]=='+':
best.append(mat.iloc[:,i].max())
worst.append(mat.iloc[:,i].min())
else:
worst.append(mat.iloc[:,i].max())
best.append(mat.iloc[:,i].min())
#euclidean distance
total=[]
performance=[]
for i in range(row_count):
sum_pos=sum((mat.iloc[i,1:]-best[:])**2)
sum_neg=sum((mat.iloc[i,1:]-worst[:])**2)
sum_pos=math.sqrt(sum_pos)
sum_neg=math.sqrt(sum_neg)
sums=sum_pos + sum_neg
perf=sum_neg/sums
performance.append(perf)
rank1=max(performance)
ind=performance.index(rank1)
return rank1,mat.iloc[ind,0]
if(len(sys.argv)<4):
raise Exception("Less inputs given")
if(len(sys.argv)>4):
raise Exception("More inputs given")
filename=sys.argv[1]
weights=sys.argv[2]
impact=sys.argv[3]
w = list(weights.split(","))
w1=[float(i) for i in w]
im=list(impact.split(","))
for i in im:
if(i=='+' or i=='-'):
pass
else:
raise Exception("Invalid impact input")
ans,ind1=topsis_fun(filename, w1, im)
print(" The most favourable is: ",ind1," with performance value according to TOPSIS: ", ans)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 20:19:16 2020
@author: taotao
"""
import check_db
def status_check():
    print('Entering calculation check')
return
def calculate(message, trigger, item_price):  # recognize the input and compute the per-person amount
if(trigger == 1):
people = message[-1].split("@")
people = people[1:]
print('people_list:',people)
people_counter = len(people)
print('total_people:',people_counter)
unit_price = item_price / people_counter
        #check_db.item_write(item_price)  # write the new entry to the database
        #if(is_comment == 1):
        #return_message = "Recognized bookkeeping input, amount: " + str(item_price) + ", note: " + comment + ", people: " + str(people_counter) + ", price per person: " + str(unit_price)
        return_message = "Recognized bookkeeping input, amount: " + str(item_price) + ", people: " + str(people_counter) + ", price per person: " + str(unit_price)
    else:
        return_message = "Mode not implemented yet, please contact the author"
#elif(trigger == 2):
return return_message
|
python
|
# Generated by Django 2.1.5 on 2019-03-24 01:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0007_auto_20190324_0118'),
]
operations = [
migrations.RemoveField(
model_name='serveruser',
name='password',
),
migrations.RemoveField(
model_name='serveruser',
name='username',
),
]
|
python
|
import math
import bcolors
class Plot(object):
def __init__(self, x_min, x_max, x_size, y_min, y_max, y_size, t_min, t_max, t_size):
self.x_min = x_min
self.x_max = x_max
self.x_size = x_size
self.y_min = y_min
self.y_max = y_max
self.y_size = y_size
self.t_min = t_min
self.t_max = t_max
self.t_size = t_size
self.internal_plot = [[' ' for y in range(y_min, y_max+1)] for x in range(x_min, x_max+1)]
self.color = bcolors.ENDC
self.color_plot = [[bcolors.ENDC for y in range(y_min, y_max+1)] for x in range(x_min, x_max+1)]
def set(self,a,b,char):
if (a < self.x_max and a > self.x_min and b < self.y_max and b > self.y_min):
self.color_plot[a][b] = self.color
if set([char,self.internal_plot[a][b]]) == set(['\\','/']):
self.internal_plot[a][b] = 'X'
elif set([char,self.internal_plot[a][b]]) == set(['-','|']):
self.internal_plot[a][b] = '+'
elif char == self.internal_plot[a][b] == '-':
self.internal_plot[a][b] = '='
else:
self.internal_plot[a][b] = char
def plot_axes(self):
        for x in range(self.x_min, self.x_max):
            self.internal_plot[x][0] = '.'
        for y in range(self.y_min, self.y_max):
            self.internal_plot[0][y] = ':'
def print_plot(self):
result = ''
for y in range(self.y_min, len(self.internal_plot[0])+self.y_min)[::-1]:
for x in range(self.x_min, len(self.internal_plot)+self.x_min):
result += self.color_plot[x][y] + self.internal_plot[x][y] + bcolors.ENDC
result += '\n'
        print(result)
def plot_polar(self, function, color):
def f(t):
try: return function(t*self.t_size)
except: return max(self.x_max,self.y_max,-self.x_min,-self.y_min)+10
def getX(t):
return f(t)*math.cos(t*self.t_size)/self.x_size
def getY(t):
return f(t)*math.sin(t*self.t_size)/self.y_size
#Initialize x_last and y_last to be values the first can never match
x_last = int(getX(int(self.t_min/self.t_size))) + 1
y_last = int(getY(int(self.t_min/self.t_size))) + 1
for t in range(int(self.t_min/self.t_size), int(self.t_max/self.t_size)):
#Set the color so the function graphs in the right color
self.color = color(t)
x = int(getX(t))
y = int(getY(t))
a = (t*self.t_size) % math.pi
#Get cartesian slope
x_diff = getX(t+.5) - getX(t-.5)
y_diff = getY(t+.5) - getY(t-.5)
try:
c_diff = y_diff/x_diff
except ZeroDivisionError:
#Division by zero results in infinite slope
#This is close enough
c_diff = x_diff * 2**64
#No use replotting the same point
if x == x_last and y == y_last:
continue
if abs(c_diff) > 2:
self.set(x,y,'|')
elif c_diff < -.5:
self.set(x,y,'\\')
elif c_diff > .5 :
self.set(x,y,'/')
elif abs(c_diff) < .5:
if getY(t)%1 < .25:
self.set(x,y,'_')
elif getY(t)%1 < .75:
self.set(x,y,'-')
else:
self.set(x,y+1,'_')
else:
self.set(x,y,'*')
x_last = x
y_last = y
#Restore the color to its natural state
self.color = bcolors.ENDC
def plot_function(self, function, color):
#Modify the function
def f(x):
try:
return function(x*self.x_size)/self.y_size
except:
#If x is not in the domain of the function plot it outside of the graph
return self.y_max+10
for x in range(self.x_min, self.x_max):
#Set the color so the function graphs in the right color
self.color = color(x)
back_diff = int(f(x-1)) - int(f(x))
#Check that the last value was valid
try:
function((x-1)*self.x_size)/self.y_size
except:
back_diff = 0
diff = f(x+.5)-f(x-.5)
front_diff = int(f(x+1)) - int(f(x))
#Check that the next value was valid
try:
function((x+1)*self.x_size)/self.y_size
except:
front_diff = 0
if abs(diff) < .5:
floor_diff = f(x)-int(f(x))
if floor_diff > .5:
self.set(x,int(f(x))+1,'_')
elif floor_diff < -.5:
self.set(x,int(f(x)),'_')
else:
self.set(x,int(f(x)),'-')
elif abs(diff) > 2:
self.set(x,int(f(x)),'|')
elif diff < 0:
self.set(x,int(f(x)),'\\')
elif diff > 0:
self.set(x,int(f(x)),'/')
#Complete the line if it is non-continuous
#TODO compress into single for loop
if abs(back_diff) > 1:
for y in range(int(f(x)+math.copysign(1,int(back_diff))), int(f(x-.5))-(back_diff < 0), int(math.copysign(1,back_diff))):
if not self.y_min < y < self.y_max:break
self.set(x,y,'|')
if abs(front_diff) > 1:
for y in range(int(f(x)+math.copysign(1,int(front_diff))), int(f(x+.5))-(front_diff < 0), int(math.copysign(1,front_diff))):
if not self.y_min < y < self.y_max:break
self.set(x,y,'|')
#Restore the color to its natural state
self.color = bcolors.ENDC
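# A minimal usage sketch (the grid bounds, step sizes and colour callback below
# are illustrative assumptions, not values taken from the original module):
if __name__ == '__main__':
    demo = Plot(-30, 30, 0.2, -12, 12, 0.5, 0, 2 * math.pi, 0.05)
    # Graph sin(x) using the terminal's default colour for every column.
    demo.plot_function(math.sin, lambda x: bcolors.ENDC)
    demo.print_plot()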
|
python
|
# Copyright 2013-2014 The rust-url developers.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# Run as: python make_uts46_mapping_table.py IdnaMappingTable.txt > uts46_mapping_table.rs
# You can get the latest idna table from
# http://www.unicode.org/Public/idna/latest/IdnaMappingTable.txt
from __future__ import print_function
import collections
import itertools
print('''\
// Copyright 2013-2014 The rust-url developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Generated by make_idna_table.py
''')
txt = open("IdnaMappingTable.txt")
def escape_char(c):
return "\\u{%x}" % ord(c[0])
def char(s):
return unichr(int(s, 16))
strtab = collections.OrderedDict()
strtab_offset = 0
def strtab_slice(s):
global strtab, strtab_offset
if s in strtab:
return strtab[s]
else:
utf8_len = len(s.encode('utf8'))
c = (strtab_offset, utf8_len)
strtab[s] = c
strtab_offset += utf8_len
return c
def rust_slice(s):
start = s[0]
length = s[1]
start_lo = start & 0xff
start_hi = start >> 8
assert length <= 255
assert start_hi <= 255
return "(StringTableSlice { byte_start_lo: %d, byte_start_hi: %d, byte_len: %d })" % (start_lo, start_hi, length)
ranges = []
for line in txt:
# remove comments
line, _, _ = line.partition('#')
# skip empty lines
if len(line.strip()) == 0:
continue
fields = line.split(';')
if fields[0].strip() == 'D800..DFFF':
continue # Surrogates don't occur in Rust strings.
first, _, last = fields[0].strip().partition('..')
if not last:
last = first
mapping = fields[1].strip().replace('_', ' ').title().replace(' ', '')
unicode_str = None
if len(fields) > 2:
if fields[2].strip():
unicode_str = u''.join(char(c) for c in fields[2].strip().split(' '))
elif mapping == "Deviation":
unicode_str = u''
ranges.append((first, last, mapping, unicode_str))
def mergeable_key(r):
mapping = r[2]
# These types have associated data, so we should not merge them.
if mapping in ('Mapped', 'Deviation', 'DisallowedStd3Mapped'):
return r
assert mapping in ('Valid', 'Ignored', 'Disallowed', 'DisallowedStd3Valid')
return mapping
grouped_ranges = itertools.groupby(ranges, key=mergeable_key)
optimized_ranges = []
for (k, g) in grouped_ranges:
group = list(g)
if len(group) == 1:
optimized_ranges.append(group[0])
continue
# Assert that nothing in the group has an associated unicode string.
for g in group:
if g[3] is not None and len(g[3]) > 2:
assert not g[3][2].strip()
# Assert that consecutive members of the group don't leave gaps in
# the codepoint space.
a, b = itertools.tee(group)
next(b, None)
for (g1, g2) in itertools.izip(a, b):
last_char = int(g1[1], 16)
next_char = int(g2[0], 16)
if last_char + 1 == next_char:
continue
# There's a gap where surrogates would appear, but we don't have to
# worry about that gap, as surrogates never appear in Rust strings.
# Assert we're seeing the surrogate case here.
assert last_char == 0xd7ff
assert next_char == 0xe000
first = group[0][0]
last = group[-1][1]
mapping = group[0][2]
unicode_str = group[0][3]
optimized_ranges.append((first, last, mapping, unicode_str))
def is_single_char_range(r):
(first, last, _, _) = r
return first == last
# We can reduce the size of the character range table and the index table to about 1/4
# by merging runs of single character ranges and using character offsets from the start
# of that range to retrieve the correct `Mapping` value
def merge_single_char_ranges(ranges):
current = []
for r in ranges:
if not current or is_single_char_range(current[-1]) and is_single_char_range(r):
current.append(r)
continue
if len(current) != 0:
ret = current
current = [r]
yield ret
continue
current.append(r)
ret = current
current = []
yield ret
yield current
optimized_ranges = list(merge_single_char_ranges(optimized_ranges))
print("static TABLE: &'static [Range] = &[")
for ranges in optimized_ranges:
first = ranges[0][0]
last = ranges[-1][1]
print(" Range { from: '%s', to: '%s', }," % (escape_char(char(first)),
escape_char(char(last))))
print("];\n")
print("static INDEX_TABLE: &'static [u16] = &[")
SINGLE_MARKER = 1 << 15
offset = 0
for ranges in optimized_ranges:
assert offset < SINGLE_MARKER
block_len = len(ranges)
single = SINGLE_MARKER if block_len == 1 else 0
print(" %s," % (offset | single))
offset += block_len
print("];\n")
print("static MAPPING_TABLE: &'static [Mapping] = &[")
for ranges in optimized_ranges:
for (first, last, mapping, unicode_str) in ranges:
if unicode_str is not None:
mapping += rust_slice(strtab_slice(unicode_str))
print(" %s," % mapping)
print("];\n")
def escape_str(s):
return [escape_char(c) for c in s]
print("static STRING_TABLE: &'static str = \"%s\";"
% '\\\n '.join(itertools.chain(*[escape_str(s) for s in strtab.iterkeys()])))
|
python
|
# pyOCD debugger
# Copyright (c) 2013-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ...core.memory_map import (RamRegion, RomRegion)
from ...flash.flash import Flash
LOG = logging.getLogger(__name__)
class Flash_PSoC64(Flash):
isFlashing = False
def init(self, operation, address=None, clock=0, reset=True):
if self._active_operation != operation and self._active_operation is not None:
self.uninit()
Flash_PSoC64.isFlashing = True
super(Flash_PSoC64, self).init(operation, address, clock, reset)
Flash_PSoC64.isFlashing = True
LOG.debug("Flash_PSoC64: initialised for %s", operation)
def uninit(self):
if self._active_operation is None:
return
super(Flash_PSoC64, self).uninit()
Flash_PSoC64.isFlashing = False
LOG.debug("Flash_PSoC64: uninitialised")
class PSoC6FlashParams:
# Main/Work Flash flash operation weights
MFLASH_ERASE_ALL_WEIGHT = 0.5
MFLASH_ERASE_SECTOR_WEIGHT = 0.05
MFLASH_PROGRAM_PAGE_WEIGHT = 0.07
# External (SMIF) Flash flash operation weights
SMIF_ERASE_ALL_WEIGHT = 140
SMIF_ERASE_SECTOR_WEIGHT = 1
SMIF_PROGRAM_PAGE_WEIGHT = 0.5
defaultRomRegion = RomRegion(start=0x00000000, length=0x20000)
defaultRamRegion = RamRegion(start=0x08000000, length=0x8000)
|
python
|
from rest_framework import viewsets
from rest_framework.exceptions import MethodNotAllowed
from rest_framework_extensions.mixins import NestedViewSetMixin
from resources_portal.models import Material
from resources_portal.serializers import MaterialRelationSerializer
class OrganizationMaterialViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Material.objects.all().order_by("-created_at")
http_method_names = ["get", "head", "options"]
permission_classes = []
def get_serializer_class(self):
if self.action == "retrieve":
raise MethodNotAllowed("GET", detail="Cannot get single Material by Organization.")
return MaterialRelationSerializer
|
python
|
"""
Usage:
source env.sh ; python storage.py create_blob_container openflights-raw
source env.sh ; python storage.py create_blob_container openflights-adf
source env.sh ; python storage.py create_blob_container test
source env.sh ; python storage.py delete_blob_container openflights-raw
source env.sh ; python storage.py list_blob_containers
source env.sh ; python storage.py list_blob_container openflights-raw
source env.sh ; python storage.py upload_blob local_file_path cname blob_name
source env.sh ; python storage.py upload_blob requirements.in test requirements.in
source env.sh ; python storage.py download_blob test aaa.txt aaa-down.txt
"""
__author__ = 'Chris Joakim'
__email__ = "[email protected]"
__license__ = "MIT"
__version__ = "October 2021"
import json
import os
import pprint
import sys
import time
import traceback
import uuid
import arrow
from docopt import docopt
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
from azure.core.exceptions import ResourceExistsError
from azure.core.exceptions import ResourceNotFoundError
class StorageUtil(object):
def __init__(self):
conn_str = os.environ['M2C_STORAGE_CONNECTION_STRING']
self.blob_service_client = BlobServiceClient.from_connection_string(conn_str)
def list_containers(self):
try:
return self.blob_service_client.list_containers()
# a list of <class 'azure.storage.blob._models.ContainerProperties'>
except:
self.print_exception('list_containers, returning empty list')
return list()
def create_container(self, cname):
try:
container_client = self.blob_service_client.get_container_client(cname)
container_client.create_container()
print('create_container: {}'.format(cname))
except:
self.print_exception('create_container {}'.format(cname))
def delete_container(self, cname):
try:
container_client = self.blob_service_client.get_container_client(cname)
container_client.delete_container()
print('delete_container: {}'.format(cname))
except:
self.print_exception('delete_container {}'.format(cname))
def list_container(self, cname):
try:
container_client = self.blob_service_client.get_container_client(cname)
return container_client.list_blobs()
except:
self.print_exception('list_container {}'.format(cname))
return list()
def upload_blob(self, local_file_path, cname, blob_name, overwrite=True):
try:
blob_client = self.blob_service_client.get_blob_client(container=cname, blob=blob_name)
with open(local_file_path, "rb") as data:
blob_client.upload_blob(data, overwrite=overwrite)
print('upload_blob: {} {} -> {} {}'.format(local_file_path, overwrite, cname, blob_name))
except:
msg = 'local_file_path: {} cname: {} blob_name: {}'.format(
local_file_path, cname, blob_name)
self.print_exception('upload_blob {}'.format(msg))
def download_blob(self, cname, blob_name, local_file_path):
try:
blob_client = self.blob_service_client.get_blob_client(container=cname, blob=blob_name)
with open(local_file_path, "wb") as download_file:
download_file.write(blob_client.download_blob().readall())
print('download_blob: {} {} -> {}'.format(cname, blob_name, local_file_path))
except:
msg = 'cname: {} blob_name: {} local_file_path: {}'.format(
cname, blob_name, local_file_path)
self.print_exception('download_blob {}'.format(msg))
def blob_properties(self, cname, blob_name):
try:
blob_client = self.blob_service_client.get_blob_client(container=cname, blob=blob_name)
return blob_client.get_blob_properties()
except:
self.print_exception('blob_properties {} {}'.format(cname, blob_name))
def print_exception(self, msg=None):
print('*** exception in storage.py - {}'.format(msg))
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** traceback:")
traceback.print_tb(exc_traceback, limit=2, file=sys.stderr)
print("*** exception:")
traceback.print_exception(
exc_type, exc_value, exc_traceback, limit=2, file=sys.stderr)
def list_blob_containers():
stor = StorageUtil()
containers = stor.list_containers()
count = 0
for idx, c in enumerate(containers):
count = count + 1
# print(str(type(c))) # <class 'azure.storage.blob._models.ContainerProperties'>
print('{} {}'.format(idx + 1, c.name))
if count < 1:
print('no containers')
def list_blob_container(cname):
stor = StorageUtil()
blobs = stor.list_container(cname)
for idx, b in enumerate(blobs):
#print(str(type(b))) # <class 'azure.storage.blob._models.BlobProperties'>
print('{} {}'.format(idx + 1, b.name))
def create_blob_container(cname):
print('create_blob_container; cname: {}'.format(cname))
stor = StorageUtil()
stor.create_container(cname)
def delete_blob_container(cname):
print('delete_blob_container; cname: {}'.format(cname))
stor = StorageUtil()
stor.delete_container(cname)
def upload_blob(local_file_path, cname, blob_name):
print('upload_blob; {} {} {}'.format(local_file_path, cname, blob_name))
stor = StorageUtil()
stor.upload_blob(local_file_path, cname, blob_name)
def download_blob(cname, blob_name, local_file_path):
print('download_blob; {} {} {}'.format(cname, blob_name, local_file_path))
stor = StorageUtil()
stor.download_blob(cname, blob_name, local_file_path)
def load_json_file(infile):
with open(infile) as json_file:
return json.load(json_file)
def write_obj_as_json_file(outfile, obj):
txt = json.dumps(obj, sort_keys=False, indent=2)
with open(outfile, 'wt') as f:
f.write(txt)
print("file written: " + outfile)
def write(outfile, s, verbose=True):
with open(outfile, 'w') as f:
f.write(s)
if verbose:
print('file written: {}'.format(outfile))
def boolean_flag_arg(flag):
for arg in sys.argv:
if arg == flag:
return True
return False
def print_options(msg):
print(msg)
arguments = docopt(__doc__, version=__version__)
print(arguments)
if __name__ == "__main__":
if len(sys.argv) > 1:
func = sys.argv[1].lower()
if func == 'list_blob_containers':
list_blob_containers()
elif func == 'list_blob_container':
cname = sys.argv[2]
list_blob_container(cname)
elif func == 'create_blob_container':
cname = sys.argv[2]
create_blob_container(cname)
elif func == 'delete_blob_container':
cname = sys.argv[2]
delete_blob_container(cname)
elif func == 'upload_blob':
local_file_path = sys.argv[2]
cname = sys.argv[3]
if len(sys.argv) > 4:
blob_name = sys.argv[4]
else:
blob_name = os.path.basename(local_file_path)
upload_blob(local_file_path, cname, blob_name)
elif func == 'download_blob':
cname = sys.argv[2]
blob_name = sys.argv[3]
local_file_path = sys.argv[4]
skip_download = boolean_flag_arg('--skip-download')
if skip_download == False:
download_blob(cname, blob_name, local_file_path)
else:
print_options('Error: invalid function: {}'.format(func))
else:
print_options('Error: no command-line args entered')
|
python
|
"""Command-line interface for Acton."""
import logging
import struct
import sys
from typing import BinaryIO, Iterable, List
import acton.acton
import acton.predictors
import acton.proto.wrappers
import acton.recommenders
import click
def read_bytes_from_buffer(n: int, buffer: BinaryIO) -> bytes:
"""Reads n bytes from stdin, blocking until all bytes are received.
Parameters
----------
n
How many bytes to read.
buffer
Which buffer to read from.
Returns
-------
bytes
Exactly n bytes.
"""
b = b''
while len(b) < n:
b += buffer.read(n - len(b))
assert len(b) == n
return b
def read_binary() -> bytes:
"""Reads binary data from stdin.
Notes
-----
The first eight bytes are expected to be the length of the input data as an
unsigned long long.
Returns
-------
bytes
Binary data.
"""
logging.debug('Reading 8 bytes from stdin.')
length = read_bytes_from_buffer(8, sys.stdin.buffer)
length, = struct.unpack('<Q', length)
logging.debug('Reading {} bytes from stdin.'.format(length))
return read_bytes_from_buffer(length, sys.stdin.buffer)
def write_binary(string: bytes):
"""Writes binary data to stdout.
Notes
-----
The output will be preceded by the length as an unsigned long long.
"""
logging.debug('Writing 8 + {} bytes to stdout.'.format(len(string)))
length = struct.pack('<Q', len(string))
logging.debug('Writing length {} ({}).'.format(length, len(string)))
sys.stdout.buffer.write(length)
sys.stdout.buffer.write(string)
sys.stdout.buffer.flush()
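# A minimal illustration of the length-prefixed framing described in the docstrings
# above (a hypothetical helper, not part of the original CLI): a peer process sends
# the payload length packed as an unsigned little-endian 64-bit integer, then the payload.
def _example_frame(payload: bytes) -> bytes:
    """Return `payload` prefixed with its length packed as '<Q'."""
    return struct.pack('<Q', len(payload)) + payload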
# acton
@click.command()
@click.option('--data',
type=click.Path(exists=True, dir_okay=False),
help='Path to features/labels file',
required=True)
@click.option('-l', '--label',
type=str,
help='Column name of labels',
required=True)
@click.option('-o', '--output',
type=click.Path(dir_okay=False),
help='Path to output file',
required=True)
@click.option('-f', '--feature',
type=str,
multiple=True,
help='Column names of features')
@click.option('--epochs',
type=int,
help='Number of epochs to run active learning for',
default=100)
@click.option('-i', '--id',
type=str,
help='Column name of IDs')
@click.option('--diversity',
type=float,
help='Diversity of recommendations',
default=0.0)
@click.option('--recommendation-count',
type=int,
help='Number of recommendations to make',
default=1)
@click.option('--labeller-accuracy',
type=float,
help='Accuracy of simulated labellers',
default=1.0)
@click.option('--initial-count',
type=int,
help='Number of random instances to label initially',
default=10)
@click.option('--predictor',
type=click.Choice(acton.predictors.PREDICTORS.keys()),
default='LogisticRegression',
help='Predictor to use')
@click.option('--recommender',
type=click.Choice(acton.recommenders.RECOMMENDERS.keys()),
default='RandomRecommender',
help='Recommender to use')
@click.option('--pandas-key',
type=str,
default='',
help='Key for pandas HDF5')
@click.option('-v', '--verbose',
is_flag=True,
help='Verbose output')
def main(
data: str,
label: str,
output: str,
feature: str,
epochs: int,
id: str,
diversity: float,
recommendation_count: int,
labeller_accuracy: float,
initial_count: int,
predictor: str,
recommender: str,
verbose: bool,
pandas_key: str,
):
logging.warning('Not implemented: diversity, id_col, labeller_accuracy')
logging.captureWarnings(True)
if verbose:
logging.root.setLevel(logging.DEBUG)
return acton.acton.main(
data_path=data,
feature_cols=feature,
label_col=label,
output_path=output,
n_epochs=epochs,
initial_count=initial_count,
recommender=recommender,
predictor=predictor,
pandas_key=pandas_key,
n_recommendations=recommendation_count)
# acton-predict
@click.command()
@click.option('--predictor',
type=click.Choice(acton.predictors.PREDICTORS.keys()),
default='LogisticRegression',
help='Predictor to use')
@click.option('-v', '--verbose',
is_flag=True,
help='Verbose output')
def predict(
predictor: str,
verbose: bool,
):
# Logging setup.
logging.captureWarnings(True)
if verbose:
logging.root.setLevel(logging.DEBUG)
# Read labels.
labels = read_binary()
labels = acton.proto.wrappers.LabelPool.deserialise(labels)
# Write predictions.
proto = acton.acton.predict(labels=labels, predictor=predictor)
write_binary(proto.proto.SerializeToString())
# acton-recommend
@click.command()
@click.option('--diversity',
type=float,
help='Diversity of recommendations',
default=0.0)
@click.option('--recommendation-count',
type=int,
help='Number of recommendations to make',
default=1)
@click.option('--recommender',
type=click.Choice(acton.recommenders.RECOMMENDERS.keys()),
default='RandomRecommender',
help='Recommender to use')
@click.option('-v', '--verbose',
is_flag=True,
help='Verbose output')
def recommend(
diversity: float,
recommendation_count: int,
recommender: str,
verbose: bool,
):
# Logging setup.
logging.warning('Not implemented: diversity')
logging.captureWarnings(True)
if verbose:
logging.root.setLevel(logging.DEBUG)
# Read the predictions protobuf.
predictions = read_binary()
predictions = acton.proto.wrappers.Predictions.deserialise(predictions)
# Write the recommendations protobuf.
proto = acton.acton.recommend(
predictions=predictions,
recommender=recommender,
n_recommendations=recommendation_count)
write_binary(proto.proto.SerializeToString())
# acton-label
def lines_from_stdin() -> Iterable[str]:
"""Yields lines from stdin."""
for line in sys.stdin:
line = line.strip()
logging.debug('Read line {} from stdin.'.format(repr(line)))
if line:
yield line
@click.command()
@click.option('--data',
type=click.Path(exists=True, dir_okay=False),
help='Path to labels file',
required=False)
@click.option('-l', '--label',
type=str,
help='Column name of labels',
required=False)
@click.option('-f', '--feature',
type=str,
multiple=True,
help='Column names of features')
@click.option('--labeller-accuracy',
type=float,
help='Accuracy of simulated labellers',
default=1.0)
@click.option('--pandas-key',
type=str,
default='',
help='Key for pandas HDF5')
@click.option('-v', '--verbose',
is_flag=True,
help='Verbose output')
def label(
data: str,
feature: List[str],
label: str,
labeller_accuracy: float,
verbose: bool,
pandas_key: str,
):
# Logging setup.
logging.warning('Not implemented: labeller_accuracy')
logging.captureWarnings(True)
if verbose:
logging.root.setLevel(logging.DEBUG)
# If any arguments are specified, expect all arguments.
if data or label or pandas_key:
if not data or not label:
raise ValueError('--data, --label, or --pandas-key specified, but '
'missing --data or --label.')
# Handle database arguments.
data_path = data
feature_cols = feature
label_col = label
# Read IDs from stdin.
ids_to_label = [int(i) for i in lines_from_stdin()]
# There wasn't a recommendations protobuf given, so we have no existing
# labelled instances.
labelled_ids = []
# Construct the recommendations protobuf.
DB, db_kwargs = acton.acton.get_DB(data_path, pandas_key=pandas_key)
db_kwargs['label_col'] = label_col
db_kwargs['feature_cols'] = feature_cols
with DB(data_path, **db_kwargs) as db:
recs = acton.proto.wrappers.Recommendations.make(
recommended_ids=ids_to_label,
labelled_ids=labelled_ids,
recommender='None',
db=db)
else:
# Read a recommendations protobuf from stdin.
recs = read_binary()
recs = acton.proto.wrappers.Recommendations.deserialise(recs)
proto = acton.acton.label(recs)
write_binary(proto.proto.SerializeToString())
if __name__ == '__main__':
sys.exit(main())
|
python
|
import logging
def debug(*args, **kw):
logging.basicConfig(level=logging.ERROR, format="%(message)s")
logger = logging.getLogger(__name__)
logger.debug(*args, **kw)
|
python
|
import os
from os import path
from IPython import embed
import click
import pandas as pd
import numpy as np
from root2csv import converter
from csv2hdf5 import glob_and_check
from remove_events import get_run
from melibea import process_melibea
def check_globs(glob1, glob2):
if isinstance(glob1, str):
fnames1 = glob_and_check(glob1)
fnames2 = glob_and_check(glob2)
elif isinstance(glob1, list):
fnames1 = glob1
fnames2 = glob2
runs1 = sorted([get_run(fname) for fname in fnames1])
runs2 = sorted([get_run(fname) for fname in fnames2])
assert np.array_equal(runs1, runs2), "runs are not equal, aborting."
def generate_imputed_luts(basedir, superstar, lutfnames):
"""processes superstar with melibea, taking the energies from the lutfnames text files for imputation
the extracts the lutenergies from the newly imputed files and returns their filenames"""
outdir = path.join(basedir, "tmp")
os.makedirs(outdir, exist_ok=True)
process_melibea(superstar=superstar,
nnenergies=lutfnames,
outdir=outdir,
njobs=10,
mode="nn")
glob_and_check(path.join(outdir, "*_Q_*.root"))
imputedluts = path.join(outdir, "*_Q_*.root")
lutenergies = converter(outdir,
globstr1=path.basename(imputedluts),
globstr2=path.basename(imputedluts),
multidir=False,
njobs=8,
mergecolsonly=True,
parallelprocessing=True)
check_globs(lutenergies, lutfnames)
return sorted(lutenergies)
def compare_energies(basedir, superstar, melibealut):
"""Extracts the energies from melibealut imputes them with the nn into new root files and
extracts the energies from those and then compares the resulting energies"""
    lutdir = path.dirname(melibealut)
    # Pre-initialise so the cleanup in the finally block cannot fail with a NameError.
    lutfnames, imputedluts = [], []
    try:
lutfnames = converter(lutdir,
globstr1=path.basename(melibealut),
globstr2=path.basename(melibealut),
multidir=False,
njobs=8,
mergecolsonly=True,
parallelprocessing=True)
lutfnames = sorted(lutfnames)
for fname in lutfnames:
energy = pd.read_csv(fname)["ELUT"]
energy.to_csv(fname, header=False, index=False)
imputedluts = generate_imputed_luts(lutdir, superstar, lutfnames)
allequal = True
for ilutfname, normallut in zip(imputedluts, lutfnames):
normallutenergies = pd.read_csv(normallut, header=None).values.flatten()
ilutenergies = pd.read_csv(ilutfname)["ELUT"].values
ilen = len(ilutenergies)
np.testing.assert_array_almost_equal(ilutenergies, normallutenergies[:ilen],
decimal=2)
if not np.array_equal(ilutenergies, normallutenergies[:ilen]):
embed()
allequal = False
else:
print("files are equal, continuing")
if allequal:
print("All files are equal. Removing energy files")
except Exception:
embed()
finally:
for f1, f2 in zip(lutfnames, imputedluts):
os.remove(f1)
os.remove(f2)
@click.command()
@click.option('--superstar', "-ss", default="./superstar/*_S_*.root", type=click.Path(),
help='Glob for the superstar files that will get processed with melibea.')
@click.option('--melibealut', "-ml", default="./melibea/*_Q_*.root", type=click.Path(),
help='Glob for the melibea files contain the standard LUT energies')
@click.option('--basedir', "-bd", default="./",
type=click.Path(resolve_path=True),
help='')
def main(basedir, superstar, melibealut):
check_globs(melibealut, superstar)
compare_energies(basedir, superstar, melibealut)
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
__all__ = ['Sample', 'reserved_keys']
reserved_keys = ['image_bytes', 'image_type', 'image_path', 'image', 'bboxes', 'bbox_labels']
class Sample(dict):
"""
Sample class is a subclass of dict, storing information of a single sample
The following keys are reserved:
'image_bytes' image data in bytes format
'image_type' image type, such as jpg, png, bmp
'image_path' image path for loading
'image' image data in numpy.ndarray
'bboxes' bbox coordinates info for detection
'bbox_labels' bbox label for detection
"""
def __str__(self):
info_str = 'The sample includes the following keys: \n'
for key in self.keys():
info_str += '[' + str(key) + ']\t'
return info_str
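# A minimal usage sketch (the key/value pairs below are illustrative assumptions):
if __name__ == '__main__':
    sample = Sample(image_path='cat.jpg', image_type='jpg')
    sample['bboxes'] = [[10, 20, 110, 220]]
    sample['bbox_labels'] = ['cat']
    print(sample)  # lists the keys currently stored in this sample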
|
python
|
# -*- coding: utf-8 -*-
import json
class User(object):
def __init__(self, user_id, user_name, user_surname, email):
self.user_id = user_id
self.user_name = user_name
self.user_surname = user_surname
self.email = email
# self.userLevel = userLevel
# self.userTitle = userTitle
class Group(object):
    def __init__(self, group_id, group_name, group_owner, group_island):
        self.group_id = group_id
        self.group_name = group_name
        self.group_owner = group_owner
        self.group_island = group_island
        self.listUsers = []
def addUser(self, user_id):
self.listUsers.append(user_id)
class Member(object):
def __init__(self, group_id, user_id):
self.group_id = group_id
self.user_id = user_id
class Friend(object):
def __init__(self, user_id1, user_id2):
self.user_id1 = user_id1
self.user_id2 = user_id2
class Session(object):
def __init__(self, session_id, user_id, date_init, year_init, month_init, day_init, hour_init, hours_init, minutes_init, seconds_init, date_fin, year_fin, month_fin, day_fin, hour_fin, hours_fin, minutes_fin, seconds_fin, session_t):
self.session_id = session_id
self.user_id = user_id
self.date_init = date_init
self.year_init = year_init
self.month_init = month_init
self.day_init = day_init
self.hour_init = hour_init
self.hours_init = hours_init
self.minutes_init = minutes_init
self.seconds_init = seconds_init
self.date_fin = date_fin
self.year_fin = year_fin
self.month_fin = month_fin
self.day_fin = day_fin
self.hour_fin = hour_fin
self.hours_fin = hours_fin
self.minutes_fin = minutes_fin
self.seconds_fin = seconds_fin
self.session_t = session_t
class Teleport(object):
def __init__(self, user_name, date_init, hour_init, teleport_source, teleport_dest):
self.user_name = user_name
self.date_init = date_init
self.hour_init = hour_init
self.teleport_source = teleport_source
self.teleport_dest = teleport_dest
class Click(object):
def __init__(self, user_name, date_init, hour_init, event_type, object_id):
self.user_name = user_name
self.date_init = date_init
self.hour_init = hour_init
self.event_type = event_type
self.object_id = object_id
class TeleportRule(object):
def __init__(self, user_name, teleport_source, teleport_dest, probability):
self.user_name = user_name
self.teleport_source = teleport_source
self.teleport_dest = teleport_dest
self.probability = probability
class UserEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, User):
return super(UserEncoder, self).default(obj)
return obj.__dict__
class GroupEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Group):
return super(GroupEncoder, self).default(obj)
return obj.__dict__
class MembersEncoder(json.JSONEncoder):
    def default(self, obj):
        if not isinstance(obj, Member):
            return super(MembersEncoder, self).default(obj)
        return obj.__dict__
class FriendsEncoder(json.JSONEncoder):
    def default(self, obj):
        if not isinstance(obj, Friend):
            return super(FriendsEncoder, self).default(obj)
        return obj.__dict__
class SessionEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Session):
return super(SessionEncoder, self).default(obj)
return obj.__dict__
class TeleportEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Teleport):
return super(TeleportEncoder, self).default(obj)
return obj.__dict__
class ClickEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Click):
return super(ClickEncoder, self).default(obj)
return obj.__dict__
class TeleportRuleEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, TeleportRule):
return super(TeleportRuleEncoder, self).default(obj)
return obj.__dict__
|
python
|
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
from qgis.core import (
QgsApplication,
QgsDataSourceUri,
QgsCategorizedSymbolRenderer,
QgsClassificationRange,
QgsPointXY,
QgsProject,
QgsExpression,
QgsField,
QgsFields,
QgsFeature,
QgsFeatureRequest,
QgsFeatureRenderer,
QgsGeometry,
QgsGraduatedSymbolRenderer,
QgsMarkerSymbol,
QgsMessageLog,
QgsRectangle,
QgsRendererCategory,
QgsRendererRange,
QgsSymbol,
QgsVectorDataProvider,
QgsVectorLayer,
QgsVectorFileWriter,
QgsWkbTypes,
QgsSpatialIndex,
)
from qgis.utils import iface
from PyQt5.QtCore import QVariant  # QVariant specifies the attribute's type (string, integer, etc.) when adding a field
#following imports for taking inputs from user
import sys
# from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit
# QFileDialog is imported so the user can select the input CSV file.
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog
from PyQt5.QtGui import QIcon
from pathlib import Path
# create the fire frequency field ('fire_f') on the active layer
lyr = iface.activeLayer()
dp = lyr.dataProvider()
#check if the field is already there or not
list_of_fields = lyr.fields().names()
if 'fire_f' not in list_of_fields:
dp.addAttributes( [QgsField ("fire_f" , QVariant.String)])
lyr.updateFields()
#setting the default value to 0
index_of_fire_f = dp.fieldNameIndex('fire_f')
features=lyr.getFeatures()
lyr.startEditing()
for f in features:
id=f.id()
attr_value={index_of_fire_f:0}
dp.changeAttributeValues({id:attr_value})
lyr.commitChanges()
#creating empty lists
list1 = []
list2 = []
#reading the csv file
class selectCsvOfFrequency(QWidget):
def __init__(self):
super().__init__()
self.title = 'Select the csv file containing frequency data please'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
if fileName:
print(fileName)
return fileName
selectCsv = selectCsvOfFrequency()
file = Path(selectCsv.openFileNameDialog())
#file = r'/Users/krishnaninama/Documents/test/Dewas/test.csv'
df = pd.read_csv(file, header = None)
# put the beat/range names into list1 and the number of fires into list2
list1 = list (df[0])
list2 = list (df[1])
#commenting this because input() doesn't work in qgis console
#name_of_level = input ('kindly enter the name of field form the list above for which you want to enter the frequency data:')
#method to take input from user. he will select from a list and we will take that input
class selectionOfLevel(QWidget):
def __init__(self):
super().__init__()
self.title = 'Select level at which you want to enter frequency data'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
def getChoice(self):
items = list_of_fields
item, okPressed = QInputDialog.getItem(self, "Get item","Color:", items, 0, False)
if okPressed and item:
print(item)
return item
selection = selectionOfLevel()
name_of_level = selection.getChoice()
#name_of_level = 'Beat_Name'
#item, okPressed = QInputDialog.getItem(self, "Get item","Color:", items, 0, False)
#list_of_fields, okPressed = QInputDialog.getItem(self, "Get item","Color:", items, 0, False)
#create an object from class Qwidget
list_of_features = list1 #this will be taken from the csv file. the features can be range, beat, compartment,etc
frequency_of_fire = list2
#now i want to select the feature form this list.for this.
x = 0
#select all features
for i in list_of_features:
y = list_of_features[x].rstrip()
lyr.selectByExpression ( " {} ILIKE '{}' ".format(name_of_level,y)) #this will select the feature
selected_ids = lyr.selectedFeatureIds() #get their ids of the selected features
attr = {dp.fieldNameIndex('fire_f'):frequency_of_fire[x] }
for j in selected_ids:
dp.changeAttributeValues ( {j : attr})
x +=1
lyr.removeSelection()
lyr.updateFields()
#exec(open('/Users/krishnaninama/Documents/test/Dewas/final_working_script.py'.encode('utf-8')).read())
|
python
|
"""Поле для ссылки на сертификат
Revision ID: 7f6d52e2a594
Revises: 4028ddc57d5b
Create Date: 2021-01-17 16:47:35.551727
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7f6d52e2a594'
down_revision = '4028ddc57d5b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('students', sa.Column('cert_link', sa.String(length=100), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('students', 'cert_link')
# ### end Alembic commands ###
|
python
|
"""
This class defines the Attention Layer to use when training the model with the attention mechanism.
Code readapted (under the courtesy of the author) from:
https://github.com/lukemelas/Machine-Translation
"""
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, bidirectional=False, attn_type='dot', h_dim=300):
super(Attention, self).__init__()
if attn_type not in ['dot', 'none']:
raise Exception('Incorrect attention type')
self.bidirectional = bidirectional
self.attn_type = attn_type
self.h_dim = h_dim
def attention(self, encoder_outputs, decoder_outputs):
'''Produces context and attention distribution'''
if self.attn_type == 'none':
return None
# Deal with bidirectional encoder, move batches first
if self.bidirectional:
encoder_outputs = encoder_outputs.contiguous().\
view(encoder_outputs.size(0), encoder_outputs.size(1), 2, -1).\
sum(2).view(encoder_outputs.size(0), encoder_outputs.size(1), -1)
encoder_outputs = encoder_outputs.transpose(0, 1)
decoder_outputs = decoder_outputs.transpose(0, 1)
attn = encoder_outputs.bmm(decoder_outputs.transpose(1, 2)) # attention weights
attn = F.softmax(attn, dim=1).transpose(1,2) # Attention scores
context = attn.bmm(encoder_outputs) # context c_t
context = context.transpose(0,1)
return context, attn
def forward(self, out_e, out_d):
'''Produces context using attention distribution'''
context, attn = self.attention(out_e, out_d)
return context
def get_visualization(self, in_e, out_e, out_d):
'''Gives attention distribution for visualization'''
context, attn = self.attention(out_e, out_d)
return attn
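# A minimal usage sketch (tensor shapes and values are illustrative assumptions):
if __name__ == '__main__':
    import torch
    attn_layer = Attention(bidirectional=False, attn_type='dot', h_dim=8)
    enc_out = torch.randn(5, 2, 8)  # (src_len, batch, h_dim)
    dec_out = torch.randn(3, 2, 8)  # (trg_len, batch, h_dim)
    context = attn_layer(enc_out, dec_out)
    print(context.shape)  # expected: torch.Size([3, 2, 8])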
|
python
|
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/doc/", include("django.contrib.admindocs.urls")),
path("admin/", admin.site.urls),
path("api/", include("rest_framework.urls")),
path("api/", include("lexicon.urls")),
path("api/", include("grammar.urls")),
# Catchalls for bare URL, and all frontend and unrecognised/unresolved
# paths.
# In particular, `sockjs-node/` is used by the React dev server and will
# always be proxied.
path("", include("frontend.urls")),
]
|
python
|
import numpy as np
from scipy.fftpack import dct
def cutSample(data):
if len(np.shape(data))==2:
data=data[:,0]
fadeamount = 300
maxindex = np.argmax(data > 0.01)
startpos = 1000
if len(data) > 44100:
if maxindex > 44100:
if len(data) > maxindex + (44100):
data = data[maxindex - startpos:maxindex - startpos + (44100)]
else:
data = data[maxindex - startpos:]
else:
data = data[0:44100]
else:
if maxindex > 44100:
data = data[maxindex - startpos:]
# print('data len :'+str(len(data)))
fade = np.geomspace(1, 2, fadeamount) - 1
data[0:fadeamount] = data[0:fadeamount] * fade
data[-fadeamount:] = data[-fadeamount:] * np.flip(fade)
data = np.concatenate((np.zeros(startpos), data), axis=None)
return data
def alternate_mfcc(signal, sr=44100):
def normalize_audio(audio):
audio = audio / np.max(np.abs(audio))
return audio
def freq_to_mel(freq):
return 2595.0 * np.log10(1.0 + freq / 700.0)
def met_to_freq(mels):
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
def get_filter_points(fmin, fmax, mel_filter_num, FFT_size, sample_rate=44100):
fmin_mel = freq_to_mel(fmin)
fmax_mel = freq_to_mel(fmax)
mels = np.linspace(fmin_mel, fmax_mel, num=mel_filter_num + 2)
freqs = met_to_freq(mels)
return np.floor((FFT_size + 1) / sample_rate * freqs).astype(int), freqs
def get_filters(filter_points, FFT_size):
#filter_length = int(FFT_size / 2 + 1)
filter_length= int(FFT_size)
filters = np.zeros((len(filter_points) - 2, filter_length))
for n in range(len(filter_points) - 2):
filters[n, filter_points[n]: filter_points[n + 1]] = np.linspace(0, 1,
filter_points[n + 1] - filter_points[n])
filters[n, filter_points[n + 1]: filter_points[n + 2]] = np.linspace(1, 0,
filter_points[n + 2] - filter_points[
n + 1])
return filters
#signal = normalize_audio(signal)
signal = cutSample(signal)
M=len(signal)
complexSpectrum = np.fft.fft(signal)[:M // 2 + 1:-1]
audio_power = np.square(np.abs(complexSpectrum))
FFT_size = len(audio_power)
freq_min = 60
freq_high = 14000# sr / 2
mel_filter_num = 40
filter_points, mel_freqs = get_filter_points(freq_min, freq_high, mel_filter_num, FFT_size, sample_rate=44100)
filters = get_filters(filter_points, FFT_size)
enorm = 2.0 / (mel_freqs[2:mel_filter_num + 2] - mel_freqs[:mel_filter_num])
filters *= enorm[:, np.newaxis]
audio_filtered = np.dot(filters, np.transpose(audio_power))
audio_log = 10.0 * np.log10(audio_filtered)
dctSpectrum = dct(audio_log, type=2)
# Lifter, comment out if not needed
L = 22 # Lifter coeff
ncoeff = len(dctSpectrum)
n = np.arange(ncoeff)
lift = 1 + (L / 2.) * np.sin(np.pi * n / L)
dctSpectrum = lift * dctSpectrum
return dctSpectrum[:12]
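# A minimal usage sketch (the white-noise test signal is an illustrative assumption):
if __name__ == '__main__':
    noise = 0.5 * np.random.randn(2 * 44100)  # two seconds of noise at 44.1 kHz
    coeffs = alternate_mfcc(noise, sr=44100)
    print(coeffs)  # the first 12 liftered cepstral coefficients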
|
python
|
import json
from dagster_slack import slack_resource
from mock import patch
from dagster import ModeDefinition, execute_solid, solid
@patch("slack_sdk.WebClient.api_call")
def test_slack_resource(mock_api_call):
@solid(required_resource_keys={"slack"})
def slack_solid(context):
assert context.resources.slack
body = {"ok": True}
mock_api_call.return_value = {
"status": 200,
"body": json.dumps(body),
"headers": "",
}
context.resources.slack.chat_postMessage(channel="#random", text=":wave: hey there!")
assert mock_api_call.called
result = execute_solid(
slack_solid,
run_config={
"resources": {"slack": {"config": {"token": "xoxp-1234123412341234-12341234-1234"}}}
},
mode_def=ModeDefinition(resource_defs={"slack": slack_resource}),
)
assert result.success
|
python
|
import base64, httplib, json
def googlecloud_tagimage(filename):
with open(filename, 'rb') as image_file:
encoded_string = base64.b64encode(image_file.read())
endpoint = '/v1/images:annotate?key=ADD_YOUR_KEY_HERE'
request_body = {
'requests':[
{
'image':{
'content':encoded_string
},
'features':[
{
'type':'LABEL_DETECTION',
'maxResults':10
}
]
}
]
}
conn = httplib.HTTPSConnection('vision.googleapis.com')
conn.request('POST', endpoint, json.dumps(request_body))
response = conn.getresponse()
print(response.read())
conn.close()
googlecloud_tagimage('DogAndBaby.jpg')
|
python
|
import sys
# argv[0] is the script name, so only the arguments after it are treated as numbers.
numArray = sys.argv[1:]
uniques = []
for num in numArray:
    if num not in uniques:
        uniques.append(num)
print("Unique Numbers are", end=": ")
print(", ".join(uniques))
|
python
|
# -*- coding: utf-8 -*-
import unittest
from includes import *
from common import getConnectionByEnv, waitForIndex, sortedResults, toSortedFlatList
from time import sleep
from RLTest import Env
def testSyntax1(env):
conn = getConnectionByEnv(env)
env.expect('ft.create', 'idx',
'ONfoo*',
'SCHEMA', 'foo', 'text').equal('Unknown argument `ONfoo*`')
env.expect('ft.create', 'idx2',
'LANGUAGE', 'eng'
'SCHEMA', 'foo', 'text').equal('Invalid language')
env.expect('ft.create', 'idx2',
'SCORE', '1.0'
'SCHEMA', 'foo', 'text').equal('Unknown argument `foo`')
env.expect('ft.create', 'idx2',
'PAYLOAD_FIELD', 'awfw'
'SCHEMA', 'foo', 'text').equal('Unknown argument `foo`')
env.expect('ft.create', 'idx2',
'FILTER', 'a'
'SCHEMA', 'foo', 'text').equal("Unknown symbol 'aSCHEMA'")
def testFilter1(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things',
'ON', 'HASH',
'FILTER', 'startswith(@__key, "thing:")',
'SCHEMA', 'name', 'text')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo']])
def testPrefix0a(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', '',
'SCHEMA', 'name', 'text')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo').equal([1L, 'thing:bar', ['name', 'foo']])
def testPrefix0b(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH', 'SCHEMA', 'name', 'text')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo').equal([1L, 'thing:bar', ['name', 'foo']])
def testPrefix1(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'SCHEMA', 'name', 'text')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo']])
def testPrefix2(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '2', 'this:', 'that:',
'SCHEMA', 'name', 'text')
conn.execute_command('hset', 'this:foo', 'name', 'foo')
conn.execute_command('hset', 'that:foo', 'name', 'foo')
res = env.cmd('ft.search', 'things', 'foo')
env.assertIn('that:foo', res)
env.assertIn('this:foo', res)
def testFilter2(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'stuff', 'ON', 'HASH',
'FILTER', 'startswith(@__key, "stuff:")',
'SCHEMA', 'name', 'text', 'age', 'numeric')
env.cmd('ft.create', 'things', 'ON', 'HASH',
'FILTER', 'startswith(@__key, "thing:")',
'SCHEMA', 'name', 'text', 'age', 'numeric')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
conn.execute_command('hset', 'object:jojo', 'name', 'vivi')
conn.execute_command('hset', 'thing:bar', 'age', '42')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo', 'age', '42']])
def testPrefix3(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'stuff',
'ON', 'HASH',
'PREFIX', '1', 'stuff:',
'SCHEMA', 'name', 'text', 'age', 'numeric')
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'SCHEMA', 'name', 'text', 'age', 'numeric')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
conn.execute_command('hset', 'object:jojo', 'name', 'vivi')
conn.execute_command('hset', 'thing:bar', 'age', '42')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo', 'age', '42']])
def testIdxField(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx1',
'ON', 'HASH',
'PREFIX', 1, 'doc',
'FILTER', '@indexName=="idx1"',
'SCHEMA', 'name', 'text', 'indexName', 'text')
env.cmd('ft.create', 'idx2',
'ON', 'HASH',
'FILTER', '@indexName=="idx2"',
'SCHEMA', 'name', 'text', 'indexName', 'text')
conn.execute_command('hset', 'doc1', 'name', 'foo', 'indexName', 'idx1')
conn.execute_command('hset', 'doc2', 'name', 'bar', 'indexName', 'idx2')
env.expect('ft.search', 'idx1', '*').equal([1L, 'doc1', ['name', 'foo', 'indexName', 'idx1']])
env.expect('ft.search', 'idx2', '*').equal([1L, 'doc2', ['name', 'bar', 'indexName', 'idx2']])
def testDel(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'SCHEMA', 'name', 'text')
env.expect('ft.search', 'things', 'foo').equal([0L])
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo').equal([1L, 'thing:bar', ['name', 'foo']])
conn.execute_command('del', 'thing:bar')
env.expect('ft.search', 'things', 'foo').equal([0L])
def testSet(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things',
'PREFIX', '1', 'thing:',
'SCHEMA', 'name', 'text')
env.expect('ft.search', 'things', 'foo').equal([0L])
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo').equal([1L, 'thing:bar', ['name', 'foo']])
env.expect('set', 'thing:bar', "bye bye")
env.expect('ft.search', 'things', 'foo').equal([0L])
def testRename(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.cmd('ft.create things PREFIX 1 thing: SCHEMA name text')
env.expect('ft.search things foo').equal([0L])
conn.execute_command('hset thing:bar name foo')
env.expect('ft.search things foo').equal([1L, 'thing:bar', ['name', 'foo']])
env.expect('RENAME thing:bar thing:foo').ok()
env.expect('ft.search things foo').equal([1L, 'thing:foo', ['name', 'foo']])
env.cmd('ft.create otherthings PREFIX 1 otherthing: SCHEMA name text')
env.expect('RENAME thing:foo otherthing:foo').ok()
env.expect('ft.search things foo').equal([0L])
env.expect('ft.search otherthings foo').equal([1L, 'otherthing:foo', ['name', 'foo']])
def testFlush(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'FILTER', 'startswith(@__key, "thing:")',
'SCHEMA', 'name', 'text')
conn.execute_command('FLUSHALL')
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.search', 'things', 'foo').equal('things: no such index')
def testNotExist(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'FILTER', 'startswith(@__key, "thing:")',
'SCHEMA', 'txt', 'text')
conn.execute_command('hset', 'thing:bar', 'not_text', 'foo')
env.expect('ft.search', 'things', 'foo').equal([0L])
def testPayload(env):
conn = getConnectionByEnv(env)
env.expect('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:',
'PAYLOAD_FIELD', 'payload',
'SCHEMA', 'name', 'text').ok()
conn.execute_command('hset', 'thing:foo', 'name', 'foo', 'payload', 'stuff')
for _ in env.retry_with_rdb_reload():
waitForIndex(env, 'things')
res = env.cmd('ft.search', 'things', 'foo')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'thing:foo', ['name', 'foo', 'payload', 'stuff']]))
res = env.cmd('ft.search', 'things', 'foo', 'withpayloads')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'thing:foo', 'stuff', ['name', 'foo', 'payload', 'stuff']]))
def testDuplicateFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH',
'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC', 'SORTABLE').ok()
env.cmd('FT.ADD', 'idx', 'doc', 1.0,
'FIELDS', 'txt', 'foo', 'txt', 'bar', 'txt', 'baz')
env.expect('ft.search', 'idx', 'baz').equal([1L, 'doc', ['txt', 'baz']])
env.expect('ft.search', 'idx', 'foo').equal([0L])
def testReplace(env):
conn = getConnectionByEnv(env)
r = env
r.expect('ft.create idx schema f text').ok()
res = conn.execute_command('HSET', 'doc1', 'f', 'hello world')
env.assertEqual(res, 1)
res = conn.execute_command('HSET', 'doc2', 'f', 'hello world')
env.assertEqual(res, 1)
res = r.execute_command('ft.search', 'idx', 'hello world')
r.assertEqual(2, res[0])
# now replace doc1 with different content
res = conn.execute_command('HSET', 'doc1', 'f', 'goodbye universe')
env.assertEqual(res, 0)
for _ in r.retry_with_rdb_reload():
waitForIndex(env, 'idx')
# make sure the query for hello world does not return the replaced document
r.expect('ft.search', 'idx', 'hello world', 'nocontent').equal([1, 'doc2'])
# search for the doc's new content
r.expect('ft.search', 'idx', 'goodbye universe', 'nocontent').equal([1, 'doc1'])
def testSortable(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'FILTER', 'startswith(@__key, "")',
'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
def testMissingArgs(env):
env.expect('FT.CREATE', 'idx', 'ON', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error()
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'FILTER', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error()
def testWrongArgs(env):
env.expect('FT.CREATE', 'idx', 'SCORE', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error().contains('Invalid score')
env.expect('FT.CREATE', 'idx', 'SCORE', 10, 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error().contains('Invalid score')
env.expect('FT.CREATE', 'idx', 'LANGUAGE', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error().contains('Invalid language')
env.expect('FT.CREATE', 'idx', 'LANGUAGE', 'none', 'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC').error().contains('Invalid language')
def testLanguageDefaultAndField(env):
conn = getConnectionByEnv(env)
env.cmd('FT.CREATE', 'idxTest1', 'LANGUAGE_FIELD', 'lang', 'SCHEMA', 'body', 'TEXT')
env.cmd('FT.CREATE', 'idxTest2', 'LANGUAGE', 'hindi', 'SCHEMA', 'body', 'TEXT')
conn.execute_command('HSET', 'doc1', 'lang', 'hindi', 'body', u'अँगरेजी अँगरेजों अँगरेज़')
for _ in env.retry_with_rdb_reload():
waitForIndex(env, 'idxTest1')
waitForIndex(env, 'idxTest2')
# test for language field
res = env.cmd('FT.SEARCH', 'idxTest1', u'अँगरेज़')
res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)}
env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8'))
# test for default language
res = env.cmd('FT.SEARCH', 'idxTest2', u'अँगरेज़')
res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)}
env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8'))
def testScoreDecimal(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE', 'idx1', 'SCORE', '0.5', 'schema', 'title', 'text').ok()
env.expect('FT.CREATE', 'idx2', 'SCORE_FIELD', 'score', 'schema', 'title', 'text').ok()
res = conn.execute_command('HSET', 'doc1', 'title', 'hello', 'score', '0.25')
env.assertEqual(res, 2)
for _ in env.retry_with_rdb_reload():
waitForIndex(env, 'idx1')
waitForIndex(env, 'idx2')
res = env.cmd('ft.search', 'idx1', 'hello', 'withscores', 'nocontent')
env.assertEqual(float(res[2]), 0.5)
res = env.cmd('ft.search', 'idx2', 'hello', 'withscores', 'nocontent')
env.assertEqual(float(res[2]), 0.25)
def testMultiFilters1(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE', 'test', 'ON', 'HASH',
'PREFIX', '2', 'student:', 'pupil:',
'FILTER', 'startswith(@__key, "student:")',
'SCHEMA', 'first', 'TEXT', 'last', 'TEXT', 'age', 'NUMERIC').ok()
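# only hashes that match one of the prefixes AND pass the FILTER expression are indexed, so the pupil:* docs below are not expected in the results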
conn.execute_command('HSET', 'student:yes1', 'first', 'yes1', 'last', 'yes1', 'age', '17')
conn.execute_command('HSET', 'student:yes2', 'first', 'yes2', 'last', 'yes2', 'age', '15')
conn.execute_command('HSET', 'pupil:no1', 'first', 'no1', 'last', 'no1', 'age', '17')
conn.execute_command('HSET', 'pupil:no2', 'first', 'no2', 'last', 'no2', 'age', '15')
res1 = [2L, 'student:yes2', ['first', 'yes2', 'last', 'yes2', 'age', '15'],
'student:yes1', ['first', 'yes1', 'last', 'yes1', 'age', '17']]
res = env.cmd('ft.search test *')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(res1))
def testMultiFilters2(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE', 'test', 'ON', 'HASH',
'PREFIX', '2', 'student:', 'pupil:',
'FILTER', '@age > 16',
'SCHEMA', 'first', 'TEXT', 'last', 'TEXT', 'age', 'NUMERIC').ok()
conn.execute_command('HSET', 'student:yes1', 'first', 'yes1', 'last', 'yes1', 'age', '17')
conn.execute_command('HSET', 'student:no1', 'first', 'no1', 'last', 'no1', 'age', '15')
conn.execute_command('HSET', 'pupil:yes2', 'first', 'yes2', 'last', 'yes2', 'age', '17')
conn.execute_command('HSET', 'pupil:no2', 'first', 'no2', 'last', 'no2', 'age', '15')
res1 = [2L, 'pupil:yes2', ['first', 'yes2', 'last', 'yes2', 'age', '17'],
'student:yes1', ['first', 'yes1', 'last', 'yes1', 'age', '17']]
res = env.cmd('ft.search test *')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(res1))
def testInfo(env):
env.skipOnCluster()
env.expect('FT.CREATE', 'test', 'ON', 'HASH',
'PREFIX', '2', 'student:', 'pupil:',
'FILTER', '@age > 16',
'language', 'hindi',
'language_field', 'lang',
'score', '0.5',
'score_field', 'score',
'payload_field', 'pl',
'SCHEMA', 't', 'TEXT').ok()
res_actual = env.cmd('FT.INFO test')
res_expected = ['key_type', 'HASH',
'prefixes', ['student:', 'pupil:'],
'filter', '@age > 16',
'default_language', 'hindi',
'language_field', 'lang',
'default_score', '0.5',
'score_field', 'score',
'payload_field', 'pl']
env.assertEqual(res_actual[5], res_expected)
env.expect('ft.drop test').ok()
env.expect('FT.CREATE', 'test', 'SCHEMA', 't', 'TEXT').ok()
res_actual = env.cmd('FT.INFO test')
res_expected = ['key_type', 'HASH',
'prefixes', [''],
'language_field', '__language',
'default_score', '1',
'score_field', '__score',
'payload_field', '__payload']
env.assertEqual(res_actual[5], res_expected)
def testCreateDropCreate(env):
conn = getConnectionByEnv(env)
conn.execute_command('hset', 'thing:bar', 'name', 'foo')
env.expect('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:', 'SCHEMA', 'name', 'text').ok()
waitForIndex(conn, 'things')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo']])
env.expect('ft.dropindex things').ok()
env.expect('ft.create', 'things', 'ON', 'HASH',
'PREFIX', '1', 'thing:', 'SCHEMA', 'name', 'text').ok()
waitForIndex(conn, 'things')
env.expect('ft.search', 'things', 'foo') \
.equal([1L, 'thing:bar', ['name', 'foo']])
def testPartial(env):
if env.env == 'existing-env':
env.skip()
env.skipOnCluster()
env = Env(moduleArgs='PARTIAL_INDEXED_DOCS 1')
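# with PARTIAL_INDEXED_DOCS enabled, the internal doc id (checked via FT.DEBUG docidtoid) is expected to change only when a field that is part of the schema is modified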
# HSET
env.expect('FT.CREATE idx SCHEMA test TEXT').equal('OK')
env.expect('HSET doc1 test foo').equal(1)
env.expect('FT.DEBUG docidtoid idx doc1').equal(1)
env.expect('HSET doc1 testtest foo').equal(1)
env.expect('FT.DEBUG docidtoid idx doc1').equal(1)
env.expect('HSET doc1 test bar').equal(0)
env.expect('FT.DEBUG docidtoid idx doc1').equal(2)
env.expect('FT.SEARCH idx bar').equal([1L, 'doc1', ['test', 'bar', 'testtest', 'foo']])
# HMSET
env.expect('HMSET doc2 test foo').ok()
env.expect('FT.DEBUG docidtoid idx doc2').equal(3)
env.expect('HMSET doc2 testtest foo').ok()
env.expect('FT.DEBUG docidtoid idx doc2').equal(3)
env.expect('HMSET doc2 test baz').ok()
env.expect('FT.DEBUG docidtoid idx doc2').equal(4)
env.expect('FT.SEARCH idx baz').equal([1L, 'doc2', ['test', 'baz', 'testtest', 'foo']])
# HSETNX
env.expect('HSETNX doc3 test foo').equal(1)
env.expect('FT.DEBUG docidtoid idx doc3').equal(5)
env.expect('HSETNX doc3 testtest foo').equal(1)
env.expect('FT.DEBUG docidtoid idx doc3').equal(5)
env.expect('HSETNX doc3 test bad').equal(0)
env.expect('FT.DEBUG docidtoid idx doc3').equal(5)
env.expect('FT.SEARCH idx foo').equal([1L, 'doc3', ['test', 'foo', 'testtest', 'foo']])
# HINCRBY
env.expect('HINCRBY doc4 test 5').equal(5)
env.expect('FT.DEBUG docidtoid idx doc4').equal(6)
env.expect('HINCRBY doc4 testtest 5').equal(5)
env.expect('FT.DEBUG docidtoid idx doc4').equal(6)
env.expect('HINCRBY doc4 test 6').equal(11)
env.expect('FT.DEBUG docidtoid idx doc4').equal(7)
env.expect('HINCRBY doc4 test 5.5').error().contains('value is not an integer or out of range')
env.expect('FT.DEBUG docidtoid idx doc4').equal(7)
env.expect('FT.SEARCH idx 11').equal([1L, 'doc4', ['test', '11', 'testtest', '5']])
# HINCRBYFLOAT
env.expect('HINCRBYFLOAT doc5 test 5.5').equal('5.5')
env.expect('FT.DEBUG docidtoid idx doc5').equal(8)
env.expect('HINCRBYFLOAT doc5 testtest 5.5').equal('5.5')
env.expect('FT.DEBUG docidtoid idx doc5').equal(8)
env.expect('HINCRBYFLOAT doc5 test 6.6').equal('12.1')
env.expect('FT.DEBUG docidtoid idx doc5').equal(9)
env.expect('HINCRBYFLOAT doc5 test 5').equal('17.1')
env.expect('FT.DEBUG docidtoid idx doc5').equal(10)
env.expect('FT.SEARCH idx *').equal([5L, 'doc5', ['test', '17.1', 'testtest', '5.5'],
'doc4', ['test', '11', 'testtest', '5'],
'doc3', ['test', 'foo', 'testtest', 'foo'],
'doc2', ['test', 'baz', 'testtest', 'foo'],
'doc1', ['test', 'bar', 'testtest', 'foo']])
def testHDel(env):
if env.env == 'existing-env':
env.skip()
env.skipOnCluster()
env = Env(moduleArgs='PARTIAL_INDEXED_DOCS 1')
env.expect('FT.CREATE idx SCHEMA test1 TEXT test2 TEXT').equal('OK')
env.expect('FT.CREATE idx2 SCHEMA test1 TEXT test2 TEXT').equal('OK')
env.expect('HSET doc1 test1 foo test2 bar test3 baz').equal(3)
env.expect('FT.DEBUG docidtoid idx doc1').equal(1)
env.expect('HDEL doc1 test1').equal(1)
env.expect('FT.DEBUG docidtoid idx doc1').equal(2)
env.expect('HDEL doc1 test3').equal(1)
env.expect('FT.DEBUG docidtoid idx doc1').equal(2)
env.expect('FT.SEARCH idx bar').equal([1L, 'doc1', ['test2', 'bar']])
env.expect('HDEL doc1 test2').equal(1)
env.expect('FT.SEARCH idx bar').equal([0L])
def testRestore(env):
if env.env == 'existing-env':
env.skip()
env.skipOnCluster()
env.expect('FT.CREATE idx SCHEMA test TEXT').equal('OK')
env.expect('HSET doc1 test foo').equal(1)
env.expect('FT.SEARCH idx foo').equal([1L, 'doc1', ['test', 'foo']])
dump = env.cmd('dump doc1')
env.expect('DEL doc1').equal(1)
env.expect('FT.SEARCH idx foo').equal([0L])
env.expect('RESTORE', 'doc1', 0, dump)
env.expect('FT.SEARCH idx foo').equal([1L, 'doc1', ['test', 'foo']])
def testExpire(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA test TEXT').equal('OK')
conn.execute_command('HSET', 'doc1', 'test', 'foo')
env.expect('FT.SEARCH idx foo').equal([1L, 'doc1', ['test', 'foo']])
conn.execute_command('EXPIRE', 'doc1', '1')
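# setting a TTL does not remove the doc from the index; it should still be found until the key actually expires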
env.expect('FT.SEARCH idx foo').equal([1L, 'doc1', ['test', 'foo']])
sleep(1.1)
env.expect('FT.SEARCH idx foo').equal([0L])
def testEvicted(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA test TEXT').equal('OK')
memory = 0
info = conn.execute_command('INFO MEMORY')
for line in info.splitlines():
if 'used_memory:' in line:
sub = line.split(':')
memory = int(sub[1])
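# allow random eviction and cap maxmemory just above current usage so that some of the 1000 docs inserted below get evicted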
conn.execute_command('CONFIG', 'SET', 'MAXMEMORY-POLICY', 'ALLKEYS-RANDOM')
conn.execute_command('CONFIG', 'SET', 'MAXMEMORY', memory + 100000)
for i in range(1000):
env.expect('HSET', 'doc{}'.format(i), 'test', 'foo').equal(1)
res = env.cmd('FT.SEARCH idx foo limit 0 0')
env.assertLess(res[0], 1000)
env.assertGreater(res[0], 0)
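# helper: index N hashes that expire after 100ms, plus two persistent docs ('foo' and 'bar') used by the expiry tests below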
def createExpire(env, N):
env.flush()
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA txt1 TEXT n NUMERIC').ok()
for i in range(N):
conn.execute_command('HSET', 'doc%d' % i, 'txt1', 'hello%i' % i, 'n', i)
conn.execute_command('PEXPIRE', 'doc%d' % i, '100')
conn.execute_command('HSET', 'foo', 'txt1', 'hello', 'n', 0)
conn.execute_command('HSET', 'bar', 'txt1', 'hello', 'n', 20)
waitForIndex(env, 'idx')
env.expect('FT.SEARCH', 'idx', 'hello*', 'limit', '0', '0').noEqual([2L])
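# doc99 should still be readable before its 100ms TTL elapses, and gone afterwards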
res = conn.execute_command('HGETALL', 'doc99')
if type(res) is list:
res = {res[i]:res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(res, {'txt1': 'hello99', 'n': '99'})
sleep(0.1)
res = conn.execute_command('HGETALL', 'doc99')
if isinstance(res, list):
res = {res[i]:res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(res, {})
def testExpiredDuringSearch(env):
N = 100
createExpire(env, N)
res = env.cmd('FT.SEARCH', 'idx', 'hello*', 'nocontent', 'limit', '0', '200')
env.assertGreater(103, len(res))
env.assertLess(1, len(res))
createExpire(env, N)
res = env.cmd('FT.SEARCH', 'idx', 'hello*', 'limit', '0', '200')
env.assertEqual(toSortedFlatList(res[1:]), toSortedFlatList(['bar', ['txt1', 'hello', 'n', '20'],
'foo', ['txt1', 'hello', 'n', '0']]))
def testExpiredDuringAggregate(env):
N = 100
res = [1L, ['txt1', 'hello', 'COUNT', '2']]
createExpire(env, N)
_res = env.cmd('FT.AGGREGATE idx hello*')
env.assertGreater(len(_res), 2)
createExpire(env, N)
env.expect('FT.AGGREGATE idx hello* GROUPBY 1 @txt1 REDUCE count 0 AS COUNT').equal(res)
createExpire(env, N)
env.expect('FT.AGGREGATE idx hello* LOAD 1 @txt1 GROUPBY 1 @txt1 REDUCE count 0 AS COUNT').equal(res)
createExpire(env, N)
env.expect('FT.AGGREGATE idx @txt1:hello* LOAD 1 @txt1 GROUPBY 1 @txt1 REDUCE count 0 AS COUNT').equal(res)
def testSkipInitialScan(env):
conn = getConnectionByEnv(env)
conn.execute_command('HSET', 'a', 'test', 'hello', 'text', 'world')
# Regular
env.expect('FT.CREATE idx SCHEMA test TEXT').ok()
waitForIndex(env, 'idx')
env.expect('FT.SEARCH idx hello').equal([1L, 'a', ['test', 'hello', 'text', 'world']])
# SKIPINITIALSCAN
env.expect('FT.CREATE idx_no_scan SKIPINITIALSCAN SCHEMA test TEXT').ok()
waitForIndex(env, 'idx_no_scan')
env.expect('FT.SEARCH idx_no_scan hello').equal([0L])
# Temporary
env.expect('FT.CREATE temp_idx TEMPORARY 10 SCHEMA test TEXT').ok()
waitForIndex(env, 'temp_idx')
env.expect('FT.SEARCH temp_idx hello').equal([1L, 'a', ['test', 'hello', 'text', 'world']])
# Temporary & SKIPINITIALSCAN
env.expect('FT.CREATE temp_idx_no_scan SKIPINITIALSCAN TEMPORARY 10 SCHEMA test TEXT').equal('OK')
waitForIndex(env, 'temp_idx_no_scan')
env.expect('FT.SEARCH temp_idx_no_scan hello').equal([0L])
def testWrongFieldType(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx SCHEMA t TEXT n NUMERIC').ok()
conn.execute_command('HSET', 'a', 't', 'hello', 'n', '42')
conn.execute_command('HSET', 'b', 't', 'hello', 'n', 'world')
env.expect('FT.SEARCH idx hello').equal([1L, 'a', ['t', 'hello', 'n', '42']])
res_actual = env.cmd('FT.INFO idx')
res_actual = {res_actual[i]: res_actual[i + 1] for i in range(0, len(res_actual), 2)}
env.assertEqual(str(res_actual['hash_indexing_failures']), '1')
def testDocIndexedInTwoIndexes():
env = Env(moduleArgs='MAXDOCTABLESIZE 50')
env.skipOnCluster()
env.expect('FT.CREATE idx1 SCHEMA t TEXT').ok()
env.expect('FT.CREATE idx2 SCHEMA t TEXT').ok()
for i in range(1000):
env.expect('HSET', 'doc%d' % i, 't', 'foo').equal(1L)
env.expect('FT.DROPINDEX idx2 DD').ok()
env.expect('FT.SEARCH idx1 foo').equal([0L])
env.expect('FT.DROPINDEX idx1 DD').ok()
def testCountry(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx1',
'PREFIX', 1, 'address:',
'FILTER', '@country=="usa"',
'SCHEMA', 'business', 'text', 'country', 'text')
conn.execute_command('hset', 'address:1', 'business', 'foo', 'country', 'usa')
conn.execute_command('hset', 'address:2', 'business', 'bar', 'country', 'israel')
env.expect('ft.search', 'idx1', '*').equal([1L, 'address:1', ['business', 'foo', 'country', 'usa']])
def testIssue1571(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx',
'FILTER', '@index=="yes"',
'SCHEMA', 't', 'TEXT')
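# a doc should be added to or dropped from the index whenever an update flips the FILTER expression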
conn.execute_command('hset', 'doc1', 't', 'foo1', 'index', 'yes')
env.expect('ft.search', 'idx', 'foo*').equal([1L, 'doc1', ['t', 'foo1', 'index', 'yes']])
conn.execute_command('hset', 'doc1', 'index', 'no')
env.expect('ft.search', 'idx', 'foo*').equal([0L])
conn.execute_command('hset', 'doc1', 't', 'foo2')
env.expect('ft.search', 'idx', 'foo*').equal([0L])
conn.execute_command('hset', 'doc1', 'index', 'yes')
env.expect('ft.search', 'idx', 'foo*').equal([1L, 'doc1', ['t', 'foo2', 'index', 'yes']])
def testIssue1571WithRename(env):
conn = getConnectionByEnv(env)
env.cmd('ft.create', 'idx1',
'PREFIX', '1', 'idx1',
'FILTER', '@index=="yes"',
'SCHEMA', 't', 'TEXT')
env.cmd('ft.create', 'idx2',
'PREFIX', '1', 'idx2',
'FILTER', '@index=="yes"',
'SCHEMA', 't', 'TEXT')
conn.execute_command('hset', 'idx1:{doc}1', 't', 'foo1', 'index', 'yes')
env.expect('ft.search', 'idx1', 'foo*').equal([1L, 'idx1:{doc}1', ['t', 'foo1', 'index', 'yes']])
env.expect('ft.search', 'idx2', 'foo*').equal([0L])
conn.execute_command('rename', 'idx1:{doc}1', 'idx2:{doc}1')
env.expect('ft.search', 'idx2', 'foo*').equal([1L, 'idx2:{doc}1', ['t', 'foo1', 'index', 'yes']])
env.expect('ft.search', 'idx1', 'foo*').equal([0L])
conn.execute_command('hset', 'idx2:{doc}1', 'index', 'no')
env.expect('ft.search', 'idx1', 'foo*').equal([0L])
env.expect('ft.search', 'idx2', 'foo*').equal([0L])
conn.execute_command('rename', 'idx2:{doc}1', 'idx1:{doc}1')
env.expect('ft.search', 'idx1', 'foo*').equal([0L])
env.expect('ft.search', 'idx2', 'foo*').equal([0L])
conn.execute_command('hset', 'idx1:{doc}1', 'index', 'yes')
env.expect('ft.search', 'idx1', 'foo*').equal([1L, 'idx1:{doc}1', ['t', 'foo1', 'index', 'yes']])
env.expect('ft.search', 'idx2', 'foo*').equal([0L])
|
python
|
def python_vignette():
    # pybooklet_utils is assumed to live in the same tests package as pyunit_utils
    from tests import pyunit_utils, pybooklet_utils

    story1 = [pyunit_utils.locate("python/ipython_dataprep_input.py")]
    story2 = [pyunit_utils.locate("python/ipython_machinelearning_input.py")]

    approved_py_code_examples = story1 + story2

    pybooklet_utils.check_code_examples_in_dir(approved_py_code_examples,
                                               pyunit_utils.locate("python"))

    pybooklet_utils.check_story("story1", story1)
    pybooklet_utils.check_story("story2", story2)

python_vignette()
|
python
|
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
import random
import pyperclip
import json
def generate_password():
    letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
               'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
               'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']

    # pick a random number of characters from each group
    nr_letters = random.randint(8, 10)
    nr_symbols = random.randint(2, 4)
    nr_numbers = random.randint(2, 4)

    password_list = [random.choice(letters) for _ in range(nr_letters)]
    password_list += [random.choice(symbols) for _ in range(nr_symbols)]
    password_list += [random.choice(numbers) for _ in range(nr_numbers)]
    random.shuffle(password_list)

    password = "".join(password_list)

    # show the generated password in the entry field and copy it to the clipboard
    password_entry.insert(END, password)
    pyperclip.copy(password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
from tkinter import messagebox
def generator():
    new_dict = {website_entry.get().lower():
        {
            "EMAIL": email_entry.get(),
            "Password": password_entry.get()
        }}
    if len(website_entry.get()) == 0 or len(password_entry.get()) == 0:
        messagebox.showinfo(title="ERROR", message="Please do not leave any field empty.")
    else:
        is_ok = messagebox.askokcancel(title=website_entry.get(),
                                       message=f"Here are your credentials:\npassword = {password_entry.get()}\nemail = {email_entry.get()}")
        if is_ok:
            try:
                with open("newdata.json", "r") as f:
                    data = json.load(f)
            except FileNotFoundError:
                # first save: create the file with just the new entry
                with open("newdata.json", "w") as f:
                    json.dump(new_dict, f, indent=4)
            else:
                # merge the new entry into the existing data and rewrite the file
                data.update(new_dict)
                with open("newdata.json", "w") as f:
                    json.dump(data, f, indent=4)
            finally:
                website_entry.delete(0, END)
                password_entry.delete(0, END)
def search():
    website = website_entry.get().lower()
    try:
        with open("newdata.json", "r") as f:
            data = json.load(f)
        messagebox.showinfo(title="info",
                            message=f"email: {data[website]['EMAIL']}\npassword: {data[website]['Password']}")
        pyperclip.copy(data[website]['Password'])
    except FileNotFoundError:
        # no data file yet: nothing has been saved so far
        messagebox.showinfo(title="NO DATA FILE", message="No passwords have been saved yet.")
    except KeyError as error:
        messagebox.showinfo(title="WEBSITE NOT FOUND", message=f"{error} has not been added or registered yet.")
# ---------------------------- UI SETUP ------------------------------- #
from tkinter import *
window = Tk()
window.title("Password Manager")
window.config(padx=50, pady=50)
image_logo = PhotoImage(file="logo.png")
background = Canvas(width=200, height=200)
background.create_image(100, 100, image=image_logo)
background.grid(row=0, column=1)
website_label = Label(text="Website", font=("Arial", 10))
website_label.grid(column=0, row=1)
website_entry = Entry(width=21)
website_entry.focus()
website_entry.grid(column=1, row=1, sticky="EW")
website_button = Button(text="Search", width=14, command=search)
website_button.grid(column=2, row=1, sticky="EW")
email_label = Label(text="EMAIL/USERNAME")
email_label.grid(column=0, row=2)
email_entry = Entry(width=35)
email_entry.grid(column=1, row=2, columnspan=2, sticky="EW")
email_entry.insert(END, "your_email@example.com")
password_label = Label(text="PASSWORD")
password_label.grid(column=0, row=3)
password_entry = Entry(width=21)
password_entry.grid(column=1, row=3, sticky="EW")
password_button = Button(text="Generate password", width=14, command=generate_password)
password_button.grid(column=2, row=3, sticky="EW")
add = Button(text="ADD", width=36, command=generator)
add.grid(column=1, row=4, columnspan=2, sticky="EW")
window.mainloop()
|
python
|
from sys import path
from os.path import dirname
# make the parent directory importable so that the 'model' package can be resolved
path.append(dirname(path[0]))
__package__ = "model"
from model import inference
from datetime import datetime
INFERENCE_TYPE = 'local'  # 'local' | 'cmle'
instances = [
{
'is_male': 'True',
'mother_age': 26.0,
'mother_race': 'Asian Indian',
'plurality': 1.0,
'gestation_weeks': 39,
'mother_married': 'True',
'cigarette_use': 'False',
'alcohol_use': 'False'
},
{
'is_male': 'True',
'mother_age': 26.0,
'mother_race': 'Asian Indian',
'plurality': 1.0,
'gestation_weeks': 39,
'mother_married': 'True',
'cigarette_use': 'False',
'alcohol_use': 'False'
}
]
print("")
print("Inference Type:{}".format(INFERENCE_TYPE))
print("")
time_start = datetime.utcnow()
print("Inference started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
for i in range(10):
    if INFERENCE_TYPE == 'local':
        output = inference.estimate_local(instances)
    else:
        output = inference.estimate_cmle(instances)
    print(output)
time_end = datetime.utcnow()
print(".......................................")
print("Inference finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Inference elapsed time: {} seconds".format(time_elapsed.total_seconds()))
|
python
|