text (string, 5–22M) | id (string, 12–177) | metadata (dict) | __index_level_0__ (int64, 0–1.37k)
---|---|---|---|
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment the following line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
export TF_LOG=""
# This script assumes you have created a .env file from the sample and that the
# required variables are set there.
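# Configure the azurerm remote state backend: the Terraform state lives in the
# management storage account, keyed per workspace service instance (TF_VAR_id).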
# shellcheck disable=SC2154
terraform init -input=false -backend=true -reconfigure \
-backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \
-backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \
-backend-config="container_name=$TF_VAR_terraform_state_container_name" \
-backend-config="key=tre-workspace-service-gitea-${TF_VAR_id}"
terraform plan
terraform apply -auto-approve
|
AzureTRE/templates/workspace_services/gitea/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 254
}
| 128 |
<html>
<head>
<script type="text/javascript">
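// Preserve any query string by moving it into a Guacamole URL fragment;
// otherwise simply append the guacamole/ path.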
if (window.location.href.indexOf("?") != -1) {
window.location = window.location.href.replace("?", "guacamole/#");
} else {
window.location = window.location.href + "guacamole/";
}
</script>
</head>
<body>
Redirecting to Guacamole...
</body>
</html>
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/index.jsp/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/index.jsp",
"repo_id": "AzureTRE",
"token_count": 134
}
| 129 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.guacamole.auth.azuretre;
import com.auth0.jwk.UrlJwkProvider;
import com.google.common.base.Strings;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.auth.azuretre.connection.ConnectionService;
import org.apache.guacamole.auth.azuretre.user.AzureTREAuthenticatedUser;
import org.apache.guacamole.auth.azuretre.user.TreUserContext;
import org.apache.guacamole.net.auth.AbstractAuthenticationProvider;
import org.apache.guacamole.net.auth.AuthenticatedUser;
import org.apache.guacamole.net.auth.Credentials;
import org.apache.guacamole.net.auth.UserContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URL;
public class AzureTREAuthenticationProvider extends AbstractAuthenticationProvider {
public static final String ROOT_CONNECTION_GROUP = "ROOT";
private static final Logger LOGGER = LoggerFactory.getLogger(AzureTREAuthenticationProvider.class);
private final AuthenticationProviderService authenticationProviderService;
public AzureTREAuthenticationProvider() {
this.authenticationProviderService = new AuthenticationProviderService();
}
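// Overload that allows injecting the service (e.g. a mock in unit tests);
// falls back to the default implementation when null is passed.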
public AzureTREAuthenticationProvider(
AuthenticationProviderService authenticationProviderService) {
if (authenticationProviderService == null) {
this.authenticationProviderService = new AuthenticationProviderService();
} else {
this.authenticationProviderService = authenticationProviderService;
}
}
@Override
public String getIdentifier() {
return "azuretre";
}
@Override
public AuthenticatedUser updateAuthenticatedUser(AuthenticatedUser authenticatedUser, Credentials credentials)
throws GuacamoleException {
LOGGER.info("updateAuthenticatedUser");
AuthenticatedUser updated = authenticateUser(credentials);
LOGGER.info("updateAuthenticatedUser - done");
return updated;
}
@Override
public AzureTREAuthenticatedUser authenticateUser(final Credentials credentials) {
LOGGER.info("Authenticating user");
// Getting headers from the oauth2 proxy
final String accessToken = credentials.getRequest().getHeader("X-Forwarded-Access-Token");
final String prefUsername = credentials.getRequest().getHeader("X-Forwarded-Preferred-Username");
if (Strings.isNullOrEmpty(accessToken)) {
LOGGER.error("access token was not provided");
return null;
}
if (Strings.isNullOrEmpty(prefUsername)) {
LOGGER.error("preferred username was not present in the token");
return null;
}
return new AzureTREAuthenticatedUser(credentials, accessToken, prefUsername, null, this);
}
@Override
public UserContext getUserContext(final AuthenticatedUser authenticatedUser) throws GuacamoleException {
LOGGER.debug("Getting user context.");
if (authenticatedUser instanceof AzureTREAuthenticatedUser) {
final AzureTREAuthenticatedUser user = (AzureTREAuthenticatedUser) authenticatedUser;
final String accessToken = user.getAccessToken();
LOGGER.debug("Getting configurations in order to populate user context.");
var connections = ConnectionService.getConnections(user);
LOGGER.debug("Creating user context.");
final TreUserContext treUserContext = new TreUserContext(this, connections);
treUserContext.init(user);
// Validate the token 'again': the OpenID extension verified it, but it didn't verify
// that we got the correct roles. The fact that a valid token was returned doesn't mean
// this user is an Owner or a Researcher. If they're not, break and don't try to get any VMs.
// Note: at the moment there is NO apparent way to un-authorize a user that a previous
// extension authorized (the user will see an empty list of VMs).
// Note 2: the API app will also verify the token and in any case will not return any VMs
// for such a user.
try {
LOGGER.info("Validating token");
final UrlJwkProvider jwkProvider =
new UrlJwkProvider(new URL(System.getenv("OAUTH2_PROXY_JWKS_ENDPOINT")));
authenticationProviderService.validateToken(accessToken, jwkProvider);
} catch (final Exception ex) {
// Failed to validate the token
LOGGER.error("Failed to validate token. ex: " + ex);
return null;
}
return treUserContext;
}
return null;
}
@Override
public UserContext updateUserContext(UserContext context, AuthenticatedUser authenticatedUser,
Credentials credentials)
throws GuacamoleException {
LOGGER.debug("Updating usercontext");
var userContext = getUserContext(authenticatedUser);
return userContext;
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/AzureTREAuthenticationProvider.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/AzureTREAuthenticationProvider.java",
"repo_id": "AzureTRE",
"token_count": 1998
}
| 130 |
package org.apache.guacamole.auth.azuretre.user;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.guacamole.net.auth.Credentials;
import org.junit.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.Mockito.mock;
public class AzureTREAuthenticatedUserTest {
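// A long-expired Azure AD access token; the tests below treat it as an opaque
// string and never validate it.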
String dummyAccessToken =
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCJ9.eyJhdWQiOiI2ZjY3ZjI3Y"
+ "S04NTk4LTQ4ZGMtYTM1OC00MDVkMzAyOThhMDMiLCJpc3MiOiJodHRwczovL2xvZ2luLm1pY3Jvc29mdG9ubGluZS5jb20vYWY0MTg"
+ "0ZGItNjdhOC00ZDMxLWJjMDYtYmUwN2IwMGJlYWQwL3YyLjAiLCJpYXQiOjE2MDIxNzUxODQsIm5iZiI6MTYwMjE3NTE4NCwiZXhwI"
+ "joxNjAyMTc5MDg0LCJhaW8iOiJBVFFBeS84UkFBQUFRWVRQZW8yM3NpN0ZuQjZXbEtIZUs5MnhFZGN5T3NKWDhzSXBkRUpRd2dnR1g"
+ "3M0ZFL0hPTCtDZU1STjdrQlJoIiwiYXpwIjoiNmY2N2YyN2EtODU5OC00OGRjLWEzNTgtNDA1ZDMwMjk4YTAzIiwiYXpwYWNyIjoiM"
+ "SIsIm5hbWUiOiJNYXJjdXMgVGVzdCIsIm9pZCI6IjYzYTE3NzY0LThiZWEtNDk4Yi1hYzEyLWZjNTRlMzMwMDAxNyIsInByZWZlcnJ"
+ "lZF91c2VybmFtZSI6Im1hcmN1c3Rlc3RAZHJlZGV2Mm91dGxvb2sub25taWNyb3NvZnQuY29tIiwicmgiOiIwLkFBQUEyNFJCcjZob"
+ "k1VMjhCcjRIc0F2cTBIcnlaMi1ZaGR4SW8xaEFYVEFwaWdOMEFITS4iLCJyb2xlcyI6WyJQcm9qZWN0LUFkbWluaXN0cmF0b3IiLCJ"
+ "Qcm9qZWN0LVVzZXIiXSwic2NwIjoiZW1haWwgb3BlbmlkIHByb2ZpbGUgVXNlci5SZWFkIiwic3ViIjoiLUg2aFdjR0pRd2hJVE9Za"
+ "kNJY1RkV2V3UkNfMUZHZXFHZnZpQV91Q0JVRSIsInRpZCI6ImFmNDE4NGRiLTY3YTgtNGQzMS1iYzA2LWJlMDdiMDBiZWFkMCIsInV"
+ "wbiI6Im1hcmN1c3Rlc3RAZHJlZGV2Mm91dGxvb2sub25taWNyb3NvZnQuY29tIiwidXRpIjoiMk1wVHo3WExXVTJzQV9ENVRWaTZBQ"
+ "SIsInZlciI6IjIuMCIsIndpZHMiOlsiYjc5ZmJmNGQtM2VmOS00Njg5LTgxNDMtNzZiMTk0ZTg1NTA5Il19.qG8CZ7_AIxvt7YTy9U"
+ "qhLUujv_fIdwTWrnKZlN9AE5tJvaHCNP_7URJWbE9J3tcH2Ot6pYORHqqhcRAYe6pGP1w4FZFLt-GRLBfZ80V6uuYTIA3BmZEimVBM"
+ "QchPfwpZm6kJhT8Jc9qeMXoZbPVNoeMAf1mFthgQ_VfffGt_tnX-vf9CCsQcS7D175RNpbbpKXvQVoupIt_iwdxhwb6_cJSTolV8P4"
+ "ohJWKcU3dP61wzWuHP50wgxbvDIVqk7ltTTNFG36TAwlzd9-C_sztIoaIKRss_WIhSAu01SY6bWAw75M33KqRZt0KmvQRpwd14yeuG"
+ "K1ulUa8_-t3lynqWfw";
@Test
public void authenticatedUserReturnsClaims() {
final Credentials credentialsMock = mock(Credentials.class);
final AzureTREAuthenticatedUser authenticatedUser =
new AzureTREAuthenticatedUser(credentialsMock, dummyAccessToken, "dummy_username", "dummy_objectId", null);
assertEquals("dummy_objectId", authenticatedUser.getObjectId());
assertEquals("dummy_username", authenticatedUser.getIdentifier());
assertEquals(dummyAccessToken, authenticatedUser.getAccessToken());
assertEquals(credentialsMock, authenticatedUser.getCredentials());
assertNull(authenticatedUser.getAuthenticationProvider());
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/user/AzureTREAuthenticatedUserTest.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/user/AzureTREAuthenticatedUserTest.java",
"repo_id": "AzureTRE",
"token_count": 1962
}
| 131 |
resource "azurerm_healthcare_workspace" "healthcare_workspace" {
name = "hs${local.service_resource_name_suffix}"
resource_group_name = data.azurerm_resource_group.ws.name
location = data.azurerm_resource_group.ws.location
tags = local.workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_healthcare_fhir_service" "fhir" {
count = var.deploy_fhir ? 1 : 0
name = "fhir${local.service_resource_name_suffix}"
resource_group_name = data.azurerm_resource_group.ws.name
location = data.azurerm_resource_group.ws.location
workspace_id = azurerm_healthcare_workspace.healthcare_workspace.id
kind = "fhir-${var.fhir_kind}"
tags = local.workspace_service_tags
authentication {
authority = local.authority
audience = "https://hs${local.service_resource_name_suffix}-fhir${local.service_resource_name_suffix}.fhir.azurehealthcareapis.com"
}
identity {
type = "SystemAssigned"
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_healthcare_dicom_service" "dicom" {
count = var.deploy_dicom ? 1 : 0
name = "dicom${local.service_resource_name_suffix}"
workspace_id = azurerm_healthcare_workspace.healthcare_workspace.id
location = data.azurerm_resource_group.ws.location
tags = local.workspace_service_tags
identity {
type = "SystemAssigned"
}
lifecycle { ignore_changes = [tags] }
}
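# A single private endpoint exposes the healthcare workspace (and its FHIR/DICOM
# services) on the workspace services subnet; both private DNS zones are linked
# so the service FQDNs resolve to private addresses.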
resource "azurerm_private_endpoint" "health_services_private_endpoint" {
name = "pe-${azurerm_healthcare_workspace.healthcare_workspace.name}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
subnet_id = data.azurerm_subnet.services.id
tags = local.workspace_service_tags
private_dns_zone_group {
name = "private-dns-zone-group"
private_dns_zone_ids = [data.azurerm_private_dns_zone.health.id, data.azurerm_private_dns_zone.dicom.id]
}
private_service_connection {
private_connection_resource_id = azurerm_healthcare_workspace.healthcare_workspace.id
name = "psc-${azurerm_healthcare_workspace.healthcare_workspace.name}"
subresource_names = ["healthcareworkspace"]
is_manual_connection = false
}
depends_on = [
azurerm_healthcare_fhir_service.fhir,
azurerm_healthcare_dicom_service.dicom
]
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspace_services/health-services/terraform/main.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/health-services/terraform/main.tf",
"repo_id": "AzureTRE",
"token_count": 1122
}
| 132 |
export TF_LOG=""
terraform init -input=false -backend=true -reconfigure \
-backend-config="resource_group_name=$TF_VAR_mgmt_resource_group_name" \
-backend-config="storage_account_name=$TF_VAR_mgmt_storage_account_name" \
-backend-config="container_name=$TF_VAR_terraform_state_container_name" \
-backend-config="key=tre-service-innereye-${TF_VAR_id}"
terraform plan
terraform apply -auto-approve
|
AzureTRE/templates/workspace_services/innereye/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 159
}
| 133 |
export AZURE_STORAGE_CONNECTION_STRING="${MLFlow_Connection_String}"
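# MLflow's Azure Blob artifact store reads AZURE_STORAGE_CONNECTION_STRING from
# the environment; the versions below are pinned to stay mutually compatible.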
pip install mlflow==1.24.0
pip install azure-storage-blob==12.10.0
pip install azure-identity==1.8.0
|
AzureTRE/templates/workspace_services/mlflow/mlflow-vm-config/linux/template_config.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/mlflow-vm-config/linux/template_config.sh",
"repo_id": "AzureTRE",
"token_count": 69
}
| 134 |
ID="__CHANGE_ME__"
WORKSPACE_ID="__CHANGE_ME__"
SQL_SKU="__CHANGE_ME__"
STORAGE_MB="__CHANGE_ME__"
DB_NAME="__CHANGE_ME__"
|
AzureTRE/templates/workspace_services/mysql/.env.sample/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mysql/.env.sample",
"repo_id": "AzureTRE",
"token_count": 63
}
| 135 |
{
"schemaType": "ParameterSet",
"schemaVersion": "1.0.1",
"namespace": "",
"name": "tre-workspace-service-ohdsi",
"parameters": [
{
"name": "tre_id",
"source": {
"env": "TRE_ID"
}
},
{
"name": "id",
"source": {
"env": "ID"
}
},
{
"name": "tfstate_container_name",
"source": {
"env": "TERRAFORM_STATE_CONTAINER_NAME"
}
},
{
"name": "tfstate_resource_group_name",
"source": {
"env": "MGMT_RESOURCE_GROUP_NAME"
}
},
{
"name": "tfstate_storage_account_name",
"source": {
"env": "MGMT_STORAGE_ACCOUNT_NAME"
}
},
{
"name": "workspace_id",
"source": {
"env": "WORKSPACE_ID"
}
},
{
"name": "address_space",
"source": {
"env": "ADDRESS_SPACE"
}
},
{
"name": "arm_environment",
"source": {
"env": "ARM_ENVIRONMENT"
}
},
{
"name": "azure_environment",
"source": {
"env": "AZURE_ENVIRONMENT"
}
},
{
"name": "configure_data_source",
"source": {
"env": "CONFIGURE_DATA_SOURCE"
}
},
{
"name": "data_source_config",
"source": {
"env": "DATA_SOURCE_CONFIG"
}
},
{
"name": "data_source_daimons",
"source": {
"env": "DATA_SOURCE_DAIMONS"
}
}
]
}
|
AzureTRE/templates/workspace_services/ohdsi/parameters.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/parameters.json",
"repo_id": "AzureTRE",
"token_count": 816
}
| 136 |
resource "random_password" "atlas_security_admin_password" {
length = 8
special = false
}
resource "azurerm_key_vault_secret" "atlas_security_admin_password" {
name = "atlas-security-admin-password-${local.short_service_id}"
key_vault_id = data.azurerm_key_vault.ws.id
value = random_password.atlas_security_admin_password.result
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
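# Re-run the Atlas security bootstrap whenever the WebAPI database is replaced;
# connection details and credentials are passed to the script via environment variables.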
resource "terraform_data" "deployment_atlas_security" {
triggers_replace = {
postgres_database_id = azurerm_postgresql_flexible_server_database.db.id
}
provisioner "local-exec" {
environment = {
OHDSI_ADMIN_CONNECTION_STRING = "host=${azurerm_postgresql_flexible_server.postgres.fqdn} port=5432 dbname=${local.postgres_webapi_database_name} user=${local.postgres_webapi_admin_username} password=${azurerm_key_vault_secret.postgres_webapi_admin_password.value} sslmode=require"
ATLAS_SECURITY_ADMIN_PASSWORD = azurerm_key_vault_secret.atlas_security_admin_password.value
ATLAS_USERS = "admin,${azurerm_key_vault_secret.atlas_security_admin_password.value}"
WEB_API_URL = local.ohdsi_webapi_url
}
command = "../scripts/atlas_security.sh"
}
depends_on = [
azurerm_postgresql_flexible_server_database.db,
terraform_data.deployment_ohdsi_webapi_init,
terraform_data.postgres_core_dns_link,
azurerm_private_endpoint.webapi_private_endpoint,
azurerm_subnet_network_security_group_association.postgres
]
}
|
AzureTRE/templates/workspace_services/ohdsi/terraform/atlas_security.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/atlas_security.tf",
"repo_id": "AzureTRE",
"token_count": 643
}
| 137 |
{
"schemaType": "ParameterSet",
"schemaVersion": "1.0.1",
"namespace": "",
"name": "tre-workspace-airlock-import-review",
"parameters": [
{
"name": "address_spaces",
"source": {
"env": "ADDRESS_SPACES"
}
},
{
"name": "azure_location",
"source": {
"env": "LOCATION"
}
},
{
"name": "tre_id",
"source": {
"env": "TRE_ID"
}
},
{
"name": "id",
"source": {
"env": "ID"
}
},
{
"name": "tfstate_container_name",
"source": {
"env": "TERRAFORM_STATE_CONTAINER_NAME"
}
},
{
"name": "tfstate_resource_group_name",
"source": {
"env": "MGMT_RESOURCE_GROUP_NAME"
}
},
{
"name": "tfstate_storage_account_name",
"source": {
"env": "MGMT_STORAGE_ACCOUNT_NAME"
}
},
{
"name": "enable_local_debugging",
"source": {
"env": "ENABLE_LOCAL_DEBUGGING"
}
},
{
"name": "register_aad_application",
"source": {
"env": "REGISTER_AAD_APPLICATION"
}
},
{
"name": "create_aad_groups",
"source": {
"env": "CREATE_AAD_GROUPS"
}
},
{
"name": "client_id",
"source": {
"env": "CLIENT_ID"
}
},
{
"name": "client_secret",
"source": {
"env": "CLIENT_SECRET"
}
},
{
"name": "scope_id",
"source": {
"env": "SCOPE_ID"
}
},
{
"name": "workspace_owner_object_id",
"source": {
"env": "WORKSPACE_OWNER_OBJECT_ID"
}
},
{
"name": "sp_id",
"source": {
"env": "SP_ID"
}
},
{
"name": "app_role_id_workspace_owner",
"source": {
"env": "APP_ROLE_ID_WORKSPACE_OWNER"
}
},
{
"name": "app_role_id_workspace_researcher",
"source": {
"env": "APP_ROLE_ID_WORKSPACE_RESEARCHER"
}
},
{
"name": "app_role_id_workspace_airlock_manager",
"source": {
"env": "APP_ROLE_ID_WORKSPACE_AIRLOCK_MANAGER"
}
},
{
"name": "aad_redirect_uris",
"source": {
"env": "AAD_REDIRECT_URIS"
}
},
{
"name": "app_service_plan_sku",
"source": {
"env": "WORKSPACE_APP_SERVICE_PLAN_SKU"
}
},
{
"name": "arm_environment",
"source": {
"env": "ARM_ENVIRONMENT"
}
},
{
"name": "azure_environment",
"source": {
"env": "AZURE_ENVIRONMENT"
}
}
]
}
|
AzureTRE/templates/workspaces/airlock-import-review/parameters.json/0
|
{
"file_path": "AzureTRE/templates/workspaces/airlock-import-review/parameters.json",
"repo_id": "AzureTRE",
"token_count": 1500
}
| 138 |
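# Look up core TRE resources by their conventional names (derived from tre_id) so the
# airlock module can reference the core identities, DNS zone and Service Bus topic.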
data "azurerm_user_assigned_identity" "airlock_id" {
name = "id-airlock-${var.tre_id}"
resource_group_name = "rg-${var.tre_id}"
}
data "azurerm_user_assigned_identity" "api_id" {
name = "id-api-${var.tre_id}"
resource_group_name = "rg-${var.tre_id}"
}
data "azurerm_private_dns_zone" "blobcore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.blob.core.windows.net"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_servicebus_namespace" "airlock_sb" {
name = "sb-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "azurerm_servicebus_topic" "blob_created" {
name = local.blob_created_topic_name
resource_group_name = local.core_resource_group_name
namespace_name = data.azurerm_servicebus_namespace.airlock_sb.name
}
|
AzureTRE/templates/workspaces/base/terraform/airlock/data.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/airlock/data.tf",
"repo_id": "AzureTRE",
"token_count": 403
}
| 139 |
locals {
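# Note: substr() with a negative offset counts from the end of the string, and a
# length of -1 means "to the end", so substr(var.tre_resource_id, -4, -1) yields
# the last four characters of the resource ID.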
short_workspace_id = substr(var.tre_resource_id, -4, -1)
workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
storage_name = lower(replace("stg${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
redacted_senstive_value = "REDACTED"
tre_workspace_tags = {
tre_id = var.tre_id
tre_workspace_id = var.tre_resource_id
}
}
|
AzureTRE/templates/workspaces/base/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 259
}
| 140 |
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans',
'Droid Sans', 'Helvetica Neue', sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
h1 {
margin-top: 0;
font-weight: normal;
}
h2 {
font-weight: normal;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace;
}
.tre-logout-message {
margin: 40px auto;
width: 70%;
}
.tre-top-nav {
box-shadow: 0 1px 2px 0px #033d68;
z-index: 100;
}
.ms-CommandBar {
background-color: transparent;
padding-left: 0px;
.ms-Button {
background-color: transparent;
}
}
.tre-notifications-button {
position: relative;
top: 7px;
color: #fff;
i {
font-size: 20px !important;
}
}
.tre-notifications-button i {
font-size: 24px;
}
.tre-notifications-dismiss {
text-align: right;
padding-top: 10px;
}
ul.tre-notifications-list {
margin: 0;
padding: 0;
}
.tre-notifications-list li {
list-style: none;
margin-top: 20px;
padding-bottom: 10px;
border-bottom: 1px #ccc solid;
}
ul.tre-notifications-steps-list {
padding: 5px 15px 15px 15px;
background-color: #f9f9f9;
margin: 10px 0 0 0;
}
ul.tre-notifications-steps-list li {
border: none;
}
.tre-notification-time {
font-style: italic;
text-align: right;
font-size: 12px;
}
.tre-home-link {
color: #fff;
text-decoration: none;
font-size:1.2rem;
}
.tre-user-menu {
margin-top: 2px;
.ms-Persona-primaryText:hover {
color: #fff;
}
.ms-Persona-primaryText {
color: #fff;
}
.ms-Icon {
margin-top: 3px;
}
}
.tre-table {
.ms-Persona-primaryText {
font-size: 12px;
color: rgb(108, 108, 108);
}
.ms-DetailsRow-cell {
align-self: baseline;
}
}
.tre-hide-chevron i[data-icon-name=ChevronDown] {
display: none;
}
.tre-body {
height: 100%;
overflow: hidden;
}
.tre-body-inner {
height: 100%;
overflow: hidden;
}
.tre-body-content {
padding-left: 10px;
}
.tre-left-nav {
width: 200px;
overflow-y: auto;
box-shadow: 1px 0 8px 0px #ccc;
z-index: 100;
}
.tre-body-content {
width: calc(100% - 200px);
overflow-y: scroll;
background-color: #faf9f8;
padding-bottom: 80px;
}
.tre-workspace-header h1 {
margin: 10px 0 10px 0;
}
.tre-context-menu button {
text-transform: capitalize;
}
.tre-panel {
margin: 10px 15px 10px 10px;
padding: 10px;
}
.tre-resource-panel {
box-shadow: 1px 0px 5px 0px #ccc;
margin: 10px 15px 10px 10px;
padding: 10px;
background-color: #fff;
}
.ms-Pivot {
margin-bottom: 10px;
}
input[readonly]{
background-color:#efefef;
}
.tre-badge{
border-radius:4px;
background-color: #efefef;
padding:2px 6px;
text-transform: capitalize;
display:inline-block;
font-size:12px;
}
.tre-badge-in-progress{
background-color: #ce7b00;
color: #fff;
}
.tre-badge-failed{
background-color: #990000;
color: #fff;
padding-top: 4px;
padding-left: 7px;
font-size: 16px;
}
.tre-badge-success{
background-color: #006600;
color: #fff;
}
.tre-complex-list{
list-style: none;
padding:0 0 0 20px;
margin:0;
}
.tre-complex-list-border{
border-bottom: 1px #ccc solid;
margin-left:-15px;
}
.tre-complex-list-string{
padding-left:20px;
}
.tre-complex-list .ms-Icon{
font-size:12px!important;
font-weight: bold;
position: relative;
top:2px;
}
// Classes for rendering power state badges
.tre-power-badge {
text-align: center;
color: #636262;
margin: 6px;
.tre-power-on, .tre-power-off {
height: 8px;
width: 8px;
background-color: #006600;
border-radius: 50%;
display: inline-block;
margin-right: 5px;
}
.tre-power-off {
background-color: #990000;
}
}
/* hide fields explicitly */
.tre-hidden {
display: none;
}
/* create form overrides */
/* panel header */
.ms-Panel-commands {
background: #fff;
border-bottom: 1px #ccc solid;
padding-bottom: 10px;
}
/* template description at top of panel */
.rjsf > .ms-Grid-col {
margin-top: -25px;
}
.rjsf > .ms-Grid-col > span:first-of-type {
display: block;
background-color: #efefef;
padding: 10px;
margin-bottom: 15px;
font-style: normal;
}
/* border around sub-blocks */
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-object,
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-array {
border: 1px #ccc dashed;
padding: 10px;
background-color: #fcfcfc;
}
/* sub titles and sub-sub titles */
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-object > label.ms-Label,
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-array > label.ms-Label {
font-size: 20px;
}
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-object > .ms-Grid > .ms-Grid-row > .ms-Grid-col > label.ms-Label,
.ms-Panel-content .rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-array > .ms-Grid > .ms-Grid-row > .ms-Grid-col > label.ms-Label {
font-size: 16px;
}
/* remove secondary template description at the bottom of each template + sub blocks */
.rjsf > .ms-Grid-col > span:last-of-type,
.rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-object > span:last-of-type,
.rjsf > .ms-Grid-col > .ms-Grid > .ms-Grid-row > .field-object > .ms-Grid > .ms-Grid-row > .ms-Grid-col > span:last-of-type {
display: none;
}
/* make descriptive text italic */
.field span {
font-style: italic;
}
.field span.ms-Checkbox-text {
font-style: normal;
}
|
AzureTRE/ui/app/src/App.scss/0
|
{
"file_path": "AzureTRE/ui/app/src/App.scss",
"repo_id": "AzureTRE",
"token_count": 2337
}
| 141 |
import React, { useEffect, useState } from 'react';
import { AnimationClassNames, Callout, IconButton, FontWeights, Stack, Text, getTheme, mergeStyles, mergeStyleSets, StackItem, IButtonStyles } from '@fluentui/react';
import { HttpMethod, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { ApiEndpoint } from '../../models/apiEndpoints';
import config from "../../config.json";
// TODO:
// - change text to link
// - include any small print
export const Footer: React.FunctionComponent = () => {
const [showInfo, setShowInfo] = useState(false);
const [apiMetadata, setApiMetadata] = useState<any>();
const [health, setHealth] = useState<{services: {service: string, status: string}[]}>();
const apiCall = useAuthApiCall();
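// Fetch the API metadata and per-service health status once the authenticated API hook is ready.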
useEffect(() => {
const getMeta = async() => {
const result = await apiCall(ApiEndpoint.Metadata, HttpMethod.Get);
setApiMetadata(result);
};
const getHealth = async() => {
const result = await apiCall(ApiEndpoint.Health, HttpMethod.Get);
setHealth(result);
};
getMeta();
getHealth();
}, [apiCall]);
const uiConfig = config as any;
return (
<div className={contentClass}>
<Stack horizontal style={{alignItems:'center'}}>
<StackItem grow={1}>Azure Trusted Research Environment</StackItem>
<StackItem>
<IconButton
styles={iconButtonStyles}
iconProps={{iconName:'Info'}}
id="info"
onClick={() => setShowInfo(!showInfo)}
/>
</StackItem>
</Stack>
{
showInfo && <Callout
className={styles.callout}
ariaLabelledBy="info-label"
ariaDescribedBy="info-description"
role="dialog"
gapSpace={0}
target="#info"
onDismiss={() => setShowInfo(false)}
setInitialFocus
>
<Text block variant="xLarge" className={styles.title} id="info-label">
Azure TRE
</Text>
<Stack tokens={{childrenGap: 5}}>
{
uiConfig.version && <Stack horizontal horizontalAlign='space-between'>
<Stack.Item>UI Version:</Stack.Item>
<Stack.Item>{uiConfig.version}</Stack.Item>
</Stack>
}
{
apiMetadata?.api_version && <Stack horizontal horizontalAlign='space-between'>
<Stack.Item>API Version:</Stack.Item>
<Stack.Item>{apiMetadata.api_version}</Stack.Item>
</Stack>
}
</Stack>
<Stack tokens={{childrenGap: 5}} style={{marginTop: 10, paddingTop: 8, borderTop: '1px solid #e8e8e8'}}>
{
health?.services.map(s => {
return <Stack horizontal horizontalAlign='space-between' key={s.service}>
<Stack.Item>{s.service}:</Stack.Item>
<Stack.Item>{s.status}</Stack.Item>
</Stack>
})
}
</Stack>
</Callout>
}
</div>
);
};
const theme = getTheme();
const contentClass = mergeStyles([
{
alignItems: 'center',
backgroundColor: theme.palette.themeDark,
color: theme.palette.white,
lineHeight: '25px',
padding: '0 20px',
},
AnimationClassNames.scaleUpIn100
]);
const iconButtonStyles: Partial<IButtonStyles> = {
root: {
color: theme.palette.white,
},
rootHovered: {
color: theme.palette.neutralDark,
},
};
const styles = mergeStyleSets({
callout: {
width: 250,
padding: '20px 24px',
},
title: {
marginBottom: 12,
fontWeight: FontWeights.semilight
}
});
|
AzureTRE/ui/app/src/components/shared/Footer.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/Footer.tsx",
"repo_id": "AzureTRE",
"token_count": 1635
}
| 142 |
import React, { useEffect, useState } from 'react';
import { useNavigate, useParams } from 'react-router-dom';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { useAuthApiCall, HttpMethod } from '../../hooks/useAuthApiCall';
import { Spinner, SpinnerSize } from '@fluentui/react';
import { LoadingState } from '../../models/loadingState';
import { SharedService } from '../../models/sharedService';
import { ResourceHeader } from './ResourceHeader';
import { useComponentManager } from '../../hooks/useComponentManager';
import { Resource } from '../../models/resource';
import { ResourceBody } from './ResourceBody';
import { APIError } from '../../models/exceptions';
import { ExceptionLayout } from './ExceptionLayout';
interface SharedServiceItemProps {
readonly?: boolean
}
export const SharedServiceItem: React.FunctionComponent<SharedServiceItemProps> = (props: SharedServiceItemProps) => {
const { sharedServiceId } = useParams();
const [sharedService, setSharedService] = useState({} as SharedService);
const [loadingState, setLoadingState] = useState(LoadingState.Loading);
const navigate = useNavigate();
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
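// Track operation updates for this resource: refresh local state on change and
// navigate back to the shared services list when the resource is removed.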
const latestUpdate = useComponentManager(
sharedService,
(r: Resource) => setSharedService(r as SharedService),
(r: Resource) => navigate(`/${ApiEndpoint.SharedServices}`)
);
useEffect(() => {
const getData = async () => {
try {
let ss = await apiCall(`${ApiEndpoint.SharedServices}/${sharedServiceId}`, HttpMethod.Get);
setSharedService(ss.sharedService);
setLoadingState(LoadingState.Ok);
} catch (err:any) {
err.userMessage = "Error retrieving shared service";
setApiError(err);
setLoadingState(LoadingState.Error)
}
};
getData();
}, [apiCall, sharedServiceId]);
switch (loadingState) {
case LoadingState.Ok:
return (
<>
<ResourceHeader resource={sharedService} latestUpdate={latestUpdate} readonly={props.readonly} />
<ResourceBody resource={sharedService} readonly={props.readonly} />
</>
);
case LoadingState.Error:
return (
<ExceptionLayout e={apiError} />
);
default:
return (
<div style={{ marginTop: '20px' }}>
<Spinner label="Loading Shared Service" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
)
}
};
|
AzureTRE/ui/app/src/components/shared/SharedServiceItem.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/SharedServiceItem.tsx",
"repo_id": "AzureTRE",
"token_count": 875
}
| 143 |
{
"id": "36847de7-aa82-40a8-bbe7-d211bd677467",
"resourceId": "8c70974a-5f66-4ae9-9502-7a54e9e0bb86",
"resourcePath": "/workspaces/1e800001-7385-46a1-9f6d-490a6201ea01/workspace-services/8c70974a-5f66-4ae9-9502-7a54e9e0bb86",
"resourceVersion": 0,
"status": "deploying",
"action": "install",
"message": "8c70974a-5f66-4ae9-9502-7a54e9e0bb86: install action completed successfully.",
"createdWhen": 1650653543.343581,
"updatedWhen": 1650653543.343581,
"user": {
"id": "7f9756c3-7925-4b78-a10b-83927ab9c008",
"name": "[email protected] Lopes de Almeida",
"email": "",
"roles": [
"WorkspaceOwner"
],
"roleAssignments": []
}
}
|
AzureTRE/ui/app/src/components/shared/notifications/dummyOp.json/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/notifications/dummyOp.json",
"repo_id": "AzureTRE",
"token_count": 393
}
| 144 |
import { TypedUseSelectorHook, useDispatch, useSelector } from 'react-redux';
import { RootState, AppDispatch } from '../store/store';
// Alias the generic Redux hooks with the app's store types for nicer usage throughout the app.
export const useAppDispatch = () => useDispatch<AppDispatch>();
export const useAppSelector: TypedUseSelectorHook<RootState> = useSelector;
|
AzureTRE/ui/app/src/hooks/customReduxHooks.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/hooks/customReduxHooks.ts",
"repo_id": "AzureTRE",
"token_count": 100
}
| 145 |
export interface User {
email: string,
id: string,
name: string,
roleAssignments: Array<any>,
roles: Array<string>
}
|
AzureTRE/ui/app/src/models/user.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/user.ts",
"repo_id": "AzureTRE",
"token_count": 53
}
| 146 |
{
"chemical2id": {
"lithium carbonate": "D016651",
"lithium": "D008094",
"phenobarbital": "D010634",
"ammonia": "D000641",
"valproic acid": "D014635",
"vpa": "D014635",
"nh3": "D000641",
"haloperidol": "D006220",
"apomorphine": "D001058",
"da": "D004298",
"dopac": "D015102",
"isoproterenol": "D007545",
"iso": "D007545",
"hydroxyproline": "D006909",
"retinyl acetate": "C009166",
"butylated hydroxyanisole": "D002083",
"ra": "C009166",
"bha": "D002083",
"ketanserin": "D007650",
"alfentanil": "D015760",
"serotonin": "D012701",
"chlordiazepoxide": "D002707",
"glycopyrronium": "D006024",
"edrophonium": "D004491",
"fentanyl": "D005283",
"ketamine": "D007649",
"co2": "D002245",
"prazosin": "D011224",
"epsilon-aminocaproic acid": "D015119",
"cyclophosphamide": "D003520",
"mesna": "D015080",
"benzodiazepine": "D001569",
"flumazenil": "D005442",
"diazepam": "D003975",
"meperidine": "D008614",
"morphine": "D009020",
"oral contraceptives": "D003276",
"heparin": "D006493",
"sodium warfarin": "D014859",
"paracetamol": "D000082",
"methamphetamine": "D008694",
"5-ht": "D012701",
"clozapine": "D003024",
"olanzapine": "C076029",
"d-amphetamine": "D003913",
"phencyclidine": "D010622",
"meth": "D008694",
"bupropion hydrochloride": "D016642",
"bupropion": "D016642",
"bupropion hcl": "D016642",
"penicillin": "D010406",
"urethane": "D014520",
"penicillin-g potassium": "D010400",
"dexmedetomidine": "D020927",
"pentobarbital": "D010424",
"oxygen": "D010100",
"lamotrigine": "C047781",
"ltg": "C047781",
"levodopa": "D007980",
"propofol": "D015742",
"hepatitis b surface antigen": "D006514",
"doxorubicin": "D004317",
"superoxide": "D013481",
"citrate": "C102006",
"amphotericin b": "D000666",
"didanosine": "D016049",
"hydroxyzine": "D006919",
"promethazine": "D011398",
"hydrocortisone": "D006854",
"prochlorperazine": "D011346",
"phenytoin": "D010672",
"lorazepam": "D008140",
"amphotercin b": "D000666",
"alcohol": "D000431",
"tobramycin": "D014031",
"tobramicyn": "D014031",
"creatinine": "D003404",
"anthracycline": "D018943",
"sm-5887": "C055866",
"pilocarpine": "D010862",
"pilo": "D010862",
"steroid": "D013256",
"corticosteroid": "D000305",
"daunorubicin": "D003630",
"benzodiazepines": "D001569",
"isoniazid": "D007538",
"iopentol": "C053571",
"iohexol": "D007472",
"metrizoate": "D008794",
"oral contraceptive": "D003276",
"thallium": "D013793",
"dipyridamole": "D004176",
"furosemide": "D005665",
"steroids": "D013256",
"everolimus": "C107135",
"tacrolimus": "D016559",
"methotrexate": "D008727",
"sirolimus": "D020123",
"loreclezole": "C066440",
"valproate": "D014635",
"clonazepam": "D002998",
"carbamazepine": "D002220",
"bicuculline": "D001640",
"n-methyl-d-aspartic acid": "D016202",
"bay k-8644": "D001498",
"calcium": "D002118",
"aminophylline": "D000628",
"carbimazole": "D002231",
"propranolol": "D011433",
"oestrogens": "D004967",
"progestogens": "D011374",
"oestrogen": "D004967",
"mannitol": "D008353",
"lidocaine": "D008012",
"succinylcholine": "D013390",
"rocuronium": "C061870",
"thiopental": "D013874",
"levofloxacin": "D064704",
"prostacyclin": "D011464",
"beraprost": "C048081",
"cilostazol": "C045645",
"bpt": "C048081",
"clz": "C045645",
"camp": "D000242",
"cyclic adenosine 3',5'-monophosphate": "D000242",
"etoricoxib": "C422649",
"diclofenac sodium": "D004008",
"diclofenac": "D004008",
"aspirin": "D001241",
"quetiapine": "C069541",
"divalproex": "D014635",
"qtp": "C069541",
"li": "D008094",
"dvp": "D014635",
"captopril": "D002216",
"dietary sodium chloride": "D017673",
"sodium chloride": "D012965",
"clonidine": "D003000",
"hexamethonium": "D018738",
"alpha2-adrenergic receptor agonist": "D058647",
"corticosterone": "D003345",
"organophosphorus": "D010755",
"tri-ortho-tolyl phosphate": "C025541",
"totp": "C025541",
"0,0'-diisopropyl phosphorofluoridate": "D007531",
"dfp": "D007531",
"organophosphorous": "D010755",
"adenosine a2a/a1 receptor antagonist": "D058915",
"adenosine a(2a)/a(1) receptor antagonist": "D058915",
"reserpine": "D012110",
"6-hydroxydopamine": "D016627",
"6-ohda": "D016627",
"mptp": "D015632",
"pegylated interferon alpha-2b": "C417083",
"ribavirin": "D012254",
"paroxetine": "D017374",
"alprazolam": "D000525",
"creatine": "D003401",
"aspartate": "D001224",
"alanine": "D000409",
"bromocriptine": "D001971",
"cyclooxygenase inhibitors": "D016861",
"prostaglandin": "D011453",
"n-terminal pro brain natriuretic peptide": "C109794",
"nt-probnp": "C109794",
"angiotensin": "D000809",
"enalapril": "D004656",
"diuretic": "D004232",
"pemoline": "D010389",
"oxazolidine": "C064210",
"amphetamines": "D000662",
"methylphenidate": "D008774",
"ifosfamide": "D007069",
"ammonium acetate": "C018824",
"nh4ac": "C018824",
"acetylcholine": "D000109",
"catecholamine": "D002395",
"kcl": "D011189",
"acetaldehyde": "D000079",
"verapamil": "D014700",
"amphetamine": "D000661",
"metrazol": "D010433",
"contrast media": "D003287",
"cm": "D003287",
"iopromide": "C038192",
"sodium": "D012964",
"na": "D012964",
"potassium": "D011188",
"k": "D011188",
"cr": "D003404",
"caffeine": "D002110",
"ethambutol": "D004977",
"emb": "D004977",
"valdecoxib": "C406224",
"ibuprofen": "D007052",
"naproxen": "D009288",
"ephedrine": "D004809",
"fluoxetine": "D005473",
"sertraline": "D020280",
"dopamine": "D004298",
"a-86929": "C095427",
"skf-82958": "C071262",
"6-chloro-7,8-dihydroxy-3-allyl-1-phenyl-2,3,4,5-tetrahydro-1h-3-benzaze pine hydrobromide": "C071262",
"a-77636": "C079415",
"[1r, 3s] 3-[1'-admantyl]-1-aminomethyl-3,4-dihydro-5,6-dihydroxy-1h-2-benzo pyran hydrochloride": "C079415",
"1-methyl-4-phenyl-1,2,3,6-tetrahydropyridine": "D015632",
"[-]-[5ar,11bs]-4,5,5a,6,7,11b-hexahydro-2-propyl-3-thia-5-+ ++azacyclopent-1- ena[c]phenathrene-9-10-diol": "C095427",
"ly-171555": "C416545",
"[4ar-trans]-4,4a,5,6,7,8,8a,9-o-dihydro-5n-propyl-2h-pyrazo lo-3-4-quinoline hydrochloride": "C416545",
"bupivacaine": "D002045",
"chloroprocaine": "C004616",
"naloxone": "D009270",
"angiotensin-converting enzyme inhibitors": "D000806",
"nmda": "D016202",
"amino acid": "D000596",
"glutamate": "D018698",
"mk-801": "D016291",
"ap7": "C031231",
"n-methyl-d-aspartate": "D016202",
"n,n'-dibenzhydrylethane-1,2-diamine dihydrochloride": "C507346",
"amn082": "C507346",
"nimodipine": "D009553",
"nitroglycerin": "D005996",
"nimo": "D009553",
"ntg": "D005996",
"warfarin": "D014859",
"flavonoids": "D005419",
"fat": "D004041",
"adriamycin": "D004317",
"triglycerides": "D014280",
"atp": "D000255",
"doxorubicinol": "C010013",
"amp": "D000249",
"adp": "D000244",
"ziprasidone": "C092292",
"mangiferin": "C013592",
"polyphenol": "D059808",
"isph": "D007545",
"lactate": "D019344",
"uric acid": "D014527",
"iron": "D007501",
"triphenyl tetrazolium chloride": "C009591",
"ttc": "C009591",
"glutathione": "D005978",
"vitamin c": "D001205",
"vitamin e": "D014810",
"dimethyl sulphoxide": "D004121",
"remifentanil": "C071741",
"etomidate": "D005045",
"daidzein": "C004742",
"choline": "D002794",
"ach": "D000109",
"4',7-dihydroxy-isoflavone": "C004742",
"scopolamine": "D012601",
"azithromycin": "D017963",
"baclofen": "D001418",
"dexamethasone": "D003907",
"methylprednisolone": "D008775",
"midazolam": "D008874",
"macrolide": "D018942",
"macrolides": "D018942",
"glyceryl trinitrate": "D005996",
"nitric oxide": "D009569",
"no": "D009569",
"gtn": "D005996",
"cocaine": "D003042",
"tamoxifen": "D013629",
"crack cocaine": "D016578",
"crack": "D016578",
"levomepromazine": "D008728",
"fluvoxamine": "D016666",
"phenothiazines": "D010640",
"eaca": "D015119",
"ranitidine": "D011899",
"cimetidine": "D002927",
"histamine": "D006632",
"salicylate": "D012459",
"acetaminophen": "D000082",
"salicylates": "D012459",
"methysergide": "D008784",
"ergot": "D004876",
"oxprenolol": "D010096",
"terbutaline": "D013726",
"methyldopa": "D008750",
"5,7-dihydroxytryptamine": "D015116",
"5,7-dht": "D015116",
"yohimbine": "D015016",
"clomipramine": "D002997",
"dilevalol": "D007741",
"puromycin aminonucleoside": "D011692",
"8-oh-dpat": "D017371",
"iron dextran": "D007505",
"tyrosine": "D014443",
"coumarin": "C030123",
"saxitoxin": "D012530",
"stx": "D012530",
"benzoylecgonine": "C005618",
"methadone": "D008691",
"hydromorphone": "D004091",
"ketorolac": "D020910",
"pan": "D011692",
"estrogen": "D004967",
"estrogens": "D004967",
"17beta-estradiol": "D004958",
"e2": "D004958",
"pravastatin": "D017035",
"lovastatin": "D008148",
"simvastatin": "D019821",
"dxr": "D004317",
"chloramphenicol": "D002701",
"trihexyphenidyl hydrochloride": "D014282",
"trihexyphenidyl": "D014282",
"cyclosporine": "D016572",
"csa": "D016572",
"gentamicin": "D005839",
"amphothericin b": "D000666",
"ketoconazole": "D007654",
"nicotine": "D009538",
"spiperone": "D013134",
"norepinephrine": "D009638",
"suprofen": "D013496",
"deoxycholic acid": "D003840",
"ethinyl estradiol": "D004997",
"taurodeoxycholic acid": "D013657",
"sodium deoxycholate": "D003840",
"bile acid": "D001647",
"taurocholic acid": "D013656",
"ouabain": "D010042",
"digitalis glycosides": "D004071",
"cisapride": "D020117",
"diltiazem": "D004110",
"erythromycin": "D004917",
"azole": "D001393",
"paclitaxel": "D017239",
"carboplatin": "D016190",
"taxol": "D017239",
"netilmicin": "D009428",
"netilmicin sulfate": "D009428",
"tobramycin sulfate": "D014031",
"piperacillin sodium": "D010878",
"piperacillin": "D010878",
"aminoglycoside": "D000617",
"prostaglandins": "D011453",
"pgs": "D011453",
"flurothyl": "D005481",
"picrotoxin": "D010852",
"pentetrazol": "D010433",
"ptz": "D010433",
"sulindac": "D013467",
"mefenamic acid": "D008528",
"meclofenamic acid": "D008469",
"fluorthyl": "D005481",
"amlodipine": "D017311",
"benazapril": "C044946",
"15-f(2t)-isoprostane": "C075750",
"15-f(2t)-isop": "C075750",
"lipid hydroperoxides": "D008054",
"lpo": "D008054",
"thiobarbituric acid reactive substances": "D017392",
"tbars": "D017392",
"4-ene-vpa": "C045022",
"2,4-diene-vpa": "C556631",
"amisulpride": "C012052",
"tiapride": "D063325",
"nicardipine": "D009529",
"benzamide": "C037689",
"catecholamines": "D002395",
"isosorbide": "D007547",
"nitrate": "D009566",
"adrenaline": "D004837",
"epinephrine": "D004837",
"fenoprofen calcium": "D005279",
"gold": "D006046",
"beta-adrenergic blocking drugs": "D000319",
"bendrofluazide": "D001539",
"urea": "D014508",
"chloroquine": "D002738",
"azathioprine": "D001379",
"cq": "D002738",
"cidofovir": "C059262",
"guanosine": "D006151",
"amiodarone": "D000638",
"tenofovir": "C096918",
"vancomycin": "D014640",
"tenofovir disoproxil fumarate": "C418563",
"5-fluorouracil": "D005472",
"carmofur": "C017367",
"capecitabine": "C110904",
"desipramine": "D003891",
"tricaine": "C003636",
"interferon": "D007372",
"viramidine": "C026956",
"calcium carbonate": "D002119",
"1,25-dihydroxyvitamin d": "C097949",
"pamidronate": "C019248",
"vitamin d": "D014807",
"interferon-alpha": "D016898",
"amine": "D000588",
"n-acetyl serotonin": "C006389",
"melatonin": "D008550",
"amines": "D000588",
"azelastine": "C020976",
"chlorpheniramine": "D002744",
"chlorpheniramine maleate": "D002744",
"calcium carbon-ate": "D002119",
"sodium bicarbonate": "D017693",
"nondepolarizing neuromuscular blocking agents": "D003473",
"corticosteroids": "D000305",
"nd-nmba": "D003473",
"nd-nmbas": "D003473",
"vecuronium": "D014673",
"prostaglandin e2": "D015232",
"tachykinins": "D015320",
"prostaglandin (pg) e2": "D015232",
"rp 67,580": "C071693",
"sr 48,968": "C073839",
"pge2": "D015232",
"pg": "D011453",
"prostanoids": "D011453",
"thiazide": "D049971",
"hydrochlorothiazide": "D006852",
"hctc": "D006852",
"hctz": "D006852",
"amiloride": "D000584",
"chlorthalidone": "D002752",
"noradrenaline": "D009638",
"gaba": "D005680",
"glycine": "D005998",
"thiosemicarbazide": "C005151",
"amino acids": "D000596",
"hbsag": "D006514",
"hepatitis b e antigen": "D006513",
"hbeag": "D006513",
"hepatitis b vaccine": "D017325",
"prostaglandin d2": "D015230",
"prostaglandin f2 alpha": "D015237",
"acetic acid": "D019342",
"ah6809": "C053876",
"salbutamol": "D000420",
"atenolol": "D001262",
"coenzyme q10": "C024989",
"cisplatin": "D002945",
"blood urea nitrogen": "D001806",
"reduced glutathione": "D005978",
"platinum": "D010984",
"selenium": "D012643",
"zinc": "D015032",
"metformin": "D008687",
"sorafenib": "C471405",
"ca": "D002118",
"nitrates": "D009566",
"nicorandil": "D020108",
"ciprofloxacin": "D002939",
"norfloxacin": "D009643",
"fluoroquinolones": "D024841",
"azt": "D015215",
"zidovudine": "D015215",
"3'-azido-2',3'-deoxythymidine": "D015215",
"ritanserin": "D016713",
"dmso": "D004121",
"anthracyclines": "D018943",
"mitoxantrone": "D008942",
"fenoldopam": "D018818",
"theophylline": "D013806",
"low-molecular-weight heparin": "D006495",
"unfractionated heparin": "D006493",
"uh": "D006493",
"lmwh": "D006495",
"topiramate": "C052342",
"calcium phosphate": "C020243",
"spironolactone": "D013148",
"amb": "D000666",
"kainic acid": "D007608",
"ka": "D007608",
"risperidone": "D018967",
"cabergoline": "C047047",
"metformin hydrochloride": "D008687",
"vigabatrin": "D020888",
"rapamycin": "D020123",
"rapa": "D020123",
"tac": "D016559",
"adr": "D004317",
"gamma-aminobutyric acid": "D005680",
"chloralose": "D002698",
"pindolol": "D010869",
"labetalol": "D007741",
"carbachol": "D002217",
"methylatropine": "C006649",
"folic acid": "D005492",
"clarithromycin": "D017291",
"vasopressin": "D014667",
"rosen's t5 or t10 protocol": "C053519",
"magnesium": "D008274",
"glutamic acid": "D018698",
"digoxigenin": "D004076",
"cresyl violet": "C028911",
"misoprostol": "D016595",
"indomethacin": "D007213",
"bun": "D001806",
"kanamycin": "D007612",
"aminoglycosides": "D000617",
"gyki-41 099": "C025725",
"chlorpropanol": "C025725",
"tobanum": "C025725",
"gyki-41 900": "-1",
"14c-41 099": "-1",
"oral contraception": "D003276",
"l-dopa": "D007980",
"nh4cl": "D000643",
"ammonium salt": "D064751",
"unfractionated heparin sodium": "D006493",
"ufh": "D006493",
"low-molecular weight heparin": "D006495",
"ptu": "D011441",
"propylthiouracil": "D011441",
"sch": "D013390",
"droperidol": "D004329",
"aluminum": "D000535",
"cyanoacrylate": "D003487",
"dx": "D004317",
"etoposide": "D005047",
"digoxin": "D004077",
"testosterone": "D013739",
"estradiol": "D004958",
"allopurinol": "D000493",
"aminonucleoside": "D011692",
"fatty acids": "D005227",
"triacylglycerol": "D014280",
"cholesteryl esters": "D002788",
"tolazamide": "D014042",
"thiamine": "D013831",
"thiamine pyrophosphate": "D013835",
"tpp": "D013835",
"thyroxine": "D013974",
"azidothymidine": "D015215",
"3'-azido-3'dideoxythymidine": "D015215",
"phenylhydrazine": "C030299",
"phz": "C030299",
"iodine-125-metaiodobenzylguanidine": "D019797",
"radiolabeled metaiodobenzylguanidine": "D019797",
"mibg": "D019797",
"ne": "D009638",
"rauwolscine": "D015016",
"cirazoline": "C014282",
"abbott-53693": "C056299",
"progesterone": "D011374",
"levobupivacaine": "C476513",
"ropivacaine": "C037663",
"22-oxacalcitriol": "C051883",
"calcitriol": "D002117",
"oct": "C051883",
"phosphate": "D010710",
"phenylpropanolamine": "D010665",
"ppa": "D010665",
"calcium chloride": "D002122",
"4-aminopyridine": "D015761",
"cacl2": "D002122",
"nahco3": "D017693",
"13-cis-retinoic acid": "D015474",
"cis-ra": "D015474",
"eserine": "D010830",
"atropine": "D001285",
"penicillamine": "D010396",
"mellaril": "D013881",
"thioridazine": "D013881",
"thorazine": "D002746",
"chlorpromazine": "D002746",
"aventyl": "D009661",
"nortriptyline": "D009661",
"elavil": "D000639",
"amitriptyline": "D000639",
"muscimol": "D009118",
"5-hydroxytryptophan": "D006916",
"mk-212": "C014896",
"5-htp": "D006916",
"prednisone": "D011241",
"crocin": "C029036",
"streptozocin": "D013311",
"carotenoids": "D002338",
"crocins": "C029036",
"stz": "D013311",
"pimecrolimus": "C117268",
"poly-l-lactid acid": "-1",
"tacrine": "D013619",
"licl": "D018021",
"aconitine": "D000157",
"4-damp": "C042375",
"4-diphenylacetoxy-n-methylpiperidine-methiodide": "C042375",
"disulfiram": "D004221",
"lamivudine": "D019259",
"barbiturate": "C032232",
"sodium thiopenthal": "D013874",
"nik-247": "C049860",
"e-2020": "C076946",
"tramadol": "D014147",
"dosulepine hydrochloride": "D004308",
"d-penicillamine": "D010396",
"hexachloro-1:3-butadiene": "C001335",
"hcbd": "C001335",
"2-bromoethylamine": "C004504",
"bea": "C004504",
"glucose": "D005947",
"magnesium sulfate": "D008278",
"nifedipine": "D009543",
"clomiphene citrate": "D002996",
"cc": "D002996",
"pb": "D010634",
"nimesulide": "C012655",
"nsaid": "D000894",
"nonsteroidal anti-inflammatory drugs": "D000894",
"nsaids": "D000894",
"nadolol": "D009248",
"ivmp": "D008775",
"thymidine": "D013936",
"nad": "D009243",
"nicotinic acid amide": "D009536",
"poly(adp-ribose)": "D011064",
"aap": "D000082",
"ethanol": "D000431",
"oxaloacetate": "D062907",
"pyruvate": "D019289",
"naa": "D009536",
"cibenzoline": "C032151",
"digitalis": "D004070",
"mexiletine": "D008801",
"tocainide": "D016677",
"diatrizoate": "D003973",
"renografin 76%": "C027278",
"hypaque 76%": "C027278",
"renografin": "D003974",
"sodium citrate": "C102006",
"disodium edetate": "D004492",
"hypaque": "D003973",
"calcium disodium edetate": "D004492",
"prothrombin complex concentrate": "C025667",
"pcc": "C025667",
"tolterodine": "C099041",
"thalidomide": "D013792",
"(rs)-1-aminoindan-1,5-dicarboxylic acid": "C095756",
"aida": "C095756",
"(2r,4r)-4-aminopyrrolidine-2,4-dicarboxylate": "C097299",
"2r,4r-apdc": "C097299",
"isoflurane": "D007530",
"dipyrone": "D004177",
"bilirubin": "D001663",
"dobutamine": "D004280",
"lead": "D007854",
"nitrotyrosine": "C002744",
"lead acetate": "C008261",
"malondialdehyde": "D008315",
"mda": "D008315",
"fenfluramine": "D005277",
"dexfenfluramine": "D020372",
"phentermine": "D010645",
"cbdca": "D016190",
"cddp": "D002945",
"fluoxetine hydrochloride": "D005473",
"thiopentone": "D013874",
"nitrazepam": "D009567",
"mitomycin c": "D016685",
"fluorouracil": "D005472",
"5-fu": "D005472",
"dipivalyl epinephrine": "C015173",
"timolol": "D013999",
"aldosterone": "D000450",
"prostaglandin e": "D011458",
"pge(2)": "D015232",
"cl": "D002713",
"chondroitin sulfate": "D002809",
"dox": "D004317",
"sulfasalazine": "D012460",
"amikacin": "D000583",
"ecstasy": "D018817",
"mdma": "D018817",
"3,4-methylenedioxymethamphetamine": "D018817",
"hal": "D006220",
"prl": "D011388",
"t": "D013739",
"folate": "D005492",
"beta-carotene": "D019207",
"cyproterone acetate": "D017373",
"ethinylestradiol": "D004997",
"cpa": "D017373",
"ee": "D004997",
"combined oral contraceptives": "D003277",
"cocs": "D003277",
"coc": "D003277",
"levonorgestrel": "D016912",
"lindane": "D001556",
"n-nitrosodimethylamine": "D004128",
"ndma": "D004128",
"3-methylcholanthrene": "D008748",
"mc": "D008748",
"cobalt chloride": "C018021",
"iodixanol": "C044834",
"capsaicin": "D002211",
"benzisoxazole": "C441200",
"dantrolene": "D003620",
"puromycin amino-nucleoside": "D011692",
"lignocaine": "D008012",
"chlorpropamide": "D002747",
"diabenese": "D002747",
"urea nitrogen": "D001806",
"para-aminohippurate": "D010130",
"pah": "D010130",
"nitrous oxide": "D009609",
"debrisoquine": "D003647",
"metoprolol": "D008790",
"sparteine": "D013034",
"alpha-hydroxymetoprolol": "C029504",
"cefotetan": "D015313",
"penicillins": "D010406",
"cephalosporins": "D002511",
"rifampicin": "D012293",
"venlafaxine": "C047426",
"tranylcypromine": "D014191",
"l-dopa/benserazide": "C005177",
"physostigmine": "D010830",
"5-mdot": "-1",
"ccnu": "D008130",
"lomustine": "D008130",
"1-(2-chloroethyl)-3-cyclohexyl-1-nitrosourea": "D008130",
"3 alpha-hydroxy pregnane-20-ones": "D011374",
"deoxycorticosterone": "D003900",
"3 alpha-hydroxy pregnane-21-diol-20-ones": "D003900",
"n-butyl-deoxynojirimycin": "C059896",
"sc-48334": "C059896",
"n10-propargyl-5,8-dideazafolic acid": "C031662",
"cb 3717": "C031662",
"ethopropazine": "C084820",
"benztropine": "D001590",
"fluphenazine enanthate": "C017610",
"procyclidine": "D011352",
"procyclindine": "D011352",
"alpha-tocopherol": "D024502",
"deferoxamine": "D003676",
"ma": "D008694",
"alpha-tc": "D024502",
"dfo": "D003676",
"ifs": "D007069",
"dizocilpine": "D016291",
"apomophine": "D001058",
"prostigmine": "D009388",
"nitrolingual": "D005996",
"m": "D008775",
"triamcinolone": "D014221",
"antipurine": "D000983",
"lometrexol": "C045894",
"ddathf": "C045894",
"glycinamide ribonucleotide": "C402896",
"purine": "D011687",
"tyr-d-ala-gly-nme-phe-gly-ol": "D020875",
"morphiceptin": "C028889",
"trans-3,4-dichloro-n-methyl-n[2-(1-pyrrolidinyl) cyclohexyl]benzeneactemide": "D019900",
"[d-pen2.5]-enkephalin": "D020881",
"[d-ser2]-[leu]enkephalin-thr": "C034318",
"8-bromo cyclic adenosine monophosphate": "D015124",
"cyclic adenosine monophosphate": "D000242",
"h": "D006859",
"anastrozole": "C090450",
"nitrite": "D009573",
"aminoguanidine": "C004479",
"ag": "C004479",
"phenylephrine": "D010656",
"carteolol hydrochloride": "D002354",
"carteolol": "D002354",
"biperiden": "D001712",
"antiplatelet agents": "D010975",
"sevoflurane": "C009250",
"umb24": "C519696",
"sm 21": "C107044",
"1-(2-phenethyl)-4-(2-pyridyl)-piperazine": "C519696",
"3alpha-tropanyl-2-(4-chorophenoxy)butyrate": "C107044",
"methimazole": "D008713",
"sodium valproate": "D014635",
"zonisamide": "C022189",
"milrinone": "D020105",
"halothane": "D006221",
"enflurane": "D004737",
"trifluoroacetyl": "D014269",
"bupivacaine hydrochloride": "D002045",
"mepivacaine hydrochloride": "D008619",
"lidocaine hydrochloride": "D008012",
"mepivacaine": "D008619",
"aryl-piperazine": "-1",
"buspirone": "D002065",
"5-hydroxytryptaminergic agonists": "D058825",
"5-hydroxytryptamine": "D012701",
"chloride": "D002712",
"cbz": "D002220",
"fenoldopam mesylate": "D018818",
"nitroprusside": "D009599",
"hibiscus rosa sinensis": "D010936",
"5 flourouracil": "D005472",
"heroin": "D003932",
"buprenorphine": "D002047",
"cytosine arabinoside": "D003561",
"atorvastatin": "C065179",
"atorva": "C065179",
"dex": "D003907",
"kainate": "D007608",
"formalin": "D005557",
"carrageenan": "D002351",
"mycophenolate mofetil": "C063008",
"n-nitro-l-arginine-methyl ester": "D019331",
"l-name": "D019331",
"l-arginine": "D001120",
"technetium-99m sestamibi": "D017256",
"predobutamine": "-1",
"prednisolone": "D011239",
"d-tubocurarine": "D014403",
"ketoprofen": "D007660",
"adenosine diphosphate": "D000244",
"mp": "D008775",
"vincristine": "D014750",
"adalat": "D009543",
"cyclosporin a": "D016572",
"fk506": "D016559",
"fujimycine": "D016559",
"periodic acid": "D010504",
"diethylnitrosamine": "D004052",
"den": "D004052",
"mmc": "D016685",
"phlorizin": "D010695",
"streptozotocin": "D013311",
"p": "D010695",
"metoclopramide": "D008787",
"penicillin g": "D010400",
"penicillin-g": "D010400",
"s-ketamine": "-1",
"l-dihydroxyphenylalanine": "D007980",
"alpha-methyl-para-tyrosine": "D019805",
"tranexamic acid": "D014148",
"txa": "D014148",
"pyrazinamide": "D011718",
"difluoromethylornithine": "D000518",
"dfmo": "D000518",
"parachlorophenylalanine": "-1",
"pcpa": "-1",
"5-hydroxyindoleacetic acid": "D006897",
"5-hiaa": "D006897",
"dextromethorphan": "D003915",
"dm": "D003915",
"3-hydroxy-n-methylmorphinan": "D007981",
"trazodone": "D014196",
"quinidine phenylethylbarbiturate": "C033457",
"quinidine": "D011802",
"phenylethylbarbiturate": "C033457",
"quinine": "D011803",
"alpha-benzene hexachloride": "D001556",
"ethyl-alpha-p-chlorophenoxyisobutyrate": "C012282",
"lactic acid": "D019344",
"cholesteryl hemisuccinate": "C013440",
"carbon tetrachloride": "D002251",
"chloroform": "D002725",
"galactosamine": "D005688",
"tris salt": "-1",
"cs": "-1",
"ccl4": "D002251",
"gamma-cholesteryloxybutyric acid": "C103872",
"cse": "-1",
"chcl3": "D002725",
"bortezomib": "C400082",
"telmisartan": "C084178",
"oxacillin": "D010068",
"naloxazone": "C024224",
"dexrazoxane": "D064730",
"epipodophyllotoxin": "D011034",
"icrf-187": "D064730",
"aniracetam": "C036466",
"ro 13-5057": "C036466",
"1-anisoyl-2-pyrrolidinone": "C036466",
"cycloheximide": "D003513",
"piracetam": "D010889",
"pyrrolidinone": "D011760",
"mecamylamine": "D008464",
"myocet": "D004317",
"bn 52021": "C045856",
"benzylacyclouridine": "C034753",
"uridine": "D014529",
"urd": "D014529",
"bau": "C034753",
"cp": "D003520",
"acrolein": "D000171",
"fluphenazine": "D005476",
"sch 23390": "C534628",
"sulpiride": "D013469",
"skf 38393": "D015647",
"quinpirole": "D019257",
"dextran": "D003911",
"etodolac": "D017308",
"e": "D017308",
"n-acylimidazole": "-1",
"eai": "-1",
"all- trans-retinoic acid": "D014212",
"atra": "D014212",
"valsartan": "C081489",
"angiotensin ii": "D000804",
"dihydropyridine": "C038806",
"kf17837": "C081198",
"adenosine": "D000241",
"cgs 21680": "C061282",
"l-3,4-dihydroxyphenylalanine": "D007980",
"benserazide": "D001545",
"methoxamine": "D008729",
"nomifensine": "D009627",
"citalopram": "D015283",
"t4": "D013974",
"tri-iodo-thyronine": "D014284",
"t3": "D014284",
"m-chlorophenylpiperazine": "C015068",
"mcpp": "C015068",
"pentylenetetrazole": "D010433",
"sulphasalazine": "D012460",
"acetylsalicylic acid": "D001241",
"asa": "D001241",
"procainamide": "D011342",
"tiopronin": "D008625",
"thiosulphate": "-1",
"diphenylhydantoin": "D010672",
"dph": "D010672",
"pgf2alpha": "D015237",
"pgi2": "D011464",
"edaravone": "C005435",
"streptomycin": "D013307",
"glycopyrrolate": "D006024",
"pregnenolone sulphate": "C018370",
"pregs": "C018370",
"11-ketopregnenolone sulphate": "-1",
"epipregnanolone ([3beta-hydroxy-5beta-pregnan-20-one] sulphate": "C018370",
"epipregnanolone sulphate": "C018370",
"5,7-dichlorokynurenic acid": "C066192",
"5,7-dcka": "C066192",
"cholesterol": "D002784",
"quipazine": "D011814",
"6-thioguanine": "D013866",
"sodium 2-mercaptoethane sulphonate": "D015080",
"thiol": "D013438",
"if": "D007069",
"hypaque 76": "C027278",
"n-nitroso-n-methylurea": "D008770",
"testosterone propionate": "D043343",
"tp": "D043343",
"msh": "D009074",
"acth": "D000324",
"d-med": "D020927",
"medetomidine": "D020926",
"idazoxan": "D019329",
"dg-5128": "C032368",
"alf": "D015760",
"imipenem": "D015378",
"imipenem/cilastatin": "C044650",
"beta-lactam": "D047090",
"bethanechol": "D018723",
"2,3,5-triphenyltetrazolium": "C009591",
"rifampin": "D012293",
"oxytocin": "D010121",
"cyp": "D003520",
"fluocinolone acetonide": "D005446",
"fa": "D005446",
"dexatrim": "D010665",
"lsd": "D008238",
"pj34": "C434926",
"fluconazole": "D015725",
"tropicamide": "D014331",
"acetazolamide": "D000086",
"hydrogen peroxide": "D006861",
"formyl-methionyl-leucyl-phenylalanine": "D009240",
"fmlp": "D009240",
"pentamidine": "D010419",
"pentamidine isethionate": "D010419",
"oxypurines": "-1",
"nucleosides": "D009705",
"acetylsalicylate": "D001241",
"echothiophate iodide": "D004456",
"androgen": "D000728",
"androgens": "D000728",
"ketone": "D007659",
"tiazofurin": "C033706",
"ro15-1788": "D005442",
"phentolamine": "D010646",
"hydroxychloroquine": "D006886",
"dextrose": "D005947"
},
"disease2id": {
"tricuspid valve regurgitation": "D014262",
"toxicity": "D064420",
"tricuspid regurgitation": "D014262",
"atrial flutter": "D001282",
"congestive heart failure": "D006333",
"cardiac disease": "D006331",
"congenital heart disease": "D006331",
"neurologic depression": "D003866",
"cyanosis": "D003490",
"cardiac arrhythmia": "D001145",
"dyskinesia": "D004409",
"neurologically-impaired": "D009422",
"neurologic impairment": "D009422",
"seizures": "D012640",
"movement disorders": "D009069",
"epileptic": "D004827",
"drowsiness": "D006970",
"catalepsy": "D002375",
"hyperactivity": "D006948",
"cardiac hypertrophy": "D006332",
"hypertrophic": "D006984",
"hypertrophy": "D006984",
"hyperplasia": "D006965",
"carcinogenic": "D063646",
"forestomach carcinogenesis": "D013274",
"forestomach tumorigenesis": "D013274",
"forestomach tumors": "D013274",
"squamous cell papilloma": "D010212",
"carcinoma": "D002277",
"epithelial hyperplasia": "D017573",
"tumors": "D009369",
"papillomas": "D010212",
"muscle rigidity": "D009127",
"rigidity": "D009127",
"cardiovascular depression": "D002318",
"respiratory depression": "D012131",
"bradycardias": "D001919",
"muscular rigidity": "D009127",
"cerebral sinus thrombosis": "D012851",
"menorrhagia": "D008595",
"sagittal sinus thrombosis": "D020225",
"left transverse sinus thrombosis": "D020227",
"blood loss": "D006473",
"thromboembolic disease": "D013923",
"hemorrhagic": "D006470",
"cystitis": "D003556",
"nausea": "D009325",
"vomiting": "D014839",
"pain": "D010146",
"adenomas": "D000236",
"focal nodular hyperplasia": "D020518",
"adenoma": "D000236",
"thromboembolism": "D013923",
"thrombocytopenia": "D013921",
"arterial occlusion": "D001157",
"thrombi": "D013927",
"ischemia": "D007511",
"gastrointestinal symptoms": "D005767",
"musculoskeletal symptoms": "D009140",
"ischemic": "D007511",
"platelet aggregation": "D001791",
"acute liver failure": "D017114",
"acute liver injury": "D056486",
"liver injury": "D056486",
"liver disease": "D008107",
"substance abuse": "D019966",
"liver failure": "D017093",
"psychosis": "D011605",
"psychotic disorders": "D011605",
"schizophrenia": "D012559",
"paranoid type schizophrenia": "D012563",
"convulsive": "D012640",
"convulsions": "D012640",
"epileptiform activity": "D004827",
"epilepsy": "D004827",
"epilepsies": "D004827",
"bradycardia": "D001919",
"aggressiveness": "D001523",
"aggressive behaviors": "D001523",
"psychiatric disorders": "D001523",
"myoclonus": "D009207",
"idiopathic generalized epilepsies": "C562694",
"ige": "C562694",
"myoclonic jerks": "D009207",
"mj": "D009207",
"myoclonic status": "D009207",
"dyskinesias": "D004409",
"parkinson disease": "D010300",
"dyskinetic": "D004409",
"drug-induced dyskinesias": "D004409",
"amnesia": "D000647",
"inability to repeat words": "D000647",
"retrograde amnesia": "D000648",
"hepatitis b": "D006509",
"rubella": "D012409",
"tumor": "D009369",
"heart failure": "D006333",
"nephrosis": "D009401",
"nephropathy": "D007674",
"glomerular and late-onset tubular lesions": "D007674",
"mitochondrial injury": "D028361",
"glomerular and tubular injury": "D007674",
"glomerular and tubular lesions": "D007674",
"tubular lesions": "D007674",
"glomerular lesions": "D007674",
"renal lesions": "D007674",
"aids": "D000163",
"seizure": "D012640",
"grand mal seizures": "D004830",
"cryptococcal meningitis": "D016919",
"alcohol abuse": "D000437",
"nephrotoxicity": "D007674",
"decreased auditory function": "D034381",
"auditory loss": "D034381",
"cardiomyopathy": "D009202",
"cardiotoxic": "D066126",
"cardiotoxicity": "D066126",
"parkinson's disease": "D010300",
"parkinsonism": "D010302",
"bradykinesia": "D018476",
"temporal lobe epilepsy": "D004833",
"tle": "D004833",
"status epilepticus": "D013226",
"brain damage": "D001930",
"elevated intraocular pressure": "D009798",
"iop rise": "D009798",
"kaposi's sarcoma": "D012514",
"oedema": "D004487",
"toxicities": "D064420",
"neutropenia": "D009503",
"thrombosis": "D013927",
"cardiac arrhythmias": "D001145",
"overdose": "D062787",
"tonic-clonic seizure": "D012640",
"neuropathy": "D009422",
"ventricular fibrillation": "D014693",
"venous thrombosis": "D020246",
"deep venous thrombosis": "D020246",
"coronary stenosis": "D023921",
"coronary occlusion": "D054059",
"coronary stenoses": "D023921",
"hyperemic": "D006940",
"hyperemia": "D006940",
"renal insufficiency": "D051437",
"renal function significantly deteriorated": "D058186",
"renal failure": "D051437",
"weight loss": "D015431",
"diabetes insipidus": "D003919",
"liver tumors": "D008113",
"hamartoma": "D006222",
"hepatoma": "D006528",
"rupture": "D012421",
"graft-versus-host disease": "D006086",
"sinusoidal obstruction syndrome": "D006504",
"microangiopathy": "D014652",
"gvhd": "D006086",
"myelodysplastic syndrome": "D009190",
"mds": "D009436",
"acute myeloid leukemia": "D015470",
"aml": "D015470",
"mucositis": "D052016",
"transplantation-associated microangiopathy": "D014652",
"tma": "D014652",
"acute renal failure": "D058186",
"sos": "D006504",
"hyperthyroidism": "D006980",
"drug-induced acute liver injury": "D056486",
"sepsis": "D018805",
"coagulopathy": "D001778",
"hepatotoxicity": "D056486",
"acute liver insult": "D017114",
"hepatotoxic": "D056486",
"cardiovascular disease": "D002318",
"osteoporosis": "D010024",
"dementia": "D003704",
"cancer": "D009369",
"gallbladder disease": "D005705",
"fractures": "D050723",
"menstrual disorders": "D008599",
"venous thrombo-embolism": "D054556",
"stroke": "D020521",
"breast cancer": "D001943",
"colon cancer": "D003110",
"gliomas": "D005910",
"brain edema": "D001929",
"elevated icp": "D019586",
"brain tumor": "D001932",
"malignant glioma": "D005910",
"metastases": "D009362",
"meningioma": "D008579",
"edematous": "D004487",
"glioma": "D005910",
"edema": "D004487",
"postoperative myalgia": "D010149",
"myalgia": "D063806",
"fasciculation": "D005207",
"muscle fasciculation": "D005207",
"sinusitis": "D012852",
"diarrhea": "D003967",
"flatulence": "D005414",
"headache": "D006261",
"facial flush": "D005483",
"rheumatoid arthritis": "D001172",
"ra": "D001172",
"thrombotic cardiovascular": "D002318",
"gi aes": "D005767",
"hypertension": "D006973",
"extrapyramidal symptoms": "D001480",
"eps": "D001480",
"bipolar mania": "D001714",
"akathisia": "D017109",
"tremor": "D014202",
"extrapyramidal syndrome": "D001480",
"hypertensive": "D006973",
"increase in map": "D006973",
"neurotoxic": "D020258",
"degenerating myelinated fibers": "D009410",
"akinesia": "D004409",
"delusional parasitosis": "D063726",
"chronic hepatitis c": "D019698",
"depression": "D003866",
"psychogenic parasitosis": "D063726",
"neuroleptic malignant syndrome": "D009459",
"depressive symptoms": "D003866",
"psychiatric": "D001523",
"insomnia": "D007319",
"loss of appetite": "D001068",
"agitation": "D011595",
"psychomotor retardation": "D011596",
"tremors": "D014202",
"fever": "D005334",
"nms": "D009459",
"depressive": "D003866",
"dehydration": "D003681",
"malnutrition": "D044342",
"impairment in auditory location discrimination": "D001308",
"deficits in auditory discrimination": "D001308",
"impairment in auditory discrimination": "D001308",
"impaired auditory location discrimination": "D001308",
"myocardial infarction": "D009203",
"cardiovascular toxicity": "D002318",
"decreased renal function": "D051437",
"left ventricular dysfunction": "D018487",
"chf": "D006333",
"reduction in renal function": "D051437",
"diabetes": "D003920",
"renal impairment": "D007674",
"attention deficit disorder": "D001289",
"movement disorder": "D009069",
"emesis": "D014839",
"hyperammonemia": "D022124",
"incoordination": "D001259",
"analgesia": "D000699",
"cin": "D007674",
"injury to the kidney": "D058186",
"failure of kidney function": "D051437",
"loss of kidney function": "D051437",
"renal damage": "D058186",
"renal injury": "D058186",
"congenital heart diseases": "D006331",
"ventricular tachycardia": "D017180",
"ventricular ectopy": "D018879",
"arrhythmia": "D001145",
"ventricular arrhythmias": "D001145",
"optic neuropathy": "D009901",
"axonal degeneration": "D009410",
"tuberculosis": "D014376",
"visual deficits": "D014786",
"optic neuropathies": "D009901",
"thrombotic": "D013927",
"arthritis": "D001168",
"osteoarthritis": "D010003",
"sexual dysfunction": "D020018",
"sexually dysfunctional": "D020018",
"erectile dysfunction": "D007172",
"parkinsonian": "D020734",
"pd": "D010300",
"cardiopulmonary arrest": "D006323",
"myoclonic seizures": "D004831",
"absence seizures": "D004832",
"atypical absence seizures": "D004832",
"ceroid lipofuscinosis": "D009472",
"hypotension": "D007022",
"hypotensive": "D007022",
"cardiac dysfunction": "D006331",
"atrioventricular block": "D054537",
"sinus tachycardias": "D013616",
"bradyarrhythmias": "D001919",
"atrioventricular conduction delay": "D054537",
"akinetic": "D018476",
"memory impairment": "D008569",
"cognitive dysfunction": "D003072",
"haemopericardium": "D010490",
"gastrointestinal haemorrhage": "D006471",
"haemorrhage": "D006470",
"myocardial injury": "D009202",
"myocardial damage": "D009202",
"myocardial hyperactivity": "D009202",
"ischaemic injury": "D007511",
"obese": "D009765",
"obesity": "D009765",
"ob": "D009765",
"renal toxicity": "D007674",
"hepatic toxicity": "D056486",
"sinus node dysfunction": "D012804",
"atrioventricular (av) block": "D054537",
"syncopal attacks": "D013575",
"mi": "D009203",
"ischemic myocardium": "D017202",
"cardiac damage": "D006331",
"pruritus": "D011537",
"apnea": "D001049",
"alzheimer's disease": "D000544",
"ad": "D000544",
"impairments of learning": "D007859",
"impairments of memory": "D008569",
"hiccups": "D006606",
"pharyngitis": "D010612",
"hemorrhage": "D006470",
"bleeding": "D006470",
"intracranial bleeding": "D020300",
"haemorrhagic": "D006470",
"myocarditis": "D009205",
"migraine without aura": "D020326",
"migraine with aura": "D020325",
"migraineurs": "D008881",
"strokes": "D020521",
"endometrial cancer": "D016889",
"breast-cancer": "D001943",
"vascular events": "D014652",
"hypertriglyceridaemia": "D015228",
"pupillary oscillation": "D011681",
"paranoia": "D010259",
"cip": "D010259",
"myopathy": "D009135",
"necrotizing": "D009336",
"subarachnoid haemorrhage": "D013345",
"sah": "D013345",
"zollinger-ellison syndrome": "D015043",
"systemic mastocytosis": "D034721",
"impotence": "D007172",
"hematologic toxicity": "D006402",
"reye": "D012202",
"reye syndrome": "D012202",
"st. anthony's fire": "D004881",
"vasospasm": "D014652",
"gangrene": "D005734",
"migraine headache": "D008881",
"migraine": "D008881",
"hypokalemia": "D007008",
"sexual side effects": "D020018",
"male impotence": "D007172",
"obsessive compulsive disorder": "D009771",
"trichotillomania": "D014256",
"anxiety": "D001008",
"affective disorders": "D019964",
"hypersensitivity": "D004342",
"hepatitis": "D056486",
"myocardial hypertrophy": "D006332",
"cardiac toxicity": "D066126",
"idiopathic parkinson's disease": "D010300",
"homonymous hemianopsia": "D006423",
"nephrotic syndrome": "D009404",
"hematological abnormalities": "D006402",
"renal dysfunction": "D007674",
"hyperlipidemia": "D006949",
"left ventricular apical ballooning syndrome": "D054549",
"takotsubo cardiomyopathy": "D054549",
"apical ballooning": "D054549",
"apical ballooning syndrome": "D054549",
"locomotor hyperactivity": "D006948",
"degeneration of dopaminergic neurons": "D009410",
"leukocytoclastic vasculitis": "C535509",
"lv": "C535509",
"cutaneous small vessel vasculitis": "C565222",
"skin eruptions": "D012871",
"skin lesion": "D012871",
"lv cutaneous lesions": "D018366",
"spinal cord ischemia": "D020760",
"aortic occlusion": "D001157",
"spastic paraparesis": "D020336",
"cocaine abuse": "D019970",
"amphetamine abuse": "D019969",
"drug abuse": "D019966",
"lid": "D004409",
"opioid addicts": "D009293",
"pain-intolerant": "D006930",
"renal tubular dysfunction": "D005198",
"proteinuria": "D011507",
"pituitary tumors": "D010911",
"cancers": "D009369",
"carcinogenesis": "D063646",
"inflammatory myopathy": "D009220",
"non-insulin-dependent diabetes mellitus": "D003924",
"hypercholesterolemia": "D006937",
"hypothyroidism": "D007037",
"autoimmune thyroiditis": "D013967",
"cm": "D009202",
"myocardial disease": "D009202",
"aplastic anemia": "D000741",
"cataract": "D002386",
"pancytopenia": "D010198",
"bone marrow hypoplasia": "D001855",
"ocular toxicity": "D005128",
"schizophrenic": "D012559",
"nephrotoxic": "D007674",
"increased cardiac output": "D016534",
"rise in resting blood pressure": "D006973",
"acute declines in renal function": "D058186",
"cholestasis": "D002779",
"coronary artery disease": "D003324",
"left ventricular end-diastolic volume falls": "D002303",
"prolongation of the qt interval": "D008133",
"gastrointestinal motility disorders": "D015835",
"prolongation of qt interval": "D008133",
"torsades de pointes": "D016171",
"sudden cardiac death": "D016757",
"gastroesophageal reflux disorder": "D005764",
"syncope": "D013575",
"qt-interval prolongation": "D008133",
"ovarian cancer": "D010051",
"myelosuppression": "D001855",
"leukopenia": "D007970",
"granulocytopenia": "D000380",
"neurotoxicity": "D020258",
"(insulin-dependent) diabetes mellitus": "D003922",
"iddm": "D003922",
"gastrointestinal (gi) toxicity": "D005767",
"gi toxicity": "D005767",
"post-transplant lmphoproliferate disease": "D008232",
"ptld": "D008232",
"hemolytic anemia": "D000743",
"infections": "D007239",
"ototoxicity": "D006311",
"angioedema": "D000799",
"abdominal pain": "D015746",
"ascites": "D001201",
"intestinal": "D007410",
"liver toxicity": "D056486",
"inflammation": "D007249",
"necrosis": "D009336",
"steatosis": "D005234",
"pheochromocytoma": "D010673",
"hyperkinesia": "D006948",
"psychotic symptoms": "D011605",
"stereotypies": "D019956",
"asystolic": "D006323",
"cardiac arrest": "D006323",
"tonic-clonic seizures": "D004830",
"asystole": "D006323",
"renal papillary necrosis": "D007681",
"rpn": "D007681",
"ischaemic heart disease": "D017202",
"cardiac failure": "D006333",
"impaired glucose tolerance": "D018149",
"gout": "D006073",
"raynaud's phenomenon": "D011928",
"dyspnoea": "D004417",
"corneal disease": "D003316",
"myocardial ischaemia": "D017202",
"systemic lupus erythematosus": "D008180",
"sle": "D008180",
"renal involvement": "D007674",
"arthralgia": "D018771",
"myositis": "D009220",
"muscular weakness": "D018908",
"muscular atrophy": "D009133",
"polymyositis": "D017285",
"affection of the musculoskeletal system": "D009140",
"adenovirus disease": "D000257",
"respiratory syncytial virus infection": "D018357",
"hepatitis c": "D006526",
"infection with hemorrhagic fever viruses": "D006482",
"anemia": "D000740",
"adenovirus infection": "D000257",
"adenovirus": "D000257",
"pneumonia": "D011014",
"digeorge syndrome": "D004062",
"progressive renal failure": "D058186",
"immunodeficiency": "D007153",
"infection": "D007239",
"tachyarrhythmias": "D013610",
"cholestatic": "D002779",
"hepatic injury": "D056486",
"alcoholic hepatitis": "D006519",
"cirrhosis of the liver": "D008103",
"cholestatic injury": "D002779",
"hepatomegaly": "D006529",
"cataleptic": "D002375",
"osteomyelitis": "D010019",
"fanconi syndrome": "D005198",
"leukoencephalopathy": "D056784",
"cerebrovascular accident": "D002544",
"leukaemia": "D007938",
"cerebrovascular accidents": "D002544",
"lesions within the subcortical white matter": "D056784",
"white matter abnormalities": "D056784",
"cytotoxic oedema within cerebral white matter": "D001929",
"infected with hepatitis c virus": "D006526",
"chronic infection with hepatitis c virus": "D019698",
"cirrhosis": "D005355",
"hepatocellular carcinoma": "D006528",
"end-stage liver disease": "D058625",
"hcv infection": "D006526",
"hemolysis": "D006461",
"renal disorders": "D007674",
"cardiovascular disorders": "D002318",
"milk-alkali syndrome": "D006934",
"hypercalcemia": "D006934",
"acute renal insufficiency": "D058186",
"metabolic alkalosis": "D000471",
"hypocalcemia": "D006996",
"chc": "D019698",
"riha": "D000743",
"catatonia": "D002389",
"spring allergic rhinitis": "D006255",
"altered taste perception": "D013651",
"seasonal allergic rhinitis": "D006255",
"hypercalcaemia": "D006934",
"nephrolithiasis": "D053040",
"paralysis": "D010243",
"muscle weakness": "D018908",
"respiratory insufficiency": "D012131",
"weakness": "D018908",
"acidosis": "D000138",
"loss of thick, myosin filaments": "D009135",
"pathology at both the neuromuscular junction": "D009468",
"hepatic dysfunction": "D008107",
"bladder hyperactivity": "D053201",
"diastolic hypertension": "C563897",
"arrhythmias": "D001145",
"coronary disease": "D003327",
"ventricular ectopic beats": "D018879",
"respiratory paralysis": "D012133",
"respiratory arrest": "D012131",
"infection by hepatitis b virus": "D006509",
"hyperalgesic": "D006930",
"hyperalgesia": "D006930",
"atrial tachyarrhythmia": "D013617",
"siat": "D013617",
"tachycardia": "D013610",
"acute renal injury": "D058186",
"renal tissue damage": "D007674",
"vascular dysfunction": "D014652",
"kidney dysfunction": "D007674",
"postoperative delirium": "D011183",
"delirium": "D003693",
"hip fracture": "D006620",
"mental disorders": "D001523",
"coronary artery spasm": "D003329",
"renal cell carcinoma": "D002292",
"chest pain": "D002637",
"subendocardial infarction": "D009203",
"stable angina": "D060050",
"anxious behaviour": "D001008",
"dilated cardiomyopathy": "D002311",
"dcm": "D002311",
"hiv-infected": "D015658",
"acquired immunodeficiency syndrome": "D000163",
"cardiac and skeletal myopathies": "C538496",
"cardiac dilation": "D002311",
"chorea": "D002819",
"encephalopathy": "D001927",
"nonketotic hyperglycinemia": "D020158",
"disorder of amino acid metabolism": "D000592",
"hypotonia": "D009123",
"language delay": "D007805",
"mental retardation": "D008607",
"hypoxia": "D000860",
"renal disease": "D007674",
"diseased kidney": "D007674",
"tubulointerstitial injury": "-1",
"hypoxic": "D000860",
"glomerular diseases": "D007674",
"autoimmune diseases": "D001327",
"systemic sclerosis": "D012595",
"multiple sclerosis": "D009103",
"heart damage": "D006331",
"autoimmune disease": "D001327",
"arteritis": "D001167",
"thromboembolic": "D013923",
"refractory seizures": "D004827",
"metabolic acidosis": "D000138",
"neutropenic": "D009503",
"hematological disorders": "D006402",
"fungal infection": "D009181",
"epileptic seizures": "D004827",
"excitotoxicity": "-1",
"neurodegeneration": "D019636",
"hyperprolactinemia": "D006966",
"prolactinomas": "D015175",
"delayed puberty": "D011628",
"bipolar disorder": "D001714",
"psychoses": "D011605",
"cholestatic jaundice": "D041781",
"jaundice": "D007565",
"visual field constriction": "D014786",
"visual field loss": "D014786",
"facial dysmorphism": "-1",
"posttransplant lymphoproliferative disorder": "D008232",
"pneumocystis carinii pneumonia": "D011020",
"infectious mononucleosis": "D007244",
"bronchiolitis obliterans": "D001989",
"aphtous ulcers": "D013281",
"structural and functional impairment of mitochondria": "D028361",
"cardiovascular arrhythmias": "D001145",
"swelling": "D004487",
"mitochondrial structural and functional impairment": "D028361",
"birth defects": "D000014",
"cleft lip": "D002971",
"cleft palate": "D002972",
"systemic lupus erythematodes": "D008180",
"stillbirth": "D050497",
"lupus": "D008180",
"irritable bowel syndrome": "D043183",
"disordered gastrointestinal motility": "D005767",
"ibs": "D043183",
"constipation": "D003248",
"diarrhoea": "D003967",
"gastroenteritis": "D005759",
"ventricular dysrhythmias": "D001145",
"dysrhythmias": "D001145",
"nephrogenic diabetes insipidus": "D018500",
"head injury": "D006259",
"polyuric": "D011141",
"polyuria": "D011141",
"bone tumor": "D001859",
"bone tumors": "D001859",
"ventricular dilation": "D002311",
"cardiac abnormalities": "D006331",
"venous complications": "D014652",
"tetany": "D013746",
"rhabdomyolysis": "D012206",
"hypomagnesemia": "C537153",
"muscle spasms": "D013035",
"neuronal degeneration": "D009410",
"neuronal loss": "D009410",
"impairment of renal reabsorption": "D007674",
"rheumatic fever": "D012213",
"coma": "D003128",
"encephalopathic": "D001927",
"hit": "D013921",
"vasculitis": "D014657",
"turner syndrome": "D014424",
"graves' disease": "D006111",
"purpura": "D011693",
"purpuric lesions": "D011693",
"masseter muscle rigidity": "D014313",
"malignant hyperthermia": "D008305",
"jaw of steel": "D014313",
"neurological dysfunction": "D009422",
"asphyxia": "D001237",
"malformations": "D000014",
"periventricular leukomalacia": "D007969",
"neurological dysfunctions": "D009422",
"ventricular septal defect": "D018658",
"ventricular septal rupture": "D018658",
"septal rupture": "D018658",
"angioneurotic edema": "D000799",
"quincke's disease": "D000799",
"upper-airway obstruction": "D000402",
"drug allergies": "D004342",
"tongue swelling": "D014060",
"visual hallucinations": "D006212",
"chronic renal failure": "D007676",
"uremia": "D014511",
"end-stage renal disease": "D007676",
"esrd": "D007676",
"glomerulonephritis": "D005921",
"coronary heart disease": "D003327",
"rheumatic heart disease": "D012214",
"decrease in sexual desire": "D020018",
"albuminuria": "D000419",
"nephrotic": "D009404",
"acute hepatic necrosis": "D017114",
"fulminant hepatic failure": "D017093",
"viral hepatitis": "D006525",
"wernicke's encephalopathy": "D014899",
"diabetic": "D003920",
"wernicke-korsakoff syndrome": "D020915",
"myocardial ischemia": "D017202",
"angina": "D000787",
"contralateral rotation": "D009069",
"substantia nigra lesion": "-1",
"sn lesion": "-1",
"ipsilateral circling": "D009069",
"rotational behavior": "D009069",
"rotation": "D009069",
"hypothyroid": "D007037",
"immunodeficient": "C565469",
"anaemia": "D000740",
"reticulocytosis": "D045262",
"vacuolar degeneration of myocardial cells": "C536522",
"myocardial impairment": "D009202",
"vacuolar degeneration": "C536522",
"amnestic syndrome": "D000647",
"alzheimer": "D000544",
"apnoea": "D001049",
"orthostatic hypotension": "D007024",
"cerebral venous sinus thrombosis": "D012851",
"internal carotid artery thrombosis": "D002341",
"hemiparesis": "D010291",
"diabetes mellitus": "D003920",
"dm": "D003920",
"infarct": "D002544",
"occlusion of the left internal carotid artery": "D001157",
"venous sinus thrombosis": "D012851",
"cerebral artery occlusion": "D002544",
"the venous sinus occlusion": "-1",
"thrombosis of the ica": "D002341",
"thrombosis of the venous sinus": "D012851",
"heart block": "D006327",
"blindness": "D001766",
"deterioration of vision": "D015354",
"fatiguability": "D005221",
"dizziness": "D004244",
"retinopathy": "D012164",
"right bundle branch block": "D002037",
"blind": "D001766",
"secondary hyperparathyroidism": "D006962",
"low bone turnover": "D001851",
"suppression of bone turnover": "D001851",
"adynamic bone disease": "D001851",
"impaired renal function": "D007674",
"hyperphosphatemia": "D054559",
"woven osteoid": "-1",
"fibrosis": "D005355",
"non-small cell lung cancer": "D002289",
"ovarian carcinoma": "D010051",
"breast carcinoma": "D001943",
"nsclc": "D002289",
"polyneuropathy": "D011115",
"cerebral hemorrhage": "D002543",
"subarachnoid hemorrhage": "D013345",
"vt": "D017180",
"vf": "D014693",
"ataxia": "D001259",
"anorexia": "D000855",
"visual halos or blurring": "D014786",
"thyroid function abnormalities": "D013959",
"pulmonary interstitial infiltrates": "-1",
"central nervous system toxicity": "D002493",
"neuroblastoma": "D009447",
"rash": "D005076",
"mydriasis": "D015878",
"clonic-tonic convulsions": "D004830",
"skin rashes": "D005076",
"myasthenia gravis": "D009157",
"elastosis perforans serpiginosa": "C536202",
"wilson's disease": "D006527",
"supraventricular tachycardia": "D013617",
"left bundle branch block": "D002037",
"heart disease": "D006331",
"cardiac complications": "D005117",
"hyperglycemic": "D006943",
"acidotic": "D000140",
"kearns-sayre syndrome": "D007625",
"lethargy": "D053609",
"somnolence": "D006970",
"polydipsia": "D059606",
"polyphagia": "D006963",
"hyperglycemia": "D006943",
"lactic acidosis": "D000140",
"ketosis": "D007662",
"respiratory failure": "D012131",
"metabolic-endocrine failure": "-1",
"learning impairment": "D007859",
"cognitive deficits": "D003072",
"neurodegenerative diseases": "D019636",
"dermatitis": "D003872",
"rosacea": "D012393",
"eruptions": "D003875",
"facial dermatitis": "D005148",
"telangiectasia": "D013684",
"hippocampal damage": "D001930",
"damage of neuronal cells": "D001930",
"arrhythmic": "D001145",
"peripheral neuropathy": "D010523",
"alcohol dependence": "D000437",
"loss of vision": "D014786",
"headaches": "D006261",
"paraesthesia": "D010292",
"numbness": "D006987",
"scotomata": "D012607",
"hepatitis b virus (hbv) infection": "D006509",
"liver cirrhosis": "D008103",
"narcosis": "D053608",
"hallucinations": "D006212",
"tetraparetic": "-1",
"chronic pain": "D059350",
"synovitis": "D013585",
"injury to different regions of the kidney": "D007674",
"acute experimental models of renal damage": "D058186",
"protein excretion": "D011507",
"glomerular damage": "D007674",
"excretion of proteins": "D011507",
"neuromuscular blockade": "D020879",
"hematuria": "D006417",
"confusion": "D003221",
"coronary thrombosis": "D003328",
"radiation injury": "D011832",
"hepatonecrosis": "-1",
"cholangitis": "D002761",
"bronchopneumonia": "D001996",
"liver damage": "D056486",
"chronic hepatic enzyme dysfunction": "D056487",
"visual loss": "D014786",
"bitemporal hemianopia": "D006423",
"loss of central visual acuity, colour vision (ishihara) and visual field": "D014786",
"loss of visual function": "D014786",
"urticaria": "D014581",
"atrial fibrillation": "D001281",
"atrial tachycardia": "D013617",
"af": "D001281",
"pulmonary-renal syndrome": "C538458",
"hemoptysis": "D006469",
"hypoxemia": "D000860",
"ventricular arrhythmia": "D001145",
"sudden death": "D003645",
"myelodysplasia": "D009190",
"macrocytic anemia": "D000748",
"myelodysplastic": "D009190",
"hyperplastic marrow": "D001855",
"dysmyelopoiesis": "D009190",
"hypocellular marrow": "D001855",
"dyserythropoiesis": "-1",
"hemosiderosis": "D006486",
"liver injuries": "D056486",
"hepatic damage": "D056486",
"glomerular disease": "D007674",
"membranous glomerulonephritis": "D015433",
"intracerebral hemorrhage": "D002543",
"ich": "D002543",
"hematomas": "D006406",
"growth impairment": "D006130",
"reduced cerebellar growth": "D006130",
"decreased cranial to body growth": "D006130",
"overactive bladder": "D053201",
"oab": "D053201",
"nocturia": "D053158",
"dry month": "D014987",
"dry mouth": "D014987",
"neurovascular complications": "D013901",
"aneurysmal": "D017542",
"gastric lymphoma": "C535648",
"myotonic dystrophy": "D009223",
"steinert's disease": "D009223",
"muscular dystrophy": "D009136",
"renal cell cancer": "D002292",
"halothane hepatitis": "C562477",
"tension-type headache": "D018781",
"primary headaches": "D051270",
"coronary artery stenosis": "D023921",
"coronary spasm": "D003329",
"coronary spastic angina": "D000788",
"anginal": "D000787",
"variant angina": "D000788",
"valvular heart disease": "D006349",
"valvular disease": "D006349",
"valvular abnormalities": "D006349",
"valvulopathy": "D006349",
"aortic regurgitation": "D001022",
"mitral regurgitation": "D008944",
"bicuspid aortic valve": "C562388",
"aortic insufficiency": "D001022",
"peripheral nervous system damage": "D010523",
"peripheral neurotoxicity": "D010523",
"endometrial carcinoma": "D016889",
"advanced disease": "D020178",
"endometrial cancers": "D016889",
"granulosa cell tumor of the ovary": "C537296",
"liver dysfunction": "D017093",
"granulosa cell tumors": "D006106",
"adenomyosis": "D062788",
"hearing impairment": "D034381",
"auditory dysfunction": "D006311",
"myocardial lesions": "D001768",
"acute myeloblastic leukemia": "D015470",
"leukemia": "D007938",
"wilms tumor": "D009396",
"prostate adenocarcinomas": "D011471",
"prostate cancer": "D011471",
"prostatitis": "D011472",
"hemolytic uremic syndrome": "D006463",
"hus": "D006463",
"pulmonary edema": "D011654",
"gastric adenocarcinoma": "D013274",
"anuria": "D001002",
"postrenal failure": "D007674",
"tubulopathies": "D007674",
"vein thrombosis": "D020246",
"glaucoma": "D005901",
"cardiac apoptosis": "D006331",
"hepatic failure": "D017093",
"skin rash": "D005076",
"eosinophilia": "D004802",
"interstitial nephritis": "D009395",
"febrile neutropenia": "D009503",
"hematologic disorder": "D006402",
"oncologic disorder": "D009369",
"neurotoxic lesions": "D020258",
"impaired memory functioning": "D008569",
"memory impairments": "D008569",
"atherosclerosis": "D050197",
"venous thromboembolism": "D054556",
"vte": "D054556",
"sleep deprivation": "D012892",
"painful": "D010146",
"cluster headache": "D003027",
"ns": "D009404",
"bladder cancer": "D001749",
"urothelial cancer": "D014523",
"bladder tumor": "D001749",
"bladder tumors": "D001749",
"leg and back pain": "D001416",
"pain in the legs and/or back": "D001416",
"leg and/or back pain": "D001416",
"hypertensives": "D006973",
"hallucinatory": "D006212",
"adult-onset diabetes": "D003924",
"toxic optic neuropathy": "D009901",
"diabetics": "D003920",
"dystonic": "D020821",
"thalamic lesions": "D013786",
"bile duct injury": "D002779",
"type b hepatitis": "D006509",
"serotonin syndrome": "D020230",
"salivation": "D012798",
"hyperthermia": "D005334",
"hypoventilation": "D007040",
"paralyzed": "D010243",
"haematological toxicities": "D006402",
"renal toxicities": "D007674",
"hepatic toxicities": "D056486",
"gastrointestinal toxicities": "D005767",
"lymphoma": "D008223",
"mast cell tumour": "D034801",
"brain tumour": "D001932",
"histiocytic tumours": "D015620",
"epitheliotropic lymphoma": "D008223",
"gastrointestinal toxicosis": "D005767",
"hiv-1 infection": "D015658",
"cytotoxicity": "D064420",
"mesothelioma": "D008654",
"malaise": "D005221",
"parkinsonian symptoms": "D010302",
"tardive dyskinesia": "D004409",
"neuronal damage": "D009422",
"orofacial dyskinesia": "D009069",
"oral dyskinesia": "D009069",
"abnormal movements": "D004409",
"tardive dsykinesia": "D004409",
"sphincter of oddi spasm": "D046628",
"spasm": "D013035",
"sphincter of oddi dyskinesia": "D046628",
"reduction of food intake": "D000855",
"loss in body weight": "D015431",
"tetanic": "D013746",
"atrophy": "D001284",
"muscle atrophy": "D009133",
"cardiogenic shock": "D012770",
"tumours": "D009369",
"hypoalbuminemia": "D034141",
"detrimental effect on memory": "D008569",
"detrimental effect on cognition": "D003072",
"mesangial proliferation": "C537346",
"tubulointerstitial inflammation": "D009395",
"hyperlocomotion": "D009069",
"rpgn": "D005921",
"hiv disease": "D015658",
"fatigue": "D005221",
"megaloblastosis": "-1",
"bilateral optic neuropathy": "D009901",
"bilateral retrobulbar neuropathy": "D009901",
"scotoma": "D012607",
"blood coagulation": "D001778",
"hypercoagulability": "D019851",
"thrombophlebitis": "D013924",
"thromboembolic episodes": "D013923",
"cerebral palsy": "D002547",
"seizure disorder": "D004827",
"restlessness": "D011595",
"itching": "D011537",
"icterus": "D007565",
"hyperbilirubinemia": "D006932",
"autoimmune hemolytic anemia": "D000744",
"syndrome of inappropriate secretion of antidiuretic hormone": "D007177",
"siadh": "D007177",
"tonic-clonic convulsions": "D004830",
"weakness of the central nervous system": "D002493",
"bladder tumours": "D001749",
"liver tumours": "D008113",
"bladder carcinomas": "D001749",
"bladder calculi": "D001744",
"hepatocarcinogens": "D008113",
"muscle degeneration": "D009135",
"muscle damage": "D009135",
"diplopia": "D004172",
"localized scleroderma": "D012594",
"contractures": "D003286",
"adrenaline arrhythmia": "D001145",
"open-angle glaucoma": "D005902",
"ocular hypertension": "D009798",
"abs": "D054549",
"acute cardiac syndrome": "D006331",
"hyperkinesis": "D006948",
"epicardial coronary disease": "D003327",
"colorectal cancer": "D015179",
"akinesis": "D018476",
"coronary vasospasm": "D003329",
"thrombus": "D013927",
"fetal pulmonary hypertension": "D005315",
"pulmonary hypertension": "D006976",
"pulmonary hypertension syndrome": "D006976",
"qt prolongation": "D008133",
"torsade de pointes": "D016171",
"tdp": "D016171",
"prolonged qtc interval": "D008133",
"thermal hyperalgesia": "D006930",
"mechanical hyperalgesia": "D006930",
"sarcoma": "D012509",
"convulsion": "D012640",
"muscle dysfunction": "D018908",
"neuromuscular dysfunction": "D009468",
"aneurysmal subarachnoid hemorrhage": "D013345",
"hematoma": "D006406",
"artery aneurysms": "D002532",
"non-hodgkin's lymphoma": "D008228",
"neuropathic symptoms": "D012678",
"primary biliary cirrhosis": "D008105",
"rheumatologic diseases": "D012216",
"dermatomyositis": "D003882",
"flushing": "D005483",
"cerebral edema": "D001929",
"hepatocarcinogenesis": "D063646",
"preneoplastic foci": "D011230",
"precancerous lesions": "D011230",
"inhibition of lactation": "D007775",
"glycosuria": "D006029",
"arf": "D058186",
"tubular necrosis": "D009956",
"involuntary movements": "D004409",
"dystonia": "D004421",
"glutamatergic dysfunction": "D018754",
"dopaminergic deficits": "D009461",
"brainstem dysgenesis": "-1",
"fetal anomalies": "D005315",
"multiple cranial-nerve involvement": "D003389",
"cocaine-addicted": "D019970",
"diabetic nephropathy": "D003928",
"cerebral ischemic injury": "D001930",
"ischemic brain injury": "D001930",
"neurological abnormalities": "D009422",
"brain ischemic": "D002546",
"dysosmia": "D000857",
"hyperuricemia": "D033461",
"olfactory disorder": "D000857",
"colorectal adenomas": "D015179",
"adenomatous polyps": "D018256",
"hearing loss": "D034381",
"mental slowing": "D003072",
"mental slowness": "D003072",
"irritability": "D001523",
"aggression": "D001523",
"aggressive behavior": "D001523",
"glioblastoma": "D005909",
"brain tumors": "D001932",
"obsessive-compulsive symptoms": "D009771",
"treatment-resistant depression": "D061218",
"obsessive-compulsive behavior": "D009771",
"obsessive-compulsive disorder": "D009771",
"hemolytic anemias": "D000743",
"cauda equina syndrome": "D011128",
"neurological deficits": "D009461",
"hyperkinetic": "D006948",
"abnormal involuntary movement": "D004409",
"corticosteroid glaucoma": "D005901",
"hypertensive eyes": "D009798",
"open angle glaucoma": "D005902",
"steroid glaucoma": "D005901",
"primary open angle glaucoma": "D005902",
"cognitive deterioration": "D003072",
"nystagmus": "C564088",
"flank pain": "D021501",
"hay fever": "D006255",
"asthma": "D001249",
"kidney stones": "D007669",
"hemolytic-uremic syndrome": "D006463",
"pyeloureteritis cystica": "D011702",
"submucosal hemorrhage": "D006470",
"hepatomas": "D006528",
"liver lesions": "D017093",
"myeloma": "D009101",
"multiple myeloma": "D009101",
"mm": "D009101",
"hematological toxicities": "D006402",
"cough": "D003371",
"cutaneous leucocytoclastic vasculitis": "D018366",
"staphylococcus aureus bacteremia": "D013203",
"bacteremia": "D016470",
"necrotic": "D009336",
"blisters": "D001768",
"leucocytoclastic vasculitis": "D018366",
"collagen vascular disease": "D003095",
"neoplasia": "D009369",
"hypothermia": "D007035",
"impaired learning": "D007859",
"impaired memory": "D008569",
"impaired cognitive functions": "D003072",
"hypercapnia": "D006935",
"mbc": "D001943",
"cardiovascular impairments": "D002318",
"decrease of mean arterial blood pressure (mbp)": "D007022",
"decrease of heart rate (hr)": "D001919",
"decrease of mbp": "D007022",
"decrease of hr": "D001919",
"cardiovascular alterations": "D018376",
"marrow suppression": "D001855",
"anemic": "D000740",
"leukopenic": "D007970",
"marrow toxicity": "D001855",
"visceral pain": "D059265",
"behavioral disorders": "D001523",
"painful syndromes": "D010146",
"hyperesthesia": "D006941",
"neuralgia": "D009437",
"writhing": "D010146",
"hypertrophic cardiomyopathy": "D002312",
"eosinophilic": "D004802",
"biventricular failure": "D018754",
"erythema nodosum": "D004893",
"acute promyelocytic leukemia": "D015473",
"apl": "D015473",
"erythematous nodules": "D004893",
"arterial thromboemboli": "D001157",
"venous thromboemboli": "D054556",
"thromboemboli": "D013923",
"pulmonary emboli": "D011655",
"myxedemic": "D009230",
"myxedema": "D009230",
"startle": "D012021",
"ulcerative colitis": "D003093",
"red eyes": "D005128",
"pleural effusion": "D010996",
"urinary abnormalities": "D001745",
"segmental necrotizing glomerulonephritis": "D005923",
"pleural effusions": "D010996",
"hypothermic": "D007035",
"chiari malformation": "D001139",
"tic-like symptoms": "D013981",
"tic disorders": "D013981",
"involuntary eye-blinking movements": "D020820",
"developmental anomalies": "D000014",
"gastrointestinal toxicity": "D005767",
"ventricular septal defects": "D006345",
"vsds": "D006345",
"midline defects": "D009436",
"diaphragmatic hernia": "D065630",
"dh": "D065630",
"md": "D009436",
"vsd": "D006345",
"hydrocephalus": "D006849",
"long qt syndrome": "D008133",
"edds": "D003693",
"a falling platelet count": "D001791",
"hemorrhagic complications": "D006470",
"thromboembolic complications": "D013923",
"colon carcinoma": "D003110",
"metastasis": "D009362",
"prinzmetal's angina": "D000788",
"nephritis": "D009393",
"cerebellar dysfunction": "D002526",
"cerebral damage": "D001927",
"low back pain": "D017116",
"progressive supranuclear palsy": "D013494",
"multiple system atrophy": "D019578",
"dystonias": "D004421",
"psp": "D013494",
"oromandibular dystonia": "D008538",
"omd": "D008538",
"vestibulotoxicity": "D015837",
"cerebral infarction": "D002544",
"amnesic": "D000647",
"acute pain": "D059787",
"allodynia": "D006930",
"neuropathic pain": "D009437",
"thyroid disease": "D013959",
"thyroid illness": "D013959",
"psoriasis": "D011565",
"capillary leak syndrome": "D019559",
"capillary leak": "D019559",
"inflammatory diseases": "D007249",
"rem sleep deprivation": "D012892",
"remd": "D012892",
"head twitches": "D009069",
"hepatic veno-occlusive disease": "D006504",
"veno-occlusive disease of the liver": "D006504",
"acute lymphocytic leukemia": "D054198",
"urothelial toxicity": "D001745",
"lung cancer": "D008175",
"haematuria": "D006417",
"bladder irritation": "D001745",
"pollakisuria": "D014555",
"qtc prolongation": "D008133",
"pas": "D011471",
"pa": "D011471",
"muscle flaccidity": "D009123",
"cerebral vascular accident": "D020521",
"cva": "D020521",
"head trauma": "D006259",
"cerebral ischemia": "D002545",
"middle cerebral artery occlusion": "D020244",
"mcao": "D020244",
"neuronal injury": "D009410",
"neuronal dysfunction": "D009410",
"pulmonary toxicity": "D008171",
"pneumonitis": "D000542",
"malignancy": "D009369",
"tuberculous": "D014376",
"hypokalaemia": "D007008",
"bladder inflammation": "D001745",
"bladder dysfunction": "D001745",
"uveitis": "D014605",
"hypotony": "D015814",
"visual disturbances": "D010468",
"premature ventricular contractions": "D018879",
"nsvt": "D017180",
"spinal cord injury": "D013119",
"damage to the muscle": "D009135",
"impairment in word recall": "D008569",
"gerstmann syndrome": "D005862",
"hypomania": "D001714",
"psychotic disorder": "D011618",
"bacterial infections": "D001424",
"alf": "D017114",
"cognitive impairment": "D003072",
"impairment of cognitive function": "D003072",
"ventricular tachyarrhythmias": "D017180",
"qtc interval prolongation": "D008133",
"embryonal rhabdomyosarcoma": "D018233",
"hodgkin disease": "D006689",
"ovarian failure": "D010049",
"obstructive sleep apnea syndrome": "D020181",
"syndrome of obstructive sleep apnea": "D020181",
"obstructive sleep apnea": "D020181",
"sleep apnea": "D012891",
"renal abnormality": "D007674",
"cerebral lesions": "D001927",
"cerebral abnormalities": "D001927",
"juvenile rheumatoid arthritis": "D001171",
"iridocyclitis": "D015863",
"ocular pain": "D058447",
"decreased visual acuity": "D014786",
"photophobia": "D020795",
"band keratopathy": "C562399",
"chorioretinopathy": "D012164",
"cataracts": "D002386",
"keratoconjunctivitis": "D007637",
"water intoxication": "D014869",
"abortion": "D000031",
"abortions": "D000031",
"asthenia": "D001247"
}
}
|
BioGPT/data/BC5CDR/raw/valid.entities.json/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/valid.entities.json",
"repo_id": "BioGPT",
"token_count": 45139
}
| 147 |
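The record above maps lowercased surface mentions to MeSH identifiers, with "-1" marking mentions that have no MeSH entry. A minimal Python sketch of how such a file might be consumed (illustrative only, not part of the dataset; the file path is taken from the record above and the helper name is hypothetical):

```python
import json
from typing import Optional

# Path taken from the record above; adjust to your checkout.
with open("BioGPT/data/BC5CDR/raw/valid.entities.json") as f:
    entities = json.load(f)

disease2id = entities["disease2id"]  # lowercased mention -> MeSH ID or "-1"

def normalize_disease(mention: str) -> Optional[str]:
    """Hypothetical helper: return the MeSH ID for a mention, or None if unmapped."""
    mesh_id = disease2id.get(mention.lower(), "-1")
    return None if mesh_id == "-1" else mesh_id

print(normalize_disease("Tricuspid regurgitation"))  # D014262
```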
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
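# Assumes the MOSES env var points to a mosesdecoder checkout and FASTBPE to a
# built fastBPE directory; both are used by the tokenization/BPE steps below.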
DATA_DIR=../../data/PubMedQA
prefix=biogpt-large-pqal_qcl_ansis
RAW_DATA_DIR=${DATA_DIR}/raw
OUTPUT_DIR=${DATA_DIR}/${prefix}-bin
if [ -d "${OUTPUT_DIR}" ]; then
rm -rf ${OUTPUT_DIR}
fi
python rebuild_data.py ${RAW_DATA_DIR} ${prefix}
cp ${DATA_DIR}/../biogpt_large_dict.txt ${RAW_DATA_DIR}/
cp ${DATA_DIR}/../biogpt_large_bpecodes ${RAW_DATA_DIR}/
SPLIT=(train valid test)
for ff in ${SPLIT[@]}; do
if [ -f "${RAW_DATA_DIR}/${prefix}_$ff.y" ]; then
echo "Preprocessing ${ff}"
perl ${MOSES}/scripts/tokenizer/tokenizer.perl -l en -a -threads 8 < ${RAW_DATA_DIR}/${prefix}_$ff.x > ${RAW_DATA_DIR}/${prefix}_$ff.tok.x
perl ${MOSES}/scripts/tokenizer/tokenizer.perl -l en -a -threads 8 < ${RAW_DATA_DIR}/${prefix}_$ff.y > ${RAW_DATA_DIR}/${prefix}_$ff.tok.y
${FASTBPE}/fast applybpe ${RAW_DATA_DIR}/${prefix}_$ff.tok.bpe.x ${RAW_DATA_DIR}/${prefix}_$ff.tok.x ${RAW_DATA_DIR}/biogpt_large_bpecodes
${FASTBPE}/fast applybpe ${RAW_DATA_DIR}/${prefix}_$ff.tok.bpe.y ${RAW_DATA_DIR}/${prefix}_$ff.tok.y ${RAW_DATA_DIR}/biogpt_large_bpecodes
rm ${RAW_DATA_DIR}/${prefix}_$ff.tok.x ${RAW_DATA_DIR}/${prefix}_$ff.tok.y
fi
done
# do binarize
fairseq-preprocess \
-s x -t y --workers 8 \
--joined-dictionary \
--trainpref ${RAW_DATA_DIR}/${prefix}_train.tok.bpe \
--validpref ${RAW_DATA_DIR}/${prefix}_valid.tok.bpe \
--testpref ${RAW_DATA_DIR}/${prefix}_test.tok.bpe \
--destdir ${OUTPUT_DIR} \
--srcdict ${RAW_DATA_DIR}/biogpt_large_dict.txt
|
BioGPT/examples/QA-PubMedQA/preprocess_large.sh/0
|
{
"file_path": "BioGPT/examples/QA-PubMedQA/preprocess_large.sh",
"repo_id": "BioGPT",
"token_count": 761
}
| 148 |
# Contributing
It would be awesome if you wanted to contribute something to BitBLAS!
- [Contributing](contributing.md#contributing)
- [Reporting Bugs](contributing.md#reporting-bugs)
- [Asking Questions](contributing.md#asking-questions)
- [Submitting Pull Requests](contributing.md#submitting-pull-requests)
- [Repository Setup](contributing.md#repository-setup)
- [Running Tests](contributing.md#running-tests)
## Reporting Bugs
If you run into any weird behavior while using BitBLAS, feel free to open a new issue in this repository! Please run a **search before opening** a new issue, to make sure that someone else hasn't already reported or solved the bug you've found.
Any issue you open must include:
- Code snippet that reproduces the bug with a minimal setup.
- A clear explanation of what the issue is.
## Asking Questions
Please ask questions by opening an issue in this repository.
## Submitting Pull Requests
All pull requests are super welcome and greatly appreciated! Issues in need of a solution are marked with a [`♥ help`](https://github.com/ianstormtaylor/BitBLAS/issues?q=is%3Aissue+is%3Aopen+label%3A%22%E2%99%A5+help%22) label if you're looking for somewhere to start.
Please run `./format.sh` before submitting a pull request to make sure that your code is formatted correctly.
Please include tests and docs with every pull request!
## Repository Setup
To run the build, you need the BitBLAS repository cloned to your computer. After that, `cd` into the directory where you cloned it and install the dependencies with `python`:
```bash
python setup.py install
```
## Running Tests
To run the tests, start by building the project as described in the [Repository Setup](contributing.md#repository-setup) section.
Then you can run the tests with:
```bash
python -m pytest testing
```
|
BitBLAS/CONTRIBUTING.md/0
|
{
"file_path": "BitBLAS/CONTRIBUTING.md",
"repo_id": "BitBLAS",
"token_count": 513
}
| 149 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import torch
from modeling_bitnet import BitnetForCausalLM
torch.set_grad_enabled(False)
parser = argparse.ArgumentParser()
parser.add_argument('--hf_path', default='1bitLLM/bitnet_b1_58-3B', type=str)
def profile(model, input_data):
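    """Return the mean forward-pass latency of `model` on `input_data`, in milliseconds (GPU)."""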
import time
import numpy as np
model = model.cuda()
model.eval()
def get_runtime(num_repeats=1):
tic = time.time()
for _ in range(num_repeats):
_ = model(input_data)
torch.cuda.synchronize()
return (time.time() - tic) * 1000 / num_repeats
with torch.no_grad():
st = time.time()
while time.time() - st < 1.0:
get_runtime() # warmup
warmup_runtime = get_runtime()
num_repeats = max(1, int(1000 / warmup_runtime))
times = get_runtime(num_repeats)
return np.mean(times)
def main():
    args = parser.parse_args()
    model = BitnetForCausalLM.from_pretrained(
        args.hf_path,
device_map='auto',
low_cpu_mem_usage=True,
use_flash_attention_2=True,
torch_dtype=torch.float16,
).half()
with torch.no_grad():
model._post_process_weights()
benchmark_sets = [
(1, 1),
(128, 1),
(1, 2048)
]
for batch_size, seq_len in benchmark_sets:
input_id = torch.ones(batch_size, seq_len).long().cuda()
latency = profile(model, input_id)
print(f"Batch size: {batch_size}, Seq len: {seq_len}, Latency: {latency}")
if __name__ == '__main__':
main()
|
BitBLAS/integration/BitNet/benchmark_inference_latency.py/0
|
{
"file_path": "BitBLAS/integration/BitNet/benchmark_inference_latency.py",
"repo_id": "BitBLAS",
"token_count": 723
}
| 150 |
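A usage sketch for the benchmark above, assuming the file is importable as `benchmark_inference_latency` alongside `modeling_bitnet` and that a CUDA device is available (all values illustrative):

```python
import torch
from benchmark_inference_latency import profile  # the profile() defined above
from modeling_bitnet import BitnetForCausalLM

model = BitnetForCausalLM.from_pretrained(
    '1bitLLM/bitnet_b1_58-3B', torch_dtype=torch.float16).half()
input_id = torch.ones(1, 128).long().cuda()  # batch_size=1, seq_len=128
print(f"{profile(model, input_id):.3f} ms")  # mean latency in milliseconds
```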
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include <assert.h>
#include "ladder_kernel.h"
#include "mma.h"
// nvcc ladder_kernel.cu -gencode arch=compute_80,code=sm_80
${kernel_body}
int ladder_gemm_fp16xint2_fp16(half *input_0, half *input_1, half *output, const int M, const int N, const int K, const int trans_a, const int trans_b, half *workspace_ptr)
{
assert(trans_a == 0 && trans_b == 1);
${kernel_call}
return -1;
}
|
BitBLAS/integration/bitdistiller/template/kernel_template.int2.bitblas.cu.template/0
|
{
"file_path": "BitBLAS/integration/bitdistiller/template/kernel_template.int2.bitblas.cu.template",
"repo_id": "BitBLAS",
"token_count": 190
}
| 151 |
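Illustrative only: `${kernel_body}` and `${kernel_call}` in the template above are shell-style placeholder fields. One plausible way to instantiate them from Python, using the standard-library `string.Template` (the file name and filler strings are assumptions):

```python
from string import Template

with open("kernel_template.int2.bitblas.cu.template") as f:  # hypothetical local copy
    tmpl = Template(f.read())

cu_source = tmpl.substitute(
    kernel_body="/* generated CUDA kernels go here */",
    kernel_call="/* dispatch on (M, N, K) and launch the matching kernel */",
)
```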
#!/bin/bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# install torch
pip install torch==2.1.0
# install llvm
apt-get install llvm-10
# clone and build tvm
git submodule update --init --recursive
cd 3rdparty/tvm
mkdir build
cp cmake/config.cmake build
cd build
echo "set(USE_LLVM llvm-config-10)" >> config.cmake && echo "set(USE_CUDA ON)" >> config.cmake
cmake .. && make -j && cd ../../..
echo "export TVM_HOME=$(pwd)/3rdparty/tvm" >> ~/.bashrc
echo "export PYTHONPATH=\$TVM_HOME/python:$(pwd)/python:\$PYTHONPATH" >> ~/.bashrc
source ~/.bashrc
|
BitBLAS/maint/scripts/installation.sh/0
|
{
"file_path": "BitBLAS/maint/scripts/installation.sh",
"repo_id": "BitBLAS",
"token_count": 219
}
| 152 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Hint definition for schedule"""
from typing import Dict, List, Tuple
from . import PrimFuncNode
import numpy as np
from .rasterization import *
class TensorCoreExtraConfig:
"""
This class is used to store extra information for tensorcore
"""
def __init__(
self,
AS_shape: Tuple[int],
BS_shape: Tuple[int],
AF_shape: Tuple[int],
BF_shape: Tuple[int],
tc_axis: Tuple[int],
) -> None:
self.AS_shape: Tuple[int] = AS_shape
self.BS_shape: Tuple[int] = BS_shape
self.AF_shape: Tuple[int] = AF_shape
self.BF_shape: Tuple[int] = BF_shape
self.tc_axis: Tuple[int] = tc_axis
class Stride:
"""
Manages stride information for a given axis of a tensor.
"""
def __init__(self, stride: int = 1, ax: int = -1) -> None:
# which axis to put stride on
self._ax: int = int(ax)
# the stride size of the axis
self._stride: int = int(stride)
@property
def ax(self) -> int:
return self._ax
@property
def stride(self) -> int:
return self._stride
def compute_strides_from_shape(self, shape: List[int]) -> List[int]:
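        # Example (illustrative): shape=[16, 8, 4] with ax=0, stride=64 gives
        # [64, 4, 1] -- row-major strides, with axis 0 padded to the set stride.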
ndim = len(shape)
strides = [1 for _ in shape]
for i in range(ndim - 2, -1, -1):
if i == self.ax:
strides[i] = self.stride
else:
strides[i] = int(strides[i + 1] * shape[i + 1])
return strides
def compute_elements_from_shape(self, shape: List[int]) -> int:
original_shape = np.prod(shape)
if not self.is_valid():
strided_elem = original_shape
else:
assert self.ax < len(shape)
strided_elem = np.prod(shape[0:self.ax + 1]) * self.stride
assert strided_elem >= original_shape
return int(strided_elem)
def is_valid(self) -> bool:
return self.ax >= 0
def __repr__(self) -> str:
return f"<Stride, {self._ax}, {self._stride}>"
class TileDict:
"""
Manages tiling information and configurations for computational tasks.
"""
def __init__(self, output_tile) -> None:
self.output_tile = output_tile
# schedule config
self.tile_map = {}
self.rstep_map = {}
self.cached_tensors_map = {}
self.output_strides_map = {}
self.tensor_strides_map = {}
# analysis
self.traffic = -1
self.smem_cost = -1
self.block_per_SM = -1
self.num_wave = -1
self.grid_size = -1
self.valid = True
def get_tile(self, func) -> List[int]:
return self.tile_map[func]
def get_rstep(self, func) -> Dict[str, int]:
return self.rstep_map
def __hash__(self) -> int:
return hash(tuple(self.output_tile))
class IntrinInfo:
"""
The information of tensorcore intrinsic related information
"""
def __init__(
self,
in_dtype: str,
out_dtype: str,
trans_b: bool,
input_transform_kind: int = 0,
weight_transform_kind: int = 0,
) -> None:
self.in_dtype = in_dtype
self.out_dtype = out_dtype
self.trans_a = False
self.trans_b = trans_b
self.input_transform_kind = input_transform_kind
self.weight_transform_kind = weight_transform_kind
    def __repr__(self) -> str:
        return (f"<IntrinInfo, {self.in_dtype}, {self.out_dtype}, {self.trans_b}, "
                f"{self.input_transform_kind}, {self.weight_transform_kind}>")
@property
def smooth_a(self) -> bool:
return self.input_transform_kind >= 2
@property
def smooth_b(self) -> bool:
return self.weight_transform_kind >= 2
@property
def inter_transform_a(self) -> bool:
return self.input_transform_kind >= 1
@property
def inter_transform_b(self) -> bool:
return self.weight_transform_kind >= 1
class Hint(object):
"""
Central configuration class for managing various parameters of computational tasks.
"""
def __init__(self) -> None:
self.arch = None
self.use_tc = None # todo(lei): this should be renamed.
# special axes tiling info
self.block = []
self.thread = []
# special axes for tensorCore
self.warp = []
# reduce axes tiling info
self.rstep = []
self.reduce_thread = []
self.rasterization_plan = NoRasterization()
self.cached_tensors = []
self.output_strides = {}
self.schedule_stages = None
# Experimental
self._raxis_order = []
self._step = []
self.vectorize: Dict[str, int] = {}
self.pipeline_stage = 1
self.use_async = False
self.opt_shapes: Dict[str, int] = {}
self.intrin_info = IntrinInfo("float16", "float16", True)
self.shared_scope: str = "shared"
self.pass_context: Dict = {}
def to_dict(self) -> Dict:
dic = {}
dic["block"] = self.block
if self.use_tc:
dic["warp"] = self.warp
else:
dic["thread"] = self.thread
dic["rstep"] = self.rstep
if np.prod(self.reduce_thread) > 1:
dic["reduce_thread"] = self.reduce_thread
if self.use_tc:
dic["use_tc"] = self.use_tc
if self.output_strides:
dic["strides"] = {}
for k, stride in self.output_strides.items():
if stride.is_valid():
dic["strides"][k] = stride
if len(dic["strides"]) == 0:
del dic["strides"]
if np.prod(self._step) > 1:
dic["step"] = self._step
if self._raxis_order != []:
dic["raxis_order"] = self._raxis_order
if self.vectorize != {}:
dic["vectorize"] = self.vectorize
return dic
def from_dict(self, dic: Dict) -> "Hint":
self.__init__()
for k, v in dic.items():
setattr(self, k, v)
return self
@property
def raxis_order(self) -> List[int]:
if self._raxis_order != []:
return self._raxis_order
return list(range(len(self.rstep)))
@property
def step(self) -> List[int]:
if self._step != []:
return self._step
return [1 for _ in self.block]
def __repr__(self) -> str:
return str(self.to_dict())
def complete_config(self, node: PrimFuncNode):
# analysis pass context, for int8 mma, we should merge static shared memory
merge_static_smem = False
if self.use_tc and self.intrin_info.in_dtype == "int8":
merge_static_smem = True
self.pass_context = {"tir.merge_static_smem": merge_static_smem}
return self
|
BitBLAS/python/bitblas/base/roller/hint.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/hint.py",
"repo_id": "BitBLAS",
"token_count": 3200
}
| 153 |
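A minimal sketch of how the classes above might be used, assuming the module is importable as `bitblas.base.roller.hint`; all tiling values are illustrative:

```python
from bitblas.base.roller.hint import Hint, Stride

hint = Hint()
hint.block = [128, 128]   # output tile per thread block
hint.thread = [16, 16]    # threads per block along the spatial axes
hint.rstep = [32]         # reduction tiling
hint.output_strides = {0: Stride(stride=136, ax=0)}  # padded stride on axis 0

dic = hint.to_dict()              # {'block': ..., 'thread': ..., 'rstep': ..., 'strides': ...}
restored = Hint().from_dict(dic)  # round-trip back into a Hint
print(restored)
```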
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
GPU-generic schedule rules.
For CUDA/ROCm/Vulkan/Metal-specific rules, use `tvm.dlight.cuda/rocm/vulkan/metal` instead
"""
from .fallback import Fallback # noqa: F401
from .element_wise import ElementWise # noqa: F401
from .gemv import GEMV # noqa: F401
from .gemv_dequantize import GEMVWithDequantizeInfo # noqa: F401
from .general_reduction import GeneralReduction # noqa: F401
from .matmul import (
Matmul, # noqa: F401
MatmulTensorizationMMA, # noqa: F401
MatmulTensorizationWMMA, # noqa: F401
)
from .matmul_mma_dequantize import (
MatmulTensorizationMMAWithDequantizeInfo, # noqa: F401
)
from .matmul_wmma import MatmulTensorizationLegacy # noqa: F401
from .reduction import Reduction # noqa: F401
from .transpose import Transpose # noqa: F401
|
BitBLAS/python/bitblas/gpu/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/__init__.py",
"repo_id": "BitBLAS",
"token_count": 319
}
| 154 |
# Copyright 2018 The apache/tvm Authors. All Rights Reserved.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modifications Copyright (c) Microsoft.
# The code below is mostly copied from apache/tvm transpose.py in dlight.
"""Reduction rule for operators including softmax, layer norm, RMS norm, etc"""
from typing import List, Union
from tvm import arith, tir
from tvm.target import Target
from tvm.tir import Schedule
from tvm.tir.schedule import BlockRV
from ..base import (
detect_dominant_read,
normalize_prim_func,
try_inline_contiguous_spatial,
)
from .base import GPUScheduleRule
class Transpose(GPUScheduleRule):
"""Schedule rule for transpose"""
def is_transpose(self, sch: Schedule, block_rv: BlockRV):
block = sch.get(block_rv)
if isinstance(block.body, tir.BufferStore):
rhs = block.body.value
if isinstance(rhs, tir.BufferLoad):
lhs_indices = block.body.indices
rhs_indices = rhs.indices
if list(lhs_indices) != list(rhs_indices) and set(lhs_indices) == set(rhs_indices):
return True
return False
def apply( # pylint: disable=too-many-locals
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Union[None, tir.Schedule, List[tir.Schedule]]:
# pylint: disable=invalid-name
if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
return None
if target.kind.name == "cuda":
len_tx = 16
len_ty = 8
unroll_depth = 256
else:
len_tx = 8
len_ty = 4
unroll_depth = 64
len_vec = 4
sch = tir.Schedule(func)
blocks = normalize_prim_func(sch)
transpose_block_idx = -1
for idx, block in reversed(list(enumerate(blocks))):
if self.is_transpose(sch, block.block_rv):
transpose_block_idx = idx
break
if not block.is_injective():
return None
if transpose_block_idx == -1:
return None
transpose_block = blocks[transpose_block_idx].block_rv
prologue = None # the optional decoding block
if transpose_block_idx > 0:
spatials = try_inline_contiguous_spatial(sch, blocks[: transpose_block_idx - 1])
assert len(spatials) == 0
prologue = blocks[transpose_block_idx - 1].block_rv
loops = sch.get_loops(transpose_block)
if len(loops) != 2:
# transpose with more than 2 axes is not supported
return None
c_factor = 1
if prologue is not None:
block_stmt = sch.get(prologue)
result = arith.normalize_to_iter_sum(
detect_dominant_read(block_stmt),
input_iters={i.var: i.dom.extent for i in block_stmt.iter_vars},
)
if len(result.args) > 0:
c_factor = int(result.args[0].lower_factor)
i, j = loops
i, vi = sch.split(i, factors=[None, c_factor], preserve_unit_iters=True)
bi, ti = sch.split(i, factors=[None, len_ty], preserve_unit_iters=True)
bj, tj = sch.split(j, factors=[None, len_tx], preserve_unit_iters=True)
sch.reorder(bi, bj, ti, tj, vi)
sch.bind(bi, "blockIdx.y")
sch.bind(bj, "blockIdx.x")
sch.bind(ti, "threadIdx.y")
sch.bind(tj, "threadIdx.x")
len_vec = min(len_vec, c_factor)
_, vi = sch.split(vi, factors=[None, len_vec])
if len_vec > 1:
sch.vectorize(vi)
cache_read = sch.cache_read(transpose_block, read_buffer_index=0, storage_scope="shared")
sch.compute_at(cache_read, bj)
loops = sch.get_loops(cache_read)[2:]
fused = sch.fuse(*loops)
_, ty, tx, v = sch.split(fused, factors=[None, len_ty, len_tx, c_factor])
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
sch.unroll(v)
sch.storage_align(block=cache_read, buffer_index=0, axis=0, factor=32, offset=1)
sch.annotate(bi, ann_key="pragma_auto_unroll_max_step", ann_val=unroll_depth)
sch.annotate(bi, ann_key="pragma_unroll_explicit", ann_val=1)
if prologue is not None:
sch.compute_inline(prologue)
return sch
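# Example (illustrative sketch): applying the rule to a plain 2-D transpose.
# The workload below is an assumption for demonstration; any PrimFunc whose
# single block stores `A[j, i]` into `B[i, j]` should be matched by
# `is_transpose`.
if __name__ == "__main__":
    from tvm import te

    A = te.placeholder((1024, 512), dtype="float16", name="A")
    B = te.compute((512, 1024), lambda i, j: A[j, i], name="B")
    func = te.create_prim_func([A, B])
    sch = Transpose().apply(func, Target("cuda"), False)
    if sch is not None:
        print(sch.mod.script())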
|
BitBLAS/python/bitblas/gpu/transpose.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/transpose.py",
"repo_id": "BitBLAS",
"token_count": 2290
}
| 155 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
import tvm
from tvm import IRModule
from tvm.target import Target
from tvm.tir import PrimFunc
from tvm.contrib.dlpack import to_pytorch_func
from tvm._ffi.base import _LIB, raise_last_ffi_error
from tvm._ffi._ctypes.types import TVMValue, ArgTypeCode
import bitblas
import ctypes
from typing import List, Dict, Any, Optional
import numpy as np
from ..base import fast_tune, fast_tune_with_dynamic_range
from copy import deepcopy
from bitblas.base.roller.arch import get_arch
from bitblas.wrapper import CUDASourceWrapper, CUDASourceWrapperWithDynamic
from dataclasses import dataclass
from enum import IntEnum
import logging
logger = logging.getLogger(__name__)
class TransformKind(IntEnum):
NonTransform = 0
InterWarpTransform = 1
IntraWarpTransform = 2
@dataclass
class OperatorConfig:
"""Base class for operator configurations. Used for typing."""
pass
class Operator(ABC):
def __init__(self, name, config: OperatorConfig, target: Target = None):
if isinstance(target, str):
target = Target(target)
self.name = name
self.config = config
self.target = target
self.prim_func_mod = self._select_implementation()
self.optimized_func = None
self.rt_mod = None
self.time_evaluator = None
self.profile_tensors = None
self.arch = get_arch(target) if target else None
self.dynamic_range = None
self.pass_context: Dict = {}
self.num_args = len(self.prim_func.params)
self.function_handle = None
self.num_output_args: int = (
1 # todo(lei): should be analyzed from the prim_func.
)
self.wrapper = None
self.src_name = None
self.lib_name = None
self.lib = None
def get_source(self, target: Target = None) -> str:
if target is None:
target = self.target
if self.rt_mod is None:
self._build_runtime_module(target)
return self.rt_mod.imported_modules[0].get_source() if self.rt_mod else None
def _build_runtime_module(self, target: Target):
"""
Builds the runtime module based on the architecture platform.
This function attempts to build a runtime module (rt_mod) for the specified target.
If the platform is CUDA and an optimized function is available, it tries to build
using the optimized function with a specific pass context. Otherwise, it falls back
to building with the primary function. After successful build, it initializes a
time evaluator for performance measurement.
Args:
target (Target): The compilation target specification.
Returns:
The compiled runtime module or None if the build was unsuccessful.
"""
# Initialize rt_mod as None to handle cases where build fails or is skipped
rt_mod = None
# Check if the platform is CUDA and we have an optimized function
if self.arch.platform == "CUDA":
if self.optimized_func is None:
return None
@tvm.register_func(func_name="tvm_callback_cuda_postproc", override=True)
def tvm_callback_cuda_postproc(code, _):
return self.post_process(code)
try:
# Use a specific TVM pass context for CUDA platforms
with tvm.transform.PassContext(config={
"tir.use_async_copy": True,
**self.pass_context
}):
rt_mod = tvm.build(self.optimized_func, target=target, name=self.name)
except Exception as e:
rt_build_error = e # noqa
logger.debug(
"Failed to build optimized function for CUDA target with default schedule, Please consider enable hardware aware tuning!"
)
else:
# For non-CUDA platforms or when no optimized function is available, build with the primary function
rt_mod = tvm.build(self.prim_func, target=target, name=self.name)
# If the runtime module was successfully built, set up for evaluation
if rt_mod:
self.rt_mod = rt_mod
# Initialize a time evaluator with the built module, specifying the device and the number of runs
self.time_evaluator = rt_mod.time_evaluator(
rt_mod.entry_name, self.arch.device, number=10)
self.function_handle = rt_mod.get_function(rt_mod.entry_name).handle
self.torch_func = to_pytorch_func(rt_mod)
if self.arch.platform == "CUDA":
try:
if (self.dynamic_range is not None and len(self.optimized_func.functions) > 1):
wrapper = CUDASourceWrapperWithDynamic(self.optimized_func,
self.get_source(target), self.arch)
else:
wrapper = CUDASourceWrapper(self.optimized_func, self.get_source(target),
self.arch)
wrapper.compile_lib()
self.wrapper = wrapper
self.src_name = self.wrapper.src_name
self.lib_name = self.wrapper.lib_name
self.lib = self.wrapper.load_lib()
self.lib.init()
except Exception as e:
build_runtime_library_error = e
logger.debug(
"Failed to build runtime library {}".format(build_runtime_library_error))
return rt_mod
def apply_default_schedule(self, func_mod: IRModule, target: Target) -> IRModule:
mod_for_opt = deepcopy(func_mod)
with target:
optimized_mod = (
bitblas.ApplyDefaultSchedule( # pylint: disable=not-callable
bitblas.gpu.Matmul(),
bitblas.gpu.GEMV(),
bitblas.gpu.Reduction(),
bitblas.gpu.GeneralReduction(),
bitblas.gpu.Fallback(),
)(mod_for_opt))
if optimized_mod is not None:
return optimized_mod
return None
def post_process(self, code: str) -> str:
return code
def apply_fast_tuning(self,
func: PrimFunc,
target: Target,
topk: int = 20,
parallel_build=True) -> IRModule:
        _, best = fast_tune(func, target, topk=topk, parallel_build=parallel_build)
        if best is None:
            return None
        # record the tuned pass context before returning the scheduled module
        self.pass_context = best.config.pass_context
        return best.sch.mod
def apply_fast_tuning_with_dynamic_range(
self,
func: PrimFunc,
target: Target,
topk: int = 20,
dynamic_range: Dict[str, List[int]] = None,
):
optimized_mod = fast_tune_with_dynamic_range(
func, target, topk=topk, parallel_build=True, dynamic_range=dynamic_range)
if optimized_mod is not None:
return optimized_mod
return None
def hardware_aware_finetune(self,
topk: int = 20,
target: tvm.target.Target = None,
parallel_build=True):
if target is None:
target = self.target
dynamic_range = self.dynamic_range
func = self.prim_func
if dynamic_range is not None:
self.optimized_func = self.apply_fast_tuning_with_dynamic_range(
func, target, topk, dynamic_range)
else:
self.optimized_func = self.apply_fast_tuning(
func, target, topk, parallel_build=parallel_build)
self._build_runtime_module(self.target)
    def get_profile_tensors(self, dynamic_symbolic_constraints: Optional[Dict] = None):
        if dynamic_symbolic_constraints is None:
            dynamic_symbolic_constraints = {}
        func = self.prim_func
        device = self.arch.device
        def var_wrapper(v):
            if isinstance(v, tvm.tir.Var):
                if v.name in dynamic_symbolic_constraints:
                    return dynamic_symbolic_constraints[v.name]
assert "opt_shapes" in func.attrs
assert v.name in func.attrs["opt_shapes"]
return func.attrs["opt_shapes"][v.name].value
elif isinstance(v, tvm.tir.IntImm):
return v.value
else:
raise RuntimeError("Not supported type: ", type(v))
def map_numpy_type(intype):
typemap = {
'e4m3_float8': 'float8_e4m3fn',
'e5m2_float8': 'float8_e5m2',
}
if intype in typemap:
return typemap[intype]
else:
return intype
profile_tensors = []
for param in func.params:
if param not in func.buffer_map:
                # dynamic symbolic variables may appear in params without a buffer mapping
continue
arg = func.buffer_map[param]
numpy_dtype = map_numpy_type(arg.dtype)
profile_tensors.append(
tvm.nd.array(
                    np.random.uniform(0, 1, [var_wrapper(i) for i in arg.shape]).astype(numpy_dtype),
device=device,
))
self.profile_tensors = profile_tensors
return profile_tensors
    def profile_latency(self, dynamic_symbolic_constraints: Optional[Dict] = None) -> float:
        if dynamic_symbolic_constraints is None:
            dynamic_symbolic_constraints = {}
        profile_tensors = self.get_profile_tensors(dynamic_symbolic_constraints)
latency = self.time_evaluator(*profile_tensors).mean * 1e3
return latency
def _tensor_adapter(self, tensor, device):
import torch
from torch.utils.dlpack import to_dlpack
if isinstance(tensor, tvm.te.Tensor):
return tensor
elif isinstance(tensor, torch.Tensor):
return tvm.runtime.ndarray.from_dlpack(to_dlpack(tensor))
elif isinstance(tensor, np.ndarray):
return tvm.nd.array(tensor, device=device)
else:
raise RuntimeError("Not supported type: ", type(tensor))
def _forward_from_tvm_args(self, *args):
_tvm_args = [self._tensor_adapter(arg, self.arch.device) for arg in args]
self.rt_mod(*_tvm_args)
def _forward_from_tvm_nd_array(self, *args):
self.rt_mod(*args)
def _forward_from_torch_func(self, *args):
        # The torch-func path is unreliable for dtypes that torch does not
        # support natively (e.g., float8).
self.torch_func(*args)
return args[-1]
def forward(self, *args):
return self._forward_from_torch_func(*args)
def _forward_from_prebuild_lib(self, *args, stream=0):
ctypes_args = [
ctypes.c_void_p(arr.data_ptr()) if not isinstance(arr, int) else arr for arr in args
]
ctypes_args.append(ctypes.c_void_p(stream))
self.lib.call(*ctypes_args)
def call_lib(self, *args, stream=0):
self.lib.call(*args, ctypes.c_void_p(stream))
def _forward_from_tvm_lib_func(self, values):
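        # `values` is expected to be a ctypes array of TVMValue with `num_args`
        # entries, already populated with NDArray handles; only the type codes
        # and the raw TVMFuncCall plumbing are handled here.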
tcodes = (ctypes.c_int * self.num_args)()
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
for i in range(self.num_args):
tcodes[i] = ArgTypeCode.NDARRAY_HANDLE
if (_LIB.TVMFuncCall(
self.function_handle,
values,
tcodes,
ctypes.c_int(self.num_args),
ctypes.byref(ret_val),
ctypes.byref(ret_tcode),
) != 0):
raise_last_ffi_error()
def __call__(self, *args: Any) -> Any:
return self.forward(*args)
def update_func(self, func: PrimFunc):
self.prim_func_mod["main"] = func
def update_runtime_module(self, rt_mod, src_name=None, lib_name=None):
self.rt_mod = rt_mod
self.time_evaluator = rt_mod.time_evaluator(rt_mod.entry_name, self.arch.device, number=10)
self.function_handle = rt_mod.get_function(rt_mod.entry_name).handle
self.torch_func = to_pytorch_func(rt_mod)
if src_name is not None:
self.src_name = src_name
if lib_name is not None:
self.lib_name = lib_name
self.lib = ctypes.CDLL(lib_name)
self.lib.init()
@abstractmethod
def _select_implementation(self) -> IRModule:
pass
@property
def prim_func(self):
return self.prim_func_mod["main"]
|
BitBLAS/python/bitblas/ops/operator.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/operator.py",
"repo_id": "BitBLAS",
"token_count": 6197
}
| 156 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pytest
import bitblas
from bitblas.ops.lop3_permutate import LOP3Permutate, LOP3PermutateConfig
import tvm
target = tvm.target.Target("llvm")
# fmt: off
@pytest.mark.parametrize("M,N,datatype,dequantize_bits,storage_dtype", [
(1024, 1024, "float16", 4, "uint32"),
])
def test_lop3_permutate_profile_latency(
M,
N,
datatype,
dequantize_bits,
storage_dtype
):
lop3_permutate_config = LOP3PermutateConfig(
M=M,
N=N,
datatype=datatype,
dequantize_bits=dequantize_bits,
storage_dtype=storage_dtype
)
lop3_permutate = LOP3Permutate(
config=lop3_permutate_config,
target=target,
)
latency = lop3_permutate.profile_latency()
assert latency
# fmt: on
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/testing/python/operators/test_lop3_permutate_ops.py/0
|
{
"file_path": "BitBLAS/testing/python/operators/test_lop3_permutate_ops.py",
"repo_id": "BitBLAS",
"token_count": 404
}
| 157 |
from ..datasets import VisualGenomeCaptionDataset
from .datamodule_base import BaseDataModule
class VisualGenomeCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VisualGenomeCaptionDataset
@property
def dataset_name(self):
return "vg"
|
BridgeTower/src/datamodules/vg_caption_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/vg_caption_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 145
}
| 158 |
import torch
import torch.nn as nn
import pytorch_lightning as pl
import torch.nn.functional as F
from .bert_model import BertConfig, BertModel, BertCrossLayer
from . import swin_transformer as swin
from . import vit_model as vit
from .vit_model import resize_pos_embed
from . import heads, objectives, meter_utils
from .clip_model import build_model, adapt_position_encoding
from .swin_helpers import swin_adapt_position_encoding
from transformers import RobertaConfig, RobertaModel
class BTTransformer(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
self.prepare_data_per_node = False
self.is_clip= ('CLIP' in config["vit"])
self.is_swin= ('swin' in config["vit"])
self.is_vit= ('vit' in config["vit"])
self.jump_val_first_for_irtr_itm_irc = True
if 'roberta' in config['tokenizer']:
bert_config = RobertaConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
else:
bert_config = BertConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
resolution_after = config['image_size']
self.cross_modal_text_transform = nn.Linear(config['input_text_embed_size'], config['hidden_size'])
self.cross_modal_image_transform = nn.Linear(config['input_image_embed_size'], config['hidden_size'])
self.cross_modal_text_transform.apply(objectives.init_weights)
self.cross_modal_image_transform.apply(objectives.init_weights)
self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
self.token_type_embeddings.apply(objectives.init_weights)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.is_clip:
build_model(config["vit"], resolution_after=resolution_after, model_type=config["model_type"], vit_layernorm_shared=config["vit_layernorm_shared"], vit_remove_last=config["vit_remove_last"])
elif self.is_swin:
getattr(swin, config["vit"])(pretrained=True, config=config,)
else:
getattr(vit, config["vit"])(pretrained=True, img_size=resolution_after, model_type=config["model_type"],)
if 'roberta' in config['tokenizer']:
RobertaModel.from_pretrained(config['tokenizer'])
else:
BertModel.from_pretrained(config['tokenizer'])
torch.distributed.barrier()
if self.is_clip:
self.vit_model = build_model(config["vit"], resolution_after=resolution_after, model_type=config["model_type"], vit_layernorm_shared=config["vit_layernorm_shared"], vit_remove_last=config["vit_remove_last"])
elif self.is_swin:
self.vit_model = getattr(swin, config["vit"])(
pretrained=True, config=config,
)
self.avgpool = nn.AdaptiveAvgPool1d(1)
else:
self.vit_model = getattr(vit, config["vit"])(pretrained=True, img_size=resolution_after, model_type=config["model_type"],)
if 'roberta' in config['tokenizer']:
self.text_transformer = RobertaModel.from_pretrained(config['tokenizer'])
else:
self.text_transformer = BertModel.from_pretrained(config['tokenizer'])
if not config["vit_layernorm_shared"] and config["vit_layernorm_init_from_vit"]:
for ln in self.vit_model.visual.cross_modal_ln_separate:
ln.weight.data = self.vit_model.visual.ln_post.weight.data
ln.bias.data = self.vit_model.visual.ln_post.bias.data
self.cross_modal_image_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_layers'])])
self.cross_modal_image_layers.apply(objectives.init_weights)
self.cross_modal_text_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_layers'])])
self.cross_modal_text_layers.apply(objectives.init_weights)
# Class token => Linear => Tanh
self.cross_modal_image_pooler = heads.Pooler(config["hidden_size"])
self.cross_modal_image_pooler.apply(objectives.init_weights)
self.cross_modal_text_pooler = heads.Pooler(config["hidden_size"])
self.cross_modal_text_pooler.apply(objectives.init_weights)
# Temperature for image text contrastive learning
self.temperature = nn.Parameter(torch.ones([]) * config['temperature'])
if config["loss_names"]["mlm"] > 0:
# MLM Head weights don't tie with BERT Embedding weights. Train from scratch.
self.mlm_score = heads.MLMHead(bert_config)
self.mlm_score.apply(objectives.init_weights)
if config["loss_names"]["itm"] > 0 or config["loss_names"]["itm_itc"] > 0 or config["loss_names"]["irtr_itm_itc"] > 0:
self.itm_score = heads.ITMHead(config["hidden_size"] * 2)
self.itm_score.apply(objectives.init_weights)
if config["loss_names"]["itc"] > 0 or config["loss_names"]["itm_itc"] > 0 or config["loss_names"]["irtr_itm_itc"] > 0:
self.itc_text_head = heads.ITCHead(config['hidden_size'], config['contrastive_hidden_size'])
self.itc_text_head.apply(objectives.init_weights)
self.itc_image_head = heads.ITCHead(config['hidden_size'], config['contrastive_hidden_size'])
self.itc_image_head.apply(objectives.init_weights)
hs = config["hidden_size"]
# ===================== Initialize BT Components ===================== #
# just for first layer
self.cross_modal_text_layernorm = nn.LayerNorm(config["hidden_size"])
self.cross_modal_text_layernorm.apply(objectives.init_weights)
self.cross_modal_image_layernorm = nn.LayerNorm(config["hidden_size"])
self.cross_modal_image_layernorm.apply(objectives.init_weights)
self.cross_modal_text_link_tower = nn.ModuleList([heads.LinkTower(config) for _ in range(config['num_layers'] - 1)])
self.cross_modal_image_link_tower = nn.ModuleList([heads.LinkTower(config) for _ in range(config['num_layers'] - 1)])
self.cross_modal_text_link_tower.apply(objectives.init_weights)
self.cross_modal_image_link_tower.apply(objectives.init_weights)
# ===================== Load Pretrained METER Weights =====================
if (config["load_path"] != "" and not config["test_only"]):
ckpt = torch.load(config["load_path"], map_location="cpu")
state_dict = ckpt["state_dict"]
if self.is_clip:
state_dict = adapt_position_encoding(state_dict, after=resolution_after, patch_size=config['patch_size'])
elif self.is_swin:
state_dict = swin_adapt_position_encoding(state_dict, after=resolution_after, before=config['resolution_before'])
else:
state_dict['vit_model.pos_embed'] = resize_pos_embed(state_dict['vit_model.pos_embed'], self.vit_model.pos_embed, getattr(self.vit_model, 'num_tokens', 1), self.vit_model.patch_embed.grid_size)
self.load_state_dict(state_dict, strict=False)
# ===================== Downstream ===================== #
hscale = config["head_hidden_scale"]
if config["loss_names"]["vqa"] > 0:
vs = config["vqav2_label_size"]
if config["task_head_layers"] == 1:
self.vqa_classifier = nn.Sequential(
nn.Linear(hs * 2, vs),
)
elif config["task_head_layers"] == 2:
self.vqa_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2 * hscale),
nn.LayerNorm(hs * 2 * hscale),
nn.GELU(),
nn.Linear(hs * 2 * hscale, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
if config["loss_names"]["nlvr2"] > 0:
if config["task_head_layers"] == 1:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(hs * 4, 2),
)
elif config["task_head_layers"] == 2:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(hs * 4, int(hs * 2 * hscale)),
nn.LayerNorm(int(hs * 2 * hscale)),
nn.GELU(),
nn.Linear(int(hs * 2 * hscale), 2),
)
self.nlvr2_classifier.apply(objectives.init_weights)
if config["nlvr2_drop_rate"] > 0:
self.nlvr2_classifier_dropout = nn.Dropout(config['nlvr2_drop_rate'])
emb_data = self.token_type_embeddings.weight.data
self.token_type_embeddings = nn.Embedding(3, hs)
self.token_type_embeddings.apply(objectives.init_weights)
self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
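            # Note: for NLVR2 the pretrained 2-way token-type table is grown to
            # 3 rows; both image slots start from the pretrained image row so
            # the paired images share an initialization.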
if config["loss_names"]["snli"] > 0:
if config["task_head_layers"] == 1:
self.snli_classifier = nn.Sequential(
nn.Linear(hs * 2, 3),
)
elif config["task_head_layers"] == 2:
self.snli_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2 * hscale),
nn.LayerNorm(hs * 2 * hscale),
nn.GELU(),
nn.Linear(hs * 2 * hscale, 3),
)
self.snli_classifier.apply(objectives.init_weights)
if config["loss_names"]["irtr"] > 0:
self.rank_output = nn.Linear(hs * 2, 1)
self.rank_output.weight.data = self.itm_score.fc.weight.data[1:]
self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
for p in self.itm_score.parameters():
p.requires_grad = False
# ===================== load downstream (test_only) ======================
if config["load_path"] != "" and config["test_only"]:
ckpt = torch.load(config["load_path"], map_location="cpu")
state_dict = ckpt["state_dict"]
if self.is_clip:
state_dict = adapt_position_encoding(state_dict, after=resolution_after, patch_size=config['patch_size'])
elif self.is_swin:
state_dict = swin_adapt_position_encoding(state_dict, after=resolution_after, before=config['resolution_before'])
else:
state_dict['vit_model.pos_embed'] = resize_pos_embed(state_dict['vit_model.pos_embed'], self.vit_model.pos_embed, getattr(self.vit_model, 'num_tokens', 1), self.vit_model.patch_embed.grid_size)
self.load_state_dict(state_dict, strict=False)
meter_utils.set_metrics(self)
self.current_tasks = list()
def get_cls_feats(self, text_feats, image_feats):
cls_feats_text = self.cross_modal_text_pooler(text_feats)
if self.is_clip:
cls_feats_image = self.cross_modal_image_pooler(image_feats)
elif self.is_swin:
avg_image_feats = self.avgpool(image_feats.transpose(1, 2)).view(image_feats.size(0), 1, -1)
cls_feats_image = self.cross_modal_image_pooler(avg_image_feats)
else:
cls_feats_image = self.cross_modal_image_pooler(image_feats)
return torch.cat([cls_feats_text, cls_feats_image], dim=-1)
def get_uni_modal_features(self, batch, fusion_features=False, itc=False):
img = batch["image"][0]
text_ids = batch[f"text_ids"]
text_labels = batch[f"text_labels"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
input_shape = text_masks.size()
extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, self.device)
text_embedss = []
split_index = len(self.text_transformer.encoder.layer) - self.hparams.config['num_layers']
index = 0
for layer in self.text_transformer.encoder.layer:
text_embeds = layer(text_embeds, extend_text_masks)[0]
index += 1
if index > split_index:
text_embedss.append(text_embeds)
text_embedss = torch.stack(text_embedss, dim=0)
image_embedss = self.vit_model(img)
image_embedss = image_embedss[len(image_embedss) - self.hparams.config['num_layers']:]
if itc:
unimodal_feats_text = F.normalize(self.itc_text_head(text_embedss[-1][:, 0, :]), dim=-1, p=2)
unimodal_feats_image = F.normalize(self.itc_image_head(image_embedss[-1][:, 0, :]), dim=-1, p=2)
if not fusion_features:
ret = {
'unimodal_feats_text': unimodal_feats_text,
'unimodal_feats_image': unimodal_feats_image,
}
return ret
# cross_modal transform
text_embedss = self.cross_modal_text_transform(text_embedss)
image_embedss = self.cross_modal_image_transform(image_embedss)
if not itc:
ret = {
"extend_text_masks": extend_text_masks,
"text_embedss": text_embedss,
"image_embedss": image_embedss,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
else:
if fusion_features:
ret = {
'unimodal_feats_text': unimodal_feats_text,
'unimodal_feats_image': unimodal_feats_image,
"extend_text_masks": extend_text_masks,
"text_embedss": text_embedss,
"image_embedss": image_embedss,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
def infer_text(
self,
batch,
mask_text=False,
itc=False,
):
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
input_shape = text_masks.size()
extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, self.device)
text_embedss = []
split_index = len(self.text_transformer.encoder.layer) - self.hparams.config['num_layers']
index = 0
for layer in self.text_transformer.encoder.layer:
text_embeds = layer(text_embeds, extend_text_masks)[0]
index += 1
if index > split_index:
text_embedss.append(text_embeds)
text_embedss = torch.stack(text_embedss, dim=0)
if itc:
unimodal_feats_text = F.normalize(self.itc_text_head(text_embedss[-1][:, 0, :]), dim=-1, p=2)
text_embedss = self.cross_modal_text_transform(text_embedss)
if itc:
return text_embedss, extend_text_masks, unimodal_feats_text
else:
return text_embedss, extend_text_masks
def infer_image(
self,
img,
itc=False,
):
image_embedss = self.vit_model(img)
image_embedss = image_embedss[len(image_embedss) - self.hparams.config['num_layers']:]
if itc:
unimodal_feats_image = F.normalize(self.itc_image_head(image_embedss[-1][:, 0, :]), dim=-1, p=2)
image_embedss = self.cross_modal_image_transform(image_embedss)
if itc:
return image_embedss, unimodal_feats_image
else:
return image_embedss
def infer_fusion(
self,
image_embedss,
text_embedss,
extend_text_masks,
image_token_type_idx=1,
irtr_len_image=0,
irtr_len_text=0,
):
if irtr_len_image > 0:
image_masks = torch.ones((irtr_len_image, image_embedss.size(1)), dtype=torch.long, device=self.device)
else:
image_masks = torch.ones((image_embedss[0].size(0), image_embedss[0].size(1)), dtype=torch.long, device=self.device)
extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), self.device)
text_token_type_embeddings = self.token_type_embeddings(torch.zeros(1).long().to(self.device)).expand_as(text_embedss)
image_token_type_embeddings = self.token_type_embeddings(torch.zeros(1).long().to(self.device).fill_(image_token_type_idx)).expand_as(image_embedss)
text_embedss, image_embedss = (
text_embedss + text_token_type_embeddings,
image_embedss + image_token_type_embeddings,
)
# first layer
if irtr_len_text > 0:
_L, _D = text_embedss.size(1), text_embedss.size(2)
x = self.cross_modal_text_layernorm(text_embedss[0]).unsqueeze(0).expand(irtr_len_text, _L, _D)
else:
x = self.cross_modal_text_layernorm(text_embedss[0])
if irtr_len_image > 0:
_L, _D = image_embedss.size(1), image_embedss.size(2)
y = self.cross_modal_image_layernorm(image_embedss[0]).unsqueeze(0).expand(irtr_len_image, _L, _D)
else:
y = self.cross_modal_image_layernorm(image_embedss[0])
x1 = self.cross_modal_text_layers[0](x, y, extend_text_masks, extend_image_masks)[0]
y1 = self.cross_modal_image_layers[0](y, x, extend_image_masks, extend_text_masks)[0]
# link tower fusion
layer_index = 0
for x, y, text_layer, image_layer in zip(text_embedss[1:], image_embedss[1:], self.cross_modal_text_layers[1:], self.cross_modal_image_layers[1:]):
text_link_tower = self.cross_modal_text_link_tower[layer_index]
image_link_tower = self.cross_modal_image_link_tower[layer_index]
if irtr_len_text > 0:
x1_ = text_link_tower(x.unsqueeze(0).expand(irtr_len_text, _L, _D), x1)
else:
x1_ = text_link_tower(x, x1)
if irtr_len_image > 0:
y1_ = image_link_tower(y.unsqueeze(0).expand(irtr_len_image, _L, _D), y1)
else:
y1_ = image_link_tower(y, y1)
x1 = text_layer(x1_, y1_, extend_text_masks, extend_image_masks)[0]
y1 = image_layer(y1_, x1_, extend_image_masks, extend_text_masks)[0]
layer_index += 1
text_feats, image_feats = x1, y1
cls_feats = self.get_cls_feats(text_feats, image_feats)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
}
return ret
def infer(
self,
batch,
mask_text=False,
mask_image=False,
image_token_type_idx=1,
img=None,
irtr_len_text=0,
):
if img is None:
if f"image_{image_token_type_idx - 1}" in batch:
imgkey = f"image_{image_token_type_idx - 1}"
else:
imgkey = "image"
img = batch[imgkey][0]
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
input_shape = text_masks.size()
extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, self.device)
split_index = len(self.text_transformer.encoder.layer) - self.hparams.config['num_layers'] + 1
for layer in self.text_transformer.encoder.layer[:split_index]:
text_embeds = layer(text_embeds, extend_text_masks)[0]
if self.is_clip:
image_embeds = self.vit_model.visual.forward_pre(img.type(self.vit_model.dtype))
for block in self.vit_model.visual.transformer.resblocks[:split_index]:
image_embeds = block(image_embeds)
image_embeds_ = self.vit_model.visual.forward_post(image_embeds.type(self.vit_model.dtype))
else:
image_embeds = self.vit_model.forward_pre(img)
for block in self.vit_model.blocks[:split_index]:
image_embeds = block(image_embeds)
image_embeds_ = self.vit_model.forward_post(image_embeds)
if self.hparams.config["num_layers"] == 0:
cls_feats = self.get_cls_feats(text_embeds, image_embeds_)
ret = {
"text_feats": text_embeds,
"image_feats": image_embeds_,
"cls_feats": cls_feats,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
# first layer
x = self.cross_modal_text_transform(text_embeds)
text_token_type_embeddings = self.token_type_embeddings(torch.zeros(1).long().to(self.device)).expand_as(x)
x = self.cross_modal_text_layernorm(x + text_token_type_embeddings)
image_embeds_ = self.cross_modal_image_transform(image_embeds_)
image_token_type_embeddings = self.token_type_embeddings(torch.zeros(1).long().to(self.device).fill_(image_token_type_idx)).expand_as(image_embeds_)
image_embeds_ = image_embeds_ + image_token_type_embeddings
y = self.cross_modal_image_layernorm(image_embeds_)
if irtr_len_text > 0:
_bs, _L, _D = image_embeds_.size()
y = y.unsqueeze(1).expand(_bs, irtr_len_text, _L, _D).contiguous().view(-1, _L, _D)
image_masks = torch.ones((y.size(0), y.size(1)), dtype=torch.long, device=self.device)
extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), self.device)
x1 = self.cross_modal_text_layers[0](x, y, extend_text_masks, extend_image_masks)[0]
y1 = self.cross_modal_image_layers[0](y, x, extend_image_masks, extend_text_masks)[0]
link_layer_index = 0
# link tower fusion
for i in range(split_index, len(self.text_transformer.encoder.layer)):
text_embeds = self.text_transformer.encoder.layer[i](text_embeds, extend_text_masks)[0]
if self.is_clip:
image_embeds = self.vit_model.visual.transformer.resblocks[i](image_embeds).type(self.vit_model.dtype)
image_embeds_ = self.cross_modal_image_transform(self.vit_model.visual.forward_post(image_embeds)) + image_token_type_embeddings
else:
image_embeds = self.vit_model.blocks[i](image_embeds)
image_embeds_ = self.cross_modal_image_transform(self.vit_model.forward_post(image_embeds)) + image_token_type_embeddings
text_link_tower = self.cross_modal_text_link_tower[link_layer_index]
image_link_tower = self.cross_modal_image_link_tower[link_layer_index]
x1_ = text_link_tower(self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings, x1)
if irtr_len_text > 0:
y1_ = image_link_tower(image_embeds_.unsqueeze(1).expand(_bs, irtr_len_text, _L, _D).contiguous().view(-1, _L, _D), y1)
else:
y1_ = image_link_tower(image_embeds_, y1)
x1 = self.cross_modal_text_layers[link_layer_index + 1](x1_, y1_, extend_text_masks, extend_image_masks)[0]
y1 = self.cross_modal_image_layers[link_layer_index + 1](y1_, x1_, extend_image_masks, extend_text_masks)[0]
link_layer_index += 1
text_feats, image_feats = x1, y1
cls_feats = self.get_cls_feats(text_feats, image_feats)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
def forward(self, batch, split):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ret.update(objectives.compute_mlm(self, batch, split))
# Image Text Matching
if "itm" in self.current_tasks:
ret.update(objectives.compute_itm(self, batch, split))
if "itc" in self.current_tasks:
ret.update(objectives.compute_itc(self, batch, split))
if "itm_itc" in self.current_tasks:
ret.update(objectives.compute_itm_itc(self, batch, split, pretrain=True))
if "irtr_itm_itc" in self.current_tasks:
ret.update(objectives.compute_itm_itc(self, batch, split, pretrain=False))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch, split))
# Natural Language for Visual Reasoning 2
if "nlvr2" in self.current_tasks:
ret.update(objectives.compute_nlvr2(self, batch, split))
# SNLI Visual Entailment
if "snli" in self.current_tasks:
ret.update(objectives.compute_snli(self, batch, split))
# Image Retrieval and Text Retrieval
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch, split))
return ret
def training_step(self, batch, batch_idx):
meter_utils.set_task(self)
output = self(batch, 'train')
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
meter_utils.epoch_wrapup(self, 'train')
def validation_step(self, batch, batch_idx):
meter_utils.set_task(self)
output = self(batch, 'val')
def validation_epoch_end(self, outs):
        if self.jump_val_first_for_irtr_itm_itc and "irtr_itm_itc" in self.hparams.config["group_name"]:
old_get_recall_metric = self.hparams.config["get_recall_metric"]
self.hparams.config["get_recall_metric"] = False
meter_utils.epoch_wrapup(self, 'val')
self.hparams.config["get_recall_metric"] = old_get_recall_metric
            self.jump_val_first_for_irtr_itm_itc = False
else:
meter_utils.epoch_wrapup(self, 'val')
def test_step(self, batch, batch_idx):
meter_utils.set_task(self)
output = self(batch, 'test')
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, output))
return ret
def test_epoch_end(self, outs):
model_name = self.hparams.config["load_path"].split("/")[-2]
checkpoint_name = self.hparams.config["load_path"].split("/")[-1][:-5]
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, f"{model_name}_{checkpoint_name}", self.hparams.config["log_dir"])
meter_utils.epoch_wrapup(self, 'test')
def configure_optimizers(self):
# Optimizer: AdamW; Scheduler: linear_schedule_with_warmup
# Parameters for cross-modal and each task head will be multiply by lr_mult_cross_modal or lr_mult_head
# New task heads need to enroll here.
return meter_utils.set_schedule(self)
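# Fusion recap (illustrative, not executable): `infer` interleaves the top
# unimodal layers with the cross-modal layers through link towers, roughly:
#
#   x1 = cross_text[0](ln_t(t_0), ln_v(v_0));  y1 = cross_image[0](ln_v(v_0), ln_t(t_0))
#   for k in 1..L-1:
#       x1_, y1_ = link_text[k-1](t_k, x1), link_image[k-1](v_k, y1)
#       x1, y1 = cross_text[k](x1_, y1_), cross_image[k](y1_, x1_)
#
# where t_k / v_k denote the k-th text/image backbone outputs after the
# cross-modal transforms and token-type embeddings.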
|
BridgeTower/src/modules/bt_module.py/0
|
{
"file_path": "BridgeTower/src/modules/bt_module.py",
"repo_id": "BridgeTower",
"token_count": 14215
}
| 159 |
import re
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
manual_map = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
articles = ["a", "an", "the"]
period_strip = re.compile(r"(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile(r"(\d)(\,)(\d)")
punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def normalize_word(token):
_token = token
    for p in punct:
        if (p + " " in token or " " + p in token) or (
            re.search(comma_strip, token) is not None
        ):
            _token = _token.replace(p, "")
        else:
            _token = _token.replace(p, " ")
    token = period_strip.sub("", _token)
_token = []
temp = token.lower().split()
for word in temp:
word = manual_map.setdefault(word, word)
if word not in articles:
_token.append(word)
for i, word in enumerate(_token):
if word in contractions:
_token[i] = contractions[word]
token = " ".join(_token)
token = token.replace(",", "")
return token
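# Example (illustrative sketch): typical normalizations performed above.
if __name__ == "__main__":
    print(normalize_word("Isnt it ten?"))    # -> "isn't it 10"
    print(normalize_word("A dog, the cat"))  # -> "dog cat" (articles and commas dropped)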
|
BridgeTower/src/utils/glossary.py/0
|
{
"file_path": "BridgeTower/src/utils/glossary.py",
"repo_id": "BridgeTower",
"token_count": 2254
}
| 160 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
".jpg",
".JPG",
".jpeg",
".JPEG",
".png",
".PNG",
".ppm",
".PPM",
".bmp",
".BMP",
".tiff",
".webp",
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset_rec(dir, images):
assert os.path.isdir(dir), "%s is not a valid directory" % dir
for root, dnames, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
def make_dataset(dir, recursive=False, read_cache=False, write_cache=False):
images = []
if read_cache:
possible_filelist = os.path.join(dir, "files.list")
if os.path.isfile(possible_filelist):
with open(possible_filelist, "r") as f:
images = f.read().splitlines()
return images
if recursive:
make_dataset_rec(dir, images)
else:
assert os.path.isdir(dir) or os.path.islink(dir), "%s is not a valid directory" % dir
for root, dnames, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
if write_cache:
filelist_cache = os.path.join(dir, "files.list")
with open(filelist_cache, "w") as f:
for path in images:
f.write("%s\n" % path)
print("wrote filelist cache at %s" % filelist_cache)
return images
def default_loader(path):
return Image.open(path).convert("RGB")
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise (
RuntimeError(
"Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)
)
)
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
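# Example (illustrative sketch): loading a folder of images with a standard
# torchvision transform. The directory path is a placeholder.
if __name__ == "__main__":
    from torchvision import transforms

    dataset = ImageFolder(
        root="./photos",  # placeholder path; must contain at least one image
        transform=transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()]),
        return_paths=True,
    )
    img, path = dataset[0]
    print(path, tuple(img.shape))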
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/image_folder.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/image_folder.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1309
}
| 161 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import time
import numpy as np
# Helper class that keeps track of training iterations
class IterationCounter:
def __init__(self, opt, dataset_size):
self.opt = opt
self.dataset_size = dataset_size
self.first_epoch = 1
self.total_epochs = opt.niter + opt.niter_decay
self.epoch_iter = 0 # iter number within each epoch
self.iter_record_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, "iter.txt")
if opt.isTrain and opt.continue_train:
try:
self.first_epoch, self.epoch_iter = np.loadtxt(
self.iter_record_path, delimiter=",", dtype=int
)
print("Resuming from epoch %d at iteration %d" % (self.first_epoch, self.epoch_iter))
            except (OSError, ValueError):
                print(
                    "Could not load iteration record at %s. Starting from beginning." % self.iter_record_path
                )
self.total_steps_so_far = (self.first_epoch - 1) * dataset_size + self.epoch_iter
# return the iterator of epochs for the training
def training_epochs(self):
return range(self.first_epoch, self.total_epochs + 1)
def record_epoch_start(self, epoch):
self.epoch_start_time = time.time()
self.epoch_iter = 0
self.last_iter_time = time.time()
self.current_epoch = epoch
def record_one_iteration(self):
current_time = time.time()
# the last remaining batch is dropped (see data/__init__.py),
# so we can assume batch size is always opt.batchSize
self.time_per_iter = (current_time - self.last_iter_time) / self.opt.batchSize
self.last_iter_time = current_time
self.total_steps_so_far += self.opt.batchSize
self.epoch_iter += self.opt.batchSize
def record_epoch_end(self):
current_time = time.time()
self.time_per_epoch = current_time - self.epoch_start_time
print(
"End of epoch %d / %d \t Time Taken: %d sec"
% (self.current_epoch, self.total_epochs, self.time_per_epoch)
)
if self.current_epoch % self.opt.save_epoch_freq == 0:
np.savetxt(self.iter_record_path, (self.current_epoch + 1, 0), delimiter=",", fmt="%d")
print("Saved current iteration count at %s." % self.iter_record_path)
def record_current_iter(self):
np.savetxt(self.iter_record_path, (self.current_epoch, self.epoch_iter), delimiter=",", fmt="%d")
print("Saved current iteration count at %s." % self.iter_record_path)
def needs_saving(self):
return (self.total_steps_so_far % self.opt.save_latest_freq) < self.opt.batchSize
def needs_printing(self):
return (self.total_steps_so_far % self.opt.print_freq) < self.opt.batchSize
def needs_displaying(self):
return (self.total_steps_so_far % self.opt.display_freq) < self.opt.batchSize
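# Usage sketch (illustrative): the expected training-driver shape. `opt`,
# `dataloader`, and the train/save helpers are assumptions, not part of this
# module.
#
#   counter = IterationCounter(opt, len(dataloader) * opt.batchSize)
#   for epoch in counter.training_epochs():
#       counter.record_epoch_start(epoch)
#       for batch in dataloader:
#           train_one_step(batch)
#           counter.record_one_iteration()
#           if counter.needs_saving():
#               save_checkpoint()
#               counter.record_current_iter()
#       counter.record_epoch_end()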
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/util/iter_counter.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/util/iter_counter.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1311
}
| 162 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from detection_models.sync_batchnorm import DataParallelWithCallback
from detection_models.antialiasing import Downsample
class UNet(nn.Module):
def __init__(
self,
in_channels=3,
out_channels=3,
depth=5,
conv_num=2,
wf=6,
padding=True,
batch_norm=True,
up_mode="upsample",
with_tanh=False,
sync_bn=True,
antialiasing=True,
):
"""
Implementation of
U-Net: Convolutional Networks for Biomedical Image Segmentation
(Ronneberger et al., 2015)
https://arxiv.org/abs/1505.04597
Using the default arguments will yield the exact version used
in the original paper
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
depth (int): depth of the network
wf (int): number of filters in the first layer is 2**wf
padding (bool): if True, apply padding such that the input shape
is the same as the output.
This may introduce artifacts
batch_norm (bool): Use BatchNorm after layers with an
activation function
up_mode (str): one of 'upconv' or 'upsample'.
'upconv' will use transposed convolutions for
learned upsampling.
'upsample' will use bilinear upsampling.
"""
super().__init__()
assert up_mode in ("upconv", "upsample")
self.padding = padding
self.depth = depth - 1
prev_channels = in_channels
self.first = nn.Sequential(
*[nn.ReflectionPad2d(3), nn.Conv2d(in_channels, 2 ** wf, kernel_size=7), nn.LeakyReLU(0.2, True)]
)
prev_channels = 2 ** wf
self.down_path = nn.ModuleList()
self.down_sample = nn.ModuleList()
for i in range(depth):
if antialiasing and depth > 0:
self.down_sample.append(
nn.Sequential(
*[
nn.ReflectionPad2d(1),
nn.Conv2d(prev_channels, prev_channels, kernel_size=3, stride=1, padding=0),
nn.BatchNorm2d(prev_channels),
nn.LeakyReLU(0.2, True),
Downsample(channels=prev_channels, stride=2),
]
)
)
else:
self.down_sample.append(
nn.Sequential(
*[
nn.ReflectionPad2d(1),
nn.Conv2d(prev_channels, prev_channels, kernel_size=4, stride=2, padding=0),
nn.BatchNorm2d(prev_channels),
nn.LeakyReLU(0.2, True),
]
)
)
self.down_path.append(
UNetConvBlock(conv_num, prev_channels, 2 ** (wf + i + 1), padding, batch_norm)
)
prev_channels = 2 ** (wf + i + 1)
self.up_path = nn.ModuleList()
for i in reversed(range(depth)):
self.up_path.append(
UNetUpBlock(conv_num, prev_channels, 2 ** (wf + i), up_mode, padding, batch_norm)
)
prev_channels = 2 ** (wf + i)
if with_tanh:
self.last = nn.Sequential(
*[nn.ReflectionPad2d(1), nn.Conv2d(prev_channels, out_channels, kernel_size=3), nn.Tanh()]
)
else:
self.last = nn.Sequential(
*[nn.ReflectionPad2d(1), nn.Conv2d(prev_channels, out_channels, kernel_size=3)]
)
if sync_bn:
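            # Note: rebinding the local name `self` has no effect on the object
            # returned by the constructor; callers that want synchronized BN
            # should wrap the constructed UNet themselves, e.g.
            # `model = DataParallelWithCallback(UNet(...))`.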
self = DataParallelWithCallback(self)
def forward(self, x):
x = self.first(x)
blocks = []
for i, down_block in enumerate(self.down_path):
blocks.append(x)
x = self.down_sample[i](x)
x = down_block(x)
for i, up in enumerate(self.up_path):
x = up(x, blocks[-i - 1])
return self.last(x)
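# Shape check (illustrative sketch): with padding=True and an input size
# divisible by 2**depth, the output spatial size matches the input.
#
#   net = UNet(in_channels=3, out_channels=1, depth=4, conv_num=2, wf=6,
#              sync_bn=False, antialiasing=True)
#   y = net(torch.zeros(1, 3, 256, 256))  # -> torch.Size([1, 1, 256, 256])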
class UNetConvBlock(nn.Module):
def __init__(self, conv_num, in_size, out_size, padding, batch_norm):
super(UNetConvBlock, self).__init__()
block = []
for _ in range(conv_num):
block.append(nn.ReflectionPad2d(padding=int(padding)))
block.append(nn.Conv2d(in_size, out_size, kernel_size=3, padding=0))
if batch_norm:
block.append(nn.BatchNorm2d(out_size))
block.append(nn.LeakyReLU(0.2, True))
in_size = out_size
self.block = nn.Sequential(*block)
def forward(self, x):
out = self.block(x)
return out
class UNetUpBlock(nn.Module):
def __init__(self, conv_num, in_size, out_size, up_mode, padding, batch_norm):
super(UNetUpBlock, self).__init__()
if up_mode == "upconv":
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
elif up_mode == "upsample":
self.up = nn.Sequential(
nn.Upsample(mode="bilinear", scale_factor=2, align_corners=False),
nn.ReflectionPad2d(1),
nn.Conv2d(in_size, out_size, kernel_size=3, padding=0),
)
self.conv_block = UNetConvBlock(conv_num, in_size, out_size, padding, batch_norm)
def center_crop(self, layer, target_size):
_, _, layer_height, layer_width = layer.size()
diff_y = (layer_height - target_size[0]) // 2
diff_x = (layer_width - target_size[1]) // 2
return layer[:, :, diff_y : (diff_y + target_size[0]), diff_x : (diff_x + target_size[1])]
def forward(self, x, bridge):
up = self.up(x)
crop1 = self.center_crop(bridge, up.shape[2:])
out = torch.cat([up, crop1], 1)
out = self.conv_block(out)
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_type="BN", use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super().__init__()
if norm_type == "BN":
norm_layer = nn.BatchNorm2d
elif norm_type == "IN":
norm_layer = nn.InstanceNorm2d
else:
raise NameError("Unknown norm layer")
# construct unet structure
unet_block = UnetSkipConnectionBlock(
ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True
) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(
ngf * 8,
ngf * 8,
input_nc=None,
submodule=unet_block,
norm_layer=norm_layer,
use_dropout=use_dropout,
)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(
ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer
)
unet_block = UnetSkipConnectionBlock(
ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer
)
unet_block = UnetSkipConnectionBlock(
ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer
)
self.model = UnetSkipConnectionBlock(
output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer
) # add the outermost layer
def forward(self, input):
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
-------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(
self,
outer_nc,
inner_nc,
input_nc=None,
submodule=None,
outermost=False,
innermost=False,
norm_layer=nn.BatchNorm2d,
use_dropout=False,
):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
            use_dropout (bool) -- whether to use dropout layers.
"""
super().__init__()
self.outermost = outermost
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.LeakyReLU(0.2, True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(
inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
# ============================================
# Network testing
# ============================================
if __name__ == "__main__":
from torchsummary import summary
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = UNet_two_decoders(
in_channels=3,
out_channels1=3,
out_channels2=1,
depth=4,
conv_num=1,
wf=6,
padding=True,
batch_norm=True,
up_mode="upsample",
with_tanh=False,
)
model.to(device)
model_pix2pix = UnetGenerator(3, 3, 5, ngf=64, norm_type="BN", use_dropout=False)
model_pix2pix.to(device)
print("customized unet:")
summary(model, (3, 256, 256))
print("cyclegan unet:")
summary(model_pix2pix, (3, 256, 256))
x = torch.zeros(1, 3, 256, 256).requires_grad_(True).cuda()
g = make_dot(model(x))
g.render("models/Digraph.gv", view=False)
|
Bringing-Old-Photos-Back-to-Life/Global/detection_models/networks.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/detection_models/networks.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 5734
}
| 163 |
# TEXT ENCODER CONFIG
text_model: 'gpt2'
text_len: 77
transformer_embed_dim: 768
freeze_text_encoder_weights: True
# AUDIO ENCODER CONFIG
audioenc_name: 'HTSAT'
out_emb: 768
sampling_rate: 44100
duration: 7
fmin: 50
fmax: 8000 #14000
n_fft: 1024 # 1028
hop_size: 320
mel_bins: 64
window_size: 1024
# PROJECTION SPACE CONFIG
d_proj: 1024
temperature: 0.003
# TRAINING AND EVALUATION CONFIG
num_classes: 527
batch_size: 1024
demo: False
|
CLAP/msclap/configs/config_2023.yml/0
|
{
"file_path": "CLAP/msclap/configs/config_2023.yml",
"repo_id": "CLAP",
"token_count": 180
}
| 164 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fairseq documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 17 21:45:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
from fairseq import __version__
# source code directory, relative to this file, for sphinx-autobuild
sys.path.insert(0, os.path.abspath(".."))
source_suffix = [".rst"]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinxarg.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "fairseq"
copyright = "Facebook AI Research (FAIR)"
author = "Facebook AI Research (FAIR)"
github_doc_root = "https://github.com/pytorch/fairseq/tree/master/docs/"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
highlight_language = "python"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_context = {
"css_files": [
"_static/theme_overrides.css", # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
# }
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"numpy": ("http://docs.scipy.org/doc/numpy/", None),
"python": ("https://docs.python.org/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
|
COCO-LM/fairseq/docs/conf.py/0
|
{
"file_path": "COCO-LM/fairseq/docs/conf.py",
"repo_id": "COCO-LM",
"token_count": 1307
}
| 165 |
.. role:: hidden
:class: hidden-section
.. module:: fairseq.tasks
.. _Tasks:
Tasks
=====
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
Tasks can be selected via the ``--task`` command-line argument. Once selected, a
task may expose additional command-line arguments for further configuration.
Example usage::
# setup the task (e.g., load dictionaries)
task = fairseq.tasks.setup_task(args)
# build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
# load datasets
task.load_dataset('train')
task.load_dataset('valid')
# iterate over mini-batches of data
batch_itr = task.get_batch_iterator(
task.dataset('train'), max_tokens=4096,
)
for batch in batch_itr:
# compute the loss
loss, sample_size, logging_output = task.get_loss(
model, criterion, batch,
)
loss.backward()
Translation
-----------
.. autoclass:: fairseq.tasks.translation.TranslationTask
.. _language modeling:
Language Modeling
-----------------
.. autoclass:: fairseq.tasks.language_modeling.LanguageModelingTask
Adding new tasks
----------------
.. autofunction:: fairseq.tasks.register_task
.. autoclass:: fairseq.tasks.FairseqTask
:members:
:undoc-members:
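A minimal registration sketch (the task name, argument and hook bodies below are
illustrative placeholders, not part of fairseq)::

    from fairseq.tasks import FairseqTask, register_task

    @register_task('dummy_classification')
    class DummyClassificationTask(FairseqTask):

        @staticmethod
        def add_args(parser):
            # task-specific command-line arguments
            parser.add_argument('--num-classes', type=int, default=2)

        @classmethod
        def setup_task(cls, args, **kwargs):
            # e.g., load dictionaries here before constructing the task
            return cls(args)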
|
COCO-LM/fairseq/docs/tasks.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/tasks.rst",
"repo_id": "COCO-LM",
"token_count": 483
}
| 166 |
# Cross-lingual Retrieval for Iterative Self-Supervised Training
https://arxiv.org/pdf/2006.09526.pdf
## Introduction
CRISS is a multilingual sequence-to-sequence pretraining method where mining and training processes are applied iteratively, improving cross-lingual alignment and translation ability at the same time.
## Requirements:
* faiss: https://github.com/facebookresearch/faiss
* mosesdecoder: https://github.com/moses-smt/mosesdecoder
* flores: https://github.com/facebookresearch/flores
* LASER: https://github.com/facebookresearch/LASER
## Unsupervised Machine Translation
##### 1. Download and decompress CRISS checkpoints
```
cd examples/criss
wget https://dl.fbaipublicfiles.com/criss/criss_3rd_checkpoints.tar.gz
tar -xf criss_3rd_checkpoints.tar.gz
```
##### 2. Download and preprocess Flores test dataset
Make sure to run all scripts from the examples/criss directory
```
bash download_and_preprocess_flores_test.sh
```
##### 3. Run Evaluation on Sinhala-English
```
bash unsupervised_mt/eval.sh
```
## Sentence Retrieval
##### 1. Download and preprocess Tatoeba dataset
```
bash download_and_preprocess_tatoeba.sh
```
##### 2. Run Sentence Retrieval on Tatoeba Kazakh-English
```
bash sentence_retrieval/sentence_retrieval_tatoeba.sh
```
## Mining
##### 1. Install faiss
Follow instructions on https://github.com/facebookresearch/faiss/blob/master/INSTALL.md
##### 2. Mine pseudo-parallel data between Kazakh and English
```
bash mining/mine_example.sh
```
## Citation
```bibtex
@article{tran2020cross,
title={Cross-lingual retrieval for iterative self-supervised training},
author={Tran, Chau and Tang, Yuqing and Li, Xian and Gu, Jiatao},
journal={arXiv preprint arXiv:2006.09526},
year={2020}
}
```
|
COCO-LM/fairseq/examples/criss/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/criss/README.md",
"repo_id": "COCO-LM",
"token_count": 561
}
| 167 |
# GottBERT: a pure German language model
## Introduction
[GottBERT](http://arxiv.org/abs/2012.02110) is a RoBERTa-based language model pretrained on 145GB of German text.
## Example usage
### fairseq
##### Load GottBERT from torch.hub (PyTorch >= 1.1):
```python
import torch
gottbert = torch.hub.load('pytorch/fairseq', 'gottbert-base')
gottbert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Load GottBERT (for PyTorch 1.0 or custom models):
```python
# Download gottbert model
wget https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz
tar -xzvf gottbert-base.tar.gz
# Load the model in fairseq
from fairseq.models.roberta import GottbertModel
gottbert = GottbertModel.from_pretrained('/path/to/gottbert')
gottbert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks:
```python
masked_line = 'Gott ist <mask> ! :)'
gottbert.fill_mask(masked_line, topk=3)
# [('Gott ist gut ! :)', 0.3642110526561737, ' gut'),
# ('Gott ist überall ! :)', 0.06009674072265625, ' überall'),
# ('Gott ist großartig ! :)', 0.0370681993663311, ' großartig')]
```
##### Extract features from GottBERT
```python
# Extract the last layer's features
line = "Der erste Schluck aus dem Becher der Naturwissenschaft macht atheistisch , aber auf dem Grunde des Bechers wartet Gott !"
tokens = gottbert.encode(line)
last_layer_features = gottbert.extract_features(tokens)
assert last_layer_features.size() == torch.Size([1, 27, 768])
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = gottbert.extract_features(tokens, return_all_hiddens=True)
assert len(all_layers) == 13
assert torch.all(all_layers[-1] == last_layer_features)
```
## Citation
If you use our work, please cite:
```bibtex
@misc{scheible2020gottbert,
title={GottBERT: a pure German Language Model},
author={Raphael Scheible and Fabian Thomczyk and Patric Tippmann and Victor Jaravine and Martin Boeker},
year={2020},
eprint={2012.02110},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
COCO-LM/fairseq/examples/gottbert/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/gottbert/README.md",
"repo_id": "COCO-LM",
"token_count": 785
}
| 168 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch.nn.modules.loss import _Loss
class LatentLayersKLLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, layer_samples, lang_idx, update_num, sample_size):
prior = self.args.prior
samples = layer_samples[lang_idx]
eps = 1e-7
if prior == "uniform":
# uniform prior
kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
elif prior == "agged_posterior":
# aggregated posterior
y_t = torch.stack([x.detach() for x in layer_samples], dim=0)
agged_q = torch.sum(y_t, dim=0)
row_norm = agged_q.sum(-1)
normed_agg_q = agged_q / row_norm
kl_loss = (
samples * (torch.log(samples + eps) - torch.log(normed_agg_q + eps))
).sum(-1)
else:
raise NotImplementedError("The specified prior is not implemented.")
# normalized by number of layers
kl_loss /= layer_samples[0].size()[0]
kl_weight = min(
self.args.sparsity_weight,
(update_num - self.args.soft_update)
* self.args.sparsity_weight
/ self.args.anneal_updates,
)
kl_loss *= kl_weight * sample_size
return kl_loss
class LatentLayersSparsityLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def is_valid(self, update_num):
if self.args.target_layers <= 0:
return False
return update_num > (self.args.soft_update + self.args.anneal_updates)
def forward(self, layer_samples_list, update_num, sample_size):
batch_loss = 0
share_loss = 0
global_sparsity_loss = 0
layer_samples = torch.stack(layer_samples_list, dim=0)
if (
self.args.target_layers > 0 or self.args.share_weight > 0
) and update_num > (self.args.soft_update + self.args.anneal_updates):
# anneal sparsity weight
if update_num < (self.args.anneal_updates + self.args.soft_update):
weight_anneal = 0
elif update_num < (2 * self.args.anneal_updates + self.args.soft_update):
weight_anneal = (
(update_num - self.args.soft_update - self.args.anneal_updates)
* self.args.share_weight
/ self.args.anneal_updates
)
else:
weight_anneal = 1
# compute ratio among languages
layer_utilization = torch.sum(layer_samples, dim=0)
layer_utilization /= layer_samples.size()[0]
if self.args.share_weight > 0:
# encouraging sharing across languages
share_loss = sum(
-1.0 * v * math.log(v) for v in layer_utilization if v > 0
)
batch_loss += (
weight_anneal * self.args.share_weight * sample_size * share_loss
)
if self.args.target_layers > 0:
                # compute expected number of layers selected
                expected_layers = sum(layer_utilization)
                # compute l2 loss wrt target number of layers
                global_sparsity_loss = (expected_layers - self.args.target_layers) ** 2
batch_loss += (
weight_anneal
* self.args.share_weight
* sample_size
* global_sparsity_loss
)
return batch_loss
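# ------------------------------------------------------------------
# Hypothetical smoke test (not part of the original module): the args
# fields and tensor shapes below are inferred from the code above and
# may differ from how latent-depth training actually invokes the losses.
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace(
        prior="uniform",
        sparsity_weight=0.1,
        soft_update=100,
        anneal_updates=1000,
        target_layers=6,
        share_weight=0.1,
    )
    # one selection probability per layer (12 layers) for each of 4 languages
    layer_samples = [torch.rand(12) for _ in range(4)]
    kl = LatentLayersKLLoss(args)(layer_samples, lang_idx=0, update_num=500, sample_size=1)
    sparsity = LatentLayersSparsityLoss(args)
    assert sparsity.is_valid(update_num=2500)
    batch_loss = sparsity(layer_samples, update_num=2500, sample_size=1)
    print(kl.item(), float(batch_loss))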
|
COCO-LM/fairseq/examples/latent_depth/latent_depth_src/loss/latent_depth.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/latent_depth/latent_depth_src/loss/latent_depth.py",
"repo_id": "COCO-LM",
"token_count": 1908
}
| 169 |
# Beyond English-Centric Multilingual Machine Translation
## Introduction
In this work, we create a true Many-to-Many multilingual translation model that can translate directly between any pair of 100 languages. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly translating between non-English directions while performing competitively with the best single systems of WMT.
If you are new to using fairseq, read the following walkthrough. Otherwise, skip to the sections below.
0. **Generation Data**
To download the generation data, follow the commands below. Note that all datasets need to be detokenized *before* applying SPM in the data preprocessing step. If you use these evaluation datasets, please cite their associated papers.
```bash
# WMT - use sacrebleu, example here:
sacrebleu -t wmt14 -l fr-en --echo src > wmt.test.fr-en.fr
sacrebleu -t wmt14 -l fr-en --echo ref > wmt.test.fr-en.en
# WAT
wget http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/wat2020.my-en.zip
unzip wat2020.my-en.zip
# FLORES
# download from: https://github.com/facebookresearch/flores
# TED - need to detokenize with Moses!
# from: https://github.com/neulab/word-embeddings-for-nmt
wget http://phontron.com/data/ted_talks.tar.gz
# Autshumato
# request to download: https://repo.sadilar.org/handle/20.500.12185/397
# Tatoeba Challenge
# available here: https://github.com/Helsinki-NLP/Tatoeba-Challenge
```
1. **Training Data**
To produce the training data, we use a combination of [CCMatrix](https://arxiv.org/abs/1911.04944) and [CCAligned](https://arxiv.org/abs/1911.06154). Check out the instructions [here](https://github.com/facebookresearch/LASER/tree/master/tasks/CCMatrix) to download the raw data.
2. **Preprocess Data**
After downloading raw data, you will need to postprocess the data, then apply SPM, then binarize. Note that it is very important you run the postprocessing script, because this removes any instance of the evaluation data in the mined training data.
```bash
# preprocess data
# remove sentences with more than 50% punctuation
python /path/to/fairseq/examples/m2m_100/process_data/remove_too_much_punc.py
# deduplicate training data
paste /path/to/datadir/train.$src /path/to/datadir/train.$tgt | awk '!x[$0]++' > /path/to/datadir/train.dedup
echo "keeping $(wc -l /path/to/datadir/train.dedup) bitext out of $(wc -l /path/to/datadir/train.$src)"
cut -f1 /path/to/datadir/train.dedup > /path/to/datadir/train.$src
cut -f2 /path/to/datadir/train.dedup > /path/to/datadir/train.$tgt
# remove all instances of evaluation data from the training data
python /path/to/fairseq/examples/m2m_100/process_data/dedup_data.py
# frequency cleaning
wget https://dl.fbaipublicfiles.com/m2m_100/histograms.tar.gz
tar -xvzf histograms.tar.gz
python /path/to/fairseq/examples/m2m_100/process_data/clean_histogram.py --src $src --tgt $tgt --src-file /path/to/source/file --tgt-file /path/to/output/file --src-output-file source_output.$src --tgt-output-file target_output.$tgt --histograms /path/to/histograms
# apply SPM
wget https://dl.fbaipublicfiles.com/m2m_100/spm.128k.model
python /path/to/fairseq/scripts/spm_encode.py \
--model spm.128k.model \
--output_format=piece \
--inputs=/path/to/input/file/here \
--outputs=/path/to/output/file/here
# length ratio cleaning
perl mosesdecoder/scripts/training/clean-corpus-n.perl --ratio 3 /path/to/training/data/train.spm.$src-$tgt $src $tgt /path/to/output/directory/train.spm.$src-$tgt 1 250
# binarize data
wget https://dl.fbaipublicfiles.com/m2m_100/data_dict.128k.txt
fairseq-preprocess \
--source-lang $src --target-lang $tgt \
--testpref spm.$src.$tgt \
--thresholdsrc 0 --thresholdtgt 0 \
--destdir data_bin \
--srcdict data_dict.128k.txt --tgtdict data_dict.128k.txt
```
3. **Training Scripts**
To reproduce the training of our models, we train with fairseq-py's multilingual translation [task](https://github.com/pytorch/fairseq/tree/master/examples/multilingual). If you are interested in model parallel training, also check out [fairscale](https://github.com/facebookresearch/fairscale).
4. **Generation**
To generate from our models, follow the commands in the generation section below.
If you use any of the resources listed here, please cite:
```bibtex
@article{fan2020beyond,
title={Beyond English-Centric Multilingual Machine Translation},
author={Fan, Angela and Bhosale, Shruti and Schwenk, Holger and Ma, Zhiyi and El-Kishky, Ahmed and Goyal, Siddharth and Baines, Mandeep and Celebi, Onur and Wenzek, Guillaume and Chaudhary, Vishrav and Goyal, Naman and Birch, Tom and Liptchinsky, Vitaliy and Edunov, Sergey and Grave, Edouard and Auli, Michael and Joulin, Armand},
journal={arXiv preprint},
year={2020}
}
@article{schwenk2019ccmatrix,
title={Ccmatrix: Mining billions of high-quality parallel sentences on the web},
author={Schwenk, Holger and Wenzek, Guillaume and Edunov, Sergey and Grave, Edouard and Joulin, Armand},
journal={arXiv preprint arXiv:1911.04944},
year={2019}
}
@article{el2019massive,
title={A Massive Collection of Cross-Lingual Web-Document Pairs},
author={El-Kishky, Ahmed and Chaudhary, Vishrav and Guzman, Francisco and Koehn, Philipp},
journal={arXiv preprint arXiv:1911.06154},
year={2019}
}
```
## Trained Models
### 418M and 1.2B Model
We include the last checkpoint for both of these models.
```bash
wget https://dl.fbaipublicfiles.com/m2m_100/model_dict.128k.txt
wget https://dl.fbaipublicfiles.com/m2m_100/language_pairs_small_models.txt
# 418M parameter model
wget https://dl.fbaipublicfiles.com/m2m_100/418M_last_checkpoint.pt
# 1.2B parameter model
wget https://dl.fbaipublicfiles.com/m2m_100/1.2B_last_checkpoint.pt
# Generation:
fairseq-generate $binarized_data_path --batch-size 32 --path $path_to_model --fixed-dictionary model_dict.128k.txt -s en -t fr --remove-bpe 'sentencepiece' --beam 5 --task translation_multi_simple_epoch --lang-pairs language_pairs_small_models.txt --decoder-langtok --encoder-langtok src --gen-subset test > gen_out
```
### 12B Model
12B parameter model trained on many-to-many training data for 100 languages. We include the last checkpoint, the average of the last 5 checkpoints, and the average of the last 10 checkpoints. There isn't a universally best choice among these three, and all three versions are close in accuracy: you can sweep over them on a dev set and use the best-performing checkpoint for final testing, or simply default to the last checkpoint.
**Model Download Links**
Configuration | 2 32GB GPUs | 4 16GB GPUs | 6 12GB GPUs | 8 8GB GPUs
:--|:--|:--|:--|:--
Last Checkpoint | [12b_last_chk_2_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_last_chk_2_gpus.pt) | [12b_last_chk_4_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_last_chk_4_gpus.pt) | [12b_last_chk_6_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_last_chk_6_gpus.pt) | [12b_last_chk_8_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_last_chk_8_gpus.pt)
Average of last 5 checkpoints | [12b_avg5_chk_2_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg5_chk_2_gpus.pt) | [12b_avg5_chk_4_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg5_chk_4_gpus.pt) | [12b_avg5_chk_6_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg5_chk_6_gpus.pt) | [12b_avg5_chk_8_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg5_chk_8_gpus.pt)
Average of last 10 checkpoints | [12b_avg10_chk_2_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg10_chk_2_gpus.pt) | [12b_avg10_chk_4_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg10_chk_4_gpus.pt) | [12b_avg10_chk_6_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg10_chk_6_gpus.pt) | [12b_avg10_chk_8_gpus.pt](https://dl.fbaipublicfiles.com/m2m_100/12b_avg10_chk_8_gpus.pt)
**Generation Arguments**
Configuration | 2 32GB GPUs | 4 16GB GPUs | 6 12GB GPUs | 8 8GB GPUs
:--|:--|:--|:--|:--
`--pipeline-encoder-balance` | `[26]` | `[1,15,10]` | `[1,9,9,7]` | `[1,6,6,6,7]`
`--pipeline-encoder-devices` | `[0]` | `[0,1,0]` | `[0,1,2,0]` | `[0,4,5,1,0]`
`--pipeline-decoder-balance` | `[3,22,1]` | `[3,11,11,1]` | `[3,7,7,8,1]` | `[1,6,6,6,6,1]`
`--pipeline-decoder-devices` | `[0,1,0]` | `[0,2,3,0]` | `[0,3,4,5,0]` | `[0,2,6,7,3,0]`
## SentencePiece Model
```bash
wget https://dl.fbaipublicfiles.com/m2m_100/spm.128k.model
```
## Generation with M2M-100
### Encode using our SentencePiece Model
Note: Install SentencePiece from [here](https://github.com/google/sentencepiece)
```bash
fairseq=/path/to/fairseq
cd $fairseq
sacrebleu --echo src -l de-fr -t wmt19 | head -n 20 > raw_input.de-fr.de
sacrebleu --echo ref -l de-fr -t wmt19 | head -n 20 > raw_input.de-fr.fr
wget https://dl.fbaipublicfiles.com/m2m_100/spm.128k.model
for lang in de fr ; do
python scripts/spm_encode.py \
--model spm.128k.model \
--output_format=piece \
--inputs=raw_input.de-fr.${lang} \
--outputs=spm.de-fr.${lang}
done
```
### Binarization
```bash
wget https://dl.fbaipublicfiles.com/m2m_100/data_dict.128k.txt
fairseq-preprocess \
--source-lang de --target-lang fr \
--testpref spm.de-fr \
--thresholdsrc 0 --thresholdtgt 0 \
--destdir data_bin \
--srcdict data_dict.128k.txt --tgtdict data_dict.128k.txt
```
### Generation for the 12B model
Note that generation can currently be run using 2 32GB / 4 16GB / 6 12GB / 8 8GB GPUs, and the corresponding model checkpoints and pipeline arguments can be found in the [12B Model Section](#12b-model).
Generation on CPUs will be added in the future.
```bash
wget https://dl.fbaipublicfiles.com/m2m_100/model_dict.128k.txt
wget https://dl.fbaipublicfiles.com/m2m_100/language_pairs.txt
wget https://dl.fbaipublicfiles.com/m2m_100/12b_last_chk_4_gpus.pt
fairseq-generate \
data_bin \
--batch-size 1 \
--path 12b_last_chk_4_gpus.pt \
--fixed-dictionary model_dict.128k.txt \
-s de -t fr \
--remove-bpe 'sentencepiece' \
--beam 5 \
--task translation_multi_simple_epoch \
--lang-pairs language_pairs.txt \
--decoder-langtok --encoder-langtok src \
--gen-subset test \
--fp16 \
--dataset-impl mmap \
--distributed-world-size 1 --distributed-no-spawn \
--pipeline-model-parallel \
--pipeline-chunks 1 \
--pipeline-encoder-balance '[1,15,10]' \
--pipeline-encoder-devices '[0,1,0]' \
--pipeline-decoder-balance '[3,11,11,1]' \
--pipeline-decoder-devices '[0,2,3,0]' > gen_out
```
## Evaluation with M2M-100
### Tokenization
Note: Refer to tokenizers/README.md for more details on tokenization.
```bash
cd ${fairseq}/examples/m2m_100
cat ${fairseq}/gen_out | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh fr > hyp
cat ${fairseq}/raw_input.de-fr.fr | sh tok.sh fr > ref
```
### BLEU
```bash
sacrebleu -tok 'none' ref < hyp
```
|
COCO-LM/fairseq/examples/m2m_100/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/README.md",
"repo_id": "COCO-LM",
"token_count": 4271
}
| 170 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import sacremoses
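# Usage sketch (hypothetical invocation): python detok.py tokenized.txt > detok.txt
# Reads Moses-tokenized lines (plain or compressed, via fileinput's
# hook_compressed) and prints detokenized text, collapsing wikitext-style
# "@" escapes and the spacing around "=" and "–".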
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args()
detok = sacremoses.MosesDetokenizer()
for line in fileinput.input(args.files, openhook=fileinput.hook_compressed):
print(
detok.detokenize(line.strip().split(" "))
.replace(" @", "")
.replace("@ ", "")
.replace(" =", "=")
.replace("= ", "=")
.replace(" – ", "–")
)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/megatron_11b/detok.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/megatron_11b/detok.py",
"repo_id": "COCO-LM",
"token_count": 332
}
| 171 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
if [ -z "$WORKDIR_ROOT" ] ;
then
        echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
        exit
fi
SRCDIR=$WORKDIR_ROOT/indic_languages_corpus
DESTDIR=$WORKDIR_ROOT/ML50/raw
mkdir -p $SRCDIR
mkdir -p $DESTDIR
WAT_MY_EN=wat2020.my-en.zip
cd $SRCDIR
# please refer to http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/ for latest URL if the following url expired
#- The data used for WAT2020 are identical to those used in WAT2019.
wget http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/$WAT_MY_EN
unzip $WAT_MY_EN
SRC_EXTRACT_DIR=$SRCDIR/wat2020.my-en/alt
cp $SRC_EXTRACT_DIR/train.alt.en $DESTDIR/train.my_MM-en_XX.en_XX
cp $SRC_EXTRACT_DIR/train.alt.my $DESTDIR/train.my_MM-en_XX.my_MM
cp $SRC_EXTRACT_DIR/dev.alt.en $DESTDIR/valid.my_MM-en_XX.en_XX
cp $SRC_EXTRACT_DIR/dev.alt.my $DESTDIR/valid.my_MM-en_XX.my_MM
cp $SRC_EXTRACT_DIR/test.alt.en $DESTDIR/test.my_MM-en_XX.en_XX
cp $SRC_EXTRACT_DIR/test.alt.my $DESTDIR/test.my_MM-en_XX.my_MM
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_wat19_my.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_wat19_my.sh",
"repo_id": "COCO-LM",
"token_count": 527
}
| 172 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import options
def get_reranking_parser(default_task="translation"):
parser = options.get_parser("Generation and reranking", default_task)
add_reranking_args(parser)
return parser
def get_tuning_parser(default_task="translation"):
parser = options.get_parser("Reranking tuning", default_task)
add_reranking_args(parser)
add_tuning_args(parser)
return parser
def add_reranking_args(parser):
group = parser.add_argument_group("Reranking")
# fmt: off
group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True,
help='path to first model or ensemble of models for rescoring')
group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False,
help='path to second model or ensemble of models for rescoring')
group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10,
help='the number of candidate hypothesis to rescore')
group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128,
help='batch size for generating the nbest list')
group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'],
help='data subset to generate (train, valid, test)')
group.add_argument('--gen-model', default=None, metavar='FILE',
help='the model to generate translations')
group.add_argument('-b1', '--backwards1', action='store_true',
help='whether or not the first model group is backwards')
group.add_argument('-b2', '--backwards2', action='store_true',
help='whether or not the second model group is backwards')
group.add_argument('-a', '--weight1', default=1, nargs='+', type=float,
help='the weight(s) of the first model')
group.add_argument('-b', '--weight2', default=1, nargs='+', type=float,
help='the weight(s) of the second model, or the gen model if using nbest from interactive.py')
group.add_argument('-c', '--weight3', default=1, nargs='+', type=float,
help='the weight(s) of the third model')
# lm arguments
group.add_argument('-lm', '--language-model', default=None, metavar='FILE',
help='language model for target language to rescore translations')
group.add_argument('--lm-dict', default=None, metavar='FILE',
help='the dict of the language model for the target language')
group.add_argument('--lm-name', default=None,
help='the name of the language model for the target language')
group.add_argument('--lm-bpe-code', default=None, metavar='FILE',
help='the bpe code for the language model for the target language')
group.add_argument('--data-dir-name', default=None,
help='name of data directory')
group.add_argument('--lenpen', default=1, nargs='+', type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--score-dict-dir', default=None,
help='the directory with dictionaries for the scoring models')
group.add_argument('--right-to-left1', action='store_true',
help='whether the first model group is a right to left model')
group.add_argument('--right-to-left2', action='store_true',
help='whether the second model group is a right to left model')
group.add_argument('--post-process', '--remove-bpe', default='@@ ',
help='the bpe symbol, used for the bitext and LM')
group.add_argument('--prefix-len', default=None, type=int,
help='the length of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--sampling', action='store_true',
help='use sampling instead of beam search for generating n best list')
group.add_argument('--diff-bpe', action='store_true',
help='bpe for rescoring and nbest list not the same')
group.add_argument('--rescore-bpe-code', default=None,
help='bpe code for rescoring models')
group.add_argument('--nbest-list', default=None,
help='use predefined nbest list in interactive.py format')
group.add_argument('--write-hypos', default=None,
help='filename prefix to write hypos to')
group.add_argument('--ref-translation', default=None,
help='reference translation to use with nbest list from interactive.py')
    group.add_argument('--backwards-score-dict-dir', default=None,
                       help='the directory with dictionaries for the backwards model, '
                            'if None then it is assumed the fw and backwards models share dictionaries')
# extra scaling args
group.add_argument('--gen-model-name', default=None,
help='the name of the models that generated the nbest list')
group.add_argument('--model1-name', default=None,
help='the name of the set for model1 group ')
group.add_argument('--model2-name', default=None,
help='the name of the set for model2 group')
group.add_argument('--shard-id', default=0, type=int,
help='the id of the shard to generate')
group.add_argument('--num-shards', default=1, type=int,
help='the number of shards to generate across')
group.add_argument('--all-shards', action='store_true',
help='use all shards')
group.add_argument('--target-prefix-frac', default=None, type=float,
help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--source-prefix-frac', default=None, type=float,
help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--normalize', action='store_true',
help='whether to normalize by src and target len')
# fmt: on
return group
def add_tuning_args(parser):
group = parser.add_argument_group("Tuning")
group.add_argument(
"--lower-bound",
default=[-0.7],
nargs="+",
type=float,
help="lower bound of search space",
)
group.add_argument(
"--upper-bound",
default=[3],
nargs="+",
type=float,
help="upper bound of search space",
)
group.add_argument(
"--tune-param",
default=["lenpen"],
nargs="+",
choices=["lenpen", "weight1", "weight2", "weight3"],
help="the parameter(s) to tune",
)
group.add_argument(
"--tune-subset",
default="valid",
choices=["valid", "test", "train"],
        help="the subset to tune on",
)
group.add_argument(
"--num-trials",
default=1000,
type=int,
help="number of trials to do for random search",
)
group.add_argument(
"--share-weights", action="store_true", help="share weight2 and weight 3"
)
return group
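# ------------------------------------------------------------------
# Hypothetical smoke test (not part of the original module); depending on
# the fairseq version, options.get_parser may add required arguments
# beyond --score-model1.
if __name__ == "__main__":
    parser = get_tuning_parser()
    args = parser.parse_args(["--score-model1", "model1.pt"])
    print(args.num_rescore, args.tune_param, args.lenpen)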
|
COCO-LM/fairseq/examples/noisychannel/rerank_options.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/noisychannel/rerank_options.py",
"repo_id": "COCO-LM",
"token_count": 3147
}
| 173 |
# Training with Quantization Noise for Extreme Model Compression ({Fan\*, Stock\*} *et al.*, 2020)
This page contains information for how to train and quantize models with Quantization Noise, for both scalar quantization like `int8` and Iterative Product Quantization.
Check out our paper [here](https://arxiv.org/abs/2004.07320).
Looking for pretrained models? They will be added shortly.
Looking for code to train vision models? We are working on open sourcing our code as part of ClassyVision. Please check back, but note that both the Scalar and Iterative Product Quantization counterparts of the `nn.Conv2d` module are already included in this release.
**Contents**:
- [Walk through of code](#walk-through-the-code)
- [Reproduce NLP Results](#looking-to-reproduce-the-nlp-results-in-the-paper)
- [Reproduce Vision Results](#looking-to-reproduce-the-vision-results-in-the-paper)
## Citation
```bibtex
@article{fan2020training,
title={Training with Quantization Noise for Extreme Model Compression},
author={Angela Fan* and Pierre Stock* and and Benjamin Graham and Edouard Grave and Remi Gribonval and Herve Jegou and Armand Joulin},
year={2020},
eprint={2004.07320},
archivePrefix={arXiv},
primaryClass={cs.ML}
}
```
## Walk through the code
Training a model with Quant-Noise improves the performance in subsequent inference-time quantization by training models to be robust to quantization. This technique is useful for both scalar and product quantization methods, as well as multiple domains. Below we detail how to train and quantize models, and how to integrate our code to quantize your own models.
### Scalar Quantization
Unlike the section [Iterative Product Quantization](#iterative-product-quantization) which gives state-of-the-art compression, this section showcases the usefulness of our approach for simple scalar quantization baselines such as int8 using on-GPU Fake Quantization.
#### Training
Scalar quantization with Quant-Noise consists of randomly quantizing a proportion `p` of the weights during training. Scalar quantization is implemented [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quantization/scalar) in the form of Fake Quantization, meaning that we emulate int8 on GPU by quantizing and de-quantizing both the weights and the activations. We rely on PyTorch's [quantization primitives](https://github.com/pytorch/pytorch/tree/master/torch/quantization).
To train a model with Quant-Noise, add the following flag:
```
--quant-noise-scalar 0.5
```
Large values of noise make the network easier to quantize but may result in higher non-quantized test and validation perplexities.
#### Quantization
When evaluating a network, all quantized modules and activation hooks automatically switch to `p=1`, so the validation accuracy reported by Fairseq is already the quantized one; there is nothing more to do.
#### Integration with your own code
Looking to quantize your own models with Quant-Noise + Scalar Quantization?
- Use the function `quantize_model_` implemented [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quantization/scalar/utils.py) to (1) replace all your modules by their quantized counterparts and (2) add hooks to those modules to quantize the activations.
- Then, perform your training as usual. Note that in `eval()` mode, the network is always fully quantized (weights and activations) by default (`p=1`). A minimal sketch is shown below.
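A minimal sketch of these two steps, assuming `quantize_model_` is exported from `fairseq.modules.quantization.scalar` and accepts the noise proportion `p` (check `utils.py` for the exact signature in your version); the toy model is a placeholder:
```python
import torch.nn as nn
from fairseq.modules.quantization.scalar import quantize_model_
# placeholder model; any nn.Module built from supported submodules works
model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
# (1) swap modules for quantized counterparts, (2) hook the activations
quantize_model_(model, p=0.5)
# ... then train as usual; in model.eval() the network runs fully quantized (p=1)
```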
### Iterative Product Quantization
Iterative Product Quantization with Quant-Noise proceeds in two steps. First, a model must be trained uncompressed with Quant-Noise. Second, the model must be quantized with iPQ. Note that we implement the simplest form of noise here, which consists of randomly dropping a proportion `p` of blocks; this worked as well as assigning those blocks to their current centroid.
#### Training
To train a model with Quant-Noise, add the following flags:
```
--quant-noise-pq 0.1 --quant-noise-pq-block-size 8
```
`quant-noise-pq` controls how much dropout is applied to the blocks of the weight matrix. `quant-noise-pq-block-size` controls the size of the weight matrix blocks.
We recommend training with 0.05 to 0.2 Quant-Noise, a value that worked well in our experiments. For the block-size, we recommend training with block-size of 8. Note that the block size must be a multiple of `input_features`, see the size checks [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quant_noise.py). Large block sizes result in higher compression ratio but may induce a loss in accuracy.
We currently support training Transformer based models, such as sequence-to-sequence, language models, and BERT architectures. The `quant_noise` function [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quant_noise.py) wraps a module. It splits a weight matrix into blocks and applies random dropout to these blocks.
In the Transformer architectures, quant-noise is applied to the input and output embeddings, the attention, and the FFN.
Quant-Noise can also be combined with **LayerDrop** (see [here](https://github.com/pytorch/fairseq/tree/master/examples/layerdrop)) to add its pruning effect to the quantized model and make the model even smaller. We recommend training with LayerDrop 0.1 or 0.2.
#### Quantization
We implement an improved version of product quantization from Stock et al, **iPQ**, described [here](https://arxiv.org/abs/1907.05686), see code with old API [here](https://github.com/facebookresearch/kill-the-bits). Note that we improved the iPQ API in terms of both compute speed and usability as described below.
For the particular case of PQ, quantization proceeds sequentially. We recommend first quantizing the FFNs, then the EMBs, and finally the ATTNs. Quantization is done in two sub-steps:
- First, perform `n` steps of Product Quantization (generally `n=20` is enough).
- Then, finetune the obtained centroids.
#### Integration with your own code
Looking to quantize your own models with Quant-Noise + iPQ?
- First wrap your modules with the `quant_noise` function [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quant_noise.py), which is module-agnostic and train your favorite model.
- Then, quantize your trained model using the code [here](https://github.com/pytorch/fairseq/tree/master/fairseq/modules/quantization/pq). This can be done *without any changes to your training loop*. Below is an example code for integration.
Note that we tried our approach only on Transformers and various Convolutional Models such as EfficientNets.
```python
from fairseq.modules.quantization.pq import quantize_model_, SizeTracker
# get configuration parameters
n_centroids_config = config["n_centroids"]
block_sizes_config = config["block_sizes"]
layers_to_quantize = config["layers_to_quantize"]
# size tracker for keeping track of assignments, centroids and non-compressed sizes
size_tracker = SizeTracker(model)
# Quantize model by stages
for step in range(len(layers_to_quantize)):
# quantize model in-place
quantized_layers = quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=step,
)
logger.info(f"Finetuning stage {step}, quantized layers: {quantized_layers}")
logger.info(f"{size_tracker}")
# Don't forget to re-create/update trainer/optimizer since model parameters have changed
optimizer = ...
# Finetune the centroids with your usual training loop for a few epochs
trainer.train_epoch()
```
## Looking to reproduce the NLP results in the paper?
We detail below how to reproduce the state-of-the-art results reported in the paper for Quant-Noise + Iterative Product Quantization.
### Training with Quant-Noise
To **train** RoBERTa + QuantNoise, we followed this setting [here](https://github.com/pytorch/fairseq/tree/master/examples/roberta).
The following command can be used to train a RoBERTa Base + QuantNoise model:
```bash
TOTAL_UPDATES=125000
WARMUP_UPDATES=10000
PEAK_LR=0.0005
TOKENS_PER_SAMPLE=512
MAX_POSITIONS=512
MAX_SENTENCES=16
UPDATE_FREQ=2
DATA_DIR=/path/to/data/here
fairseq-train $DATA_DIR \
--task masked_lm --criterion masked_lm --arch roberta_base \
--sample-break-mode complete \
--tokens-per-sample $TOKENS_PER_SAMPLE --max-positions $MAX_POSITIONS \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-6 \
--clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $PEAK_LR \
--warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_UPDATES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.01 \
--batch-size $MAX_SENTENCES \
--update-freq $UPDATE_FREQ --max-update $TOTAL_UPDATES \
--save-dir checkpoint/roberta \
--ddp-backend legacy_ddp --encoder-layerdrop 0.2 \
--quant-noise-pq 0.2 --quant-noise-pq-block-size 8 --untie-weights-roberta
```
To **finetune** RoBERTa + QuantNoise, we followed this setting [here](https://github.com/pytorch/fairseq/blob/master/examples/roberta/README.glue.md).
The following command can be used to finetune a RoBERTa Base + QuantNoise model on the RTE dataset:
```bash
TOTAL_NUM_UPDATES=2036
WARMUP_UPDATES=122
LR=2e-05
NUM_CLASSES=2
MAX_SENTENCES=16
ROBERTA_PATH=/path/to/roberta_quantnoise/model.pt
fairseq-train /path/to/rte/data/ \
--restore-file $ROBERTA_PATH \
--max-positions 512 \
--batch-size $MAX_SENTENCES \
--max-tokens 4400 \
--task sentence_prediction \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 \
--arch roberta_large \
--criterion sentence_prediction \
--num-classes $NUM_CLASSES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
--clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--max-epoch 10 \
--find-unused-parameters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--ddp-backend legacy_ddp \
--quant-noise-pq 0.2 --quant-noise-pq-block-size 8
```
To **train** Language Models on Wikitext-103, we followed this setting [here](https://github.com/pytorch/fairseq/tree/master/examples/language_model).
The following command can be used to train a Transformer + QuantNoise model on Wikitext-103:
```bash
fairseq-train --task language_modeling /path/to/wikitext-103/data \
--save-dir checkpoints/transformer_wikitext-103 \
--adaptive-input --adaptive-input-cutoff 20000,60000 --adaptive-input-factor 4 \
--adaptive-softmax-cutoff 20000,60000 --adaptive-softmax-dropout 0.2 --adaptive-softmax-factor 4.0 \
--tie-adaptive-proj --tie-adaptive-weights \
--arch transformer_lm_gbw \
--attention-dropout 0.1 --dropout 0.2 --relu-dropout 0.1 \
--clip-norm 0.1 --criterion adaptive_loss \
--ddp-backend legacy_ddp \
--decoder-attention-heads 8 --decoder-embed-dim 1024 --decoder-ffn-embed-dim 4096 --decoder-input-dim 1024 \
--decoder-layers 16 --decoder-normalize-before --decoder-output-dim 1024 \
--min-lr 0.0001 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 --lr 1.0 --t-mult 2.0 \
--max-tokens 3072 --tokens-per-sample 3072 --momentum 0.99 --optimizer nag \
--sample-break-mode none --update-freq 3 \
--warmup-init-lr 1e-07 --warmup-updates 16000 \
--weight-decay 0 --seed 1 --stop-min-lr 1e-09 \
--quant-noise-pq 0.05 --quant-noise-pq-block-size 8
```
To **evaluate** this model, use the `fairseq-eval-lm` tool. The following command can be used to evaluate:
```bash
fairseq-eval-lm /path/to/wikitext-103/data --path /path/to/model/checkpoint \
--sample-break-mode complete \
--max-tokens 3072 \
--context-window 2560 \
--softmax-batch 1024 \
--gen-subset valid
```
and change the `--gen-subset` to `test` if you would like to evaluate on the test set instead.
### Iterative Product Quantization
To quantize the finetuned RoBERTa model, we use this command on 1 GPU. This should run in a day.
```bash
TOTAL_NUM_UPDATES=6108 # 2036 updates for each iteration
WARMUP_UPDATES=122
LR=2e-05
NUM_CLASSES=2
MAX_SENTENCES=16
fairseq-train --task sentence_prediction /path/to/data/ \
--restore-file $ROBERTA_PATH \
--save-dir checkpoints/roberta_finetuned \
--max-positions 512 \
--batch-size $MAX_SENTENCES \
--max-tokens 4400 \
--init-token 0 --separator-token 2 \
--arch roberta_large \
--criterion sentence_prediction \
--num-classes $NUM_CLASSES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
--clip-norm 0.0 --lr-scheduler polynomial_decay \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--no-progress-bar --skip-invalid-size-inputs-valid-test --ddp-backend legacy_ddp \
--quantization-config-path /path/to/config/yaml
```
To quantize the trained Language Model, we use this command on 8 V100 23GB GPUs. This should run in a couple of hours.
```bash
fairseq-train --task language_modeling /path/to/wikitext-103/data \
--save-dir checkpoints/transformer_wikitext-103 \
--adaptive-input --adaptive-input-cutoff 20000,60000 --adaptive-input-factor 4 \
--adaptive-softmax-cutoff 20000,60000 --adaptive-softmax-dropout 0.2 --adaptive-softmax-factor 4.0 \
--arch transformer_lm_gbw \
--attention-dropout 0.1 --dropout 0.2 --relu-dropout 0.1 \
--bucket-cap-mb 25 --char-embedder-highway-layers 2 --character-embedding-dim 4 \
--clip-norm 0.1 --criterion adaptive_loss \
--ddp-backend legacy_ddp \
--decoder-attention-heads 8 --decoder-embed-dim 1024 --decoder-ffn-embed-dim 4096 --decoder-input-dim 1024 --decoder-layers 16 --decoder-normalize-before --decoder-output-dim 1024 \
--fp16 --keep-last-epochs -1 \
--min-lr 0.0001 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 --lr 0.05 --stop-min-lr 1e-09 \
--max-tokens 2944 --tokens-per-sample 2944\
--momentum 0.99 --no-epoch-checkpoints --no-progress-bar --optimizer nag --required-batch-size-multiple 8 \
--sample-break-mode none --t-mult 2.0 --skip-invalid-size-inputs-valid-test \
--tie-adaptive-proj --tie-adaptive-weights --update-freq 3 --weight-decay 0 --seed 1 \
--log-interval 100 --no-progress-bar --skip-invalid-size-inputs-valid-test \
--restore-file path/to/trained/lm/with/quant/noise \
--max-update 13500 --quantization-config-path /path/to/config/yaml
```
If you have less capacity or if your distributed training freezes, try reducing `--max-tokens` and `--tokens-per-sample` (this may reduce the quantized accuracy a bit).
### Remarks
We try to keep the open-sourced code as readable and as easy-to-plug as possible. Therefore, we did not test it for the following cases:
- Scalar quantization with RoBERTa.
- Quantization with iPQ and `int8` combined.
If you have trouble adapting it, we will be more than happy to help!
## Looking to reproduce the Vision results in the paper?
We are working on open sourcing our code as part of ClassyVision. Please check back.
## Having an issue or have a question?
Please open an issue in this repository with the details of your question. Thanks!
|
COCO-LM/fairseq/examples/quant_noise/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/quant_noise/README.md",
"repo_id": "COCO-LM",
"token_count": 5153
}
| 174 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
|
COCO-LM/fairseq/examples/roberta/wsc/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/wsc/__init__.py",
"repo_id": "COCO-LM",
"token_count": 70
}
| 175 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import DEFAULT_EOS, GET, register_agent
from .simul_trans_agent import SimulTransAgent
from .word_splitter import SPLITTER_DICT
@register_agent("simul_trans_text")
class SimulTransTextAgent(SimulTransAgent):
def build_word_splitter(self, args):
self.word_splitter = {}
        self.word_splitter["src"] = SPLITTER_DICT[args.src_splitter_type](
            args.src_splitter_path
        )
        self.word_splitter["tgt"] = SPLITTER_DICT[args.tgt_splitter_type](
            args.tgt_splitter_path
        )
def load_dictionary(self, task):
self.dict = {}
self.dict["tgt"] = task.target_dictionary
self.dict["src"] = task.source_dictionary
def update_states(self, states, new_state):
if states["finish_read"]:
return states
new_word = new_state["segment"]
# Split words and index the token
if new_word not in [DEFAULT_EOS]:
tokens = self.word_splitter["src"].split(new_word)
# Get indices from dictionary
            # You can change to your own dictionary
indices = (
self.dict["src"]
.encode_line(
tokens,
line_tokenizer=lambda x: x,
add_if_not_exist=False,
append_eos=False,
)
.tolist()
)
else:
tokens = [new_word]
indices = [self.dict["src"].eos()]
states["finish_read"] = True
# Update states
states["segments"]["src"] += [new_word]
states["tokens"]["src"] += tokens
self._append_indices(states, indices, "src")
return states
def read_action(self, states):
# Increase source step by one
states["steps"]["src"] += 1
        # At least one word is read
if len(states["tokens"]["src"]) == 0:
return {"key": GET, "value": None}
        # Only request a new word if there are no buffered tokens
if len(states["tokens"]["src"]) <= states["steps"]["src"]:
return {"key": GET, "value": None}
return None
def finish_read(self, states):
        # The first means all segments (full words) have been read from the server
        # The second means all tokens (subwords) have been read locally
return (
states["finish_read"]
and len(states["tokens"]["src"]) == states["steps"]["src"]
)
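# ------------------------------------------------------------------
# Hypothetical smoke test for the read/finish logic (not part of the
# original module); __new__ bypasses __init__, which in SimulTransAgent
# wires up the full model, dictionaries and word splitters.
if __name__ == "__main__":
    agent = SimulTransTextAgent.__new__(SimulTransTextAgent)
    states = {"steps": {"src": 0}, "tokens": {"src": []}, "finish_read": False}
    assert agent.read_action(states) == {"key": GET, "value": None}
    assert not agent.finish_read(states)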
|
COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/simul_trans_text_agent.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/simul_trans_text_agent.py",
"repo_id": "COCO-LM",
"token_count": 1234
}
| 176 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the utils/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("examples.simultaneous_translation.utils." + module)
|
COCO-LM/fairseq/examples/simultaneous_translation/utils/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/utils/__init__.py",
"repo_id": "COCO-LM",
"token_count": 163
}
| 177 |
# Flashlight Decoder
This script runs decoding for pre-trained speech recognition models.
## Usage
Assuming a few variables:
```bash
exp_dir=<path-to-experiment-directory>
data=<path-to-data-directory>
lm_model=<path-to-language-model>
lexicon=<path-to-lexicon>
```
Example usage for decoding a fine-tuned Wav2Vec model:
```bash
python $FAIRSEQ_ROOT/examples/speech_recognition/hydra/infer.py --multirun \
task=audio_pretraining \
task.data=$data \
task.labels=ltr \
decoding.exp_dir=$exp_dir \
decoding.decoder.name=kenlm \
decoding.decoder.lexicon=$lexicon \
decoding.decoder.lmpath=$lm_model \
dataset.gen_subset=dev_clean,dev_other,test_clean,test_other
```
Example usage for using Ax to sweep WER parameters (requires `pip install hydra-ax-sweeper`):
```bash
python $FAIRSEQ_ROOT/examples/speech_recognition/hydra/infer.py --multirun \
hydra/sweeper=ax \
task=audio_pretraining \
task.data=$data \
task.labels=ltr \
decoding.exp_dir=$exp_dir \
decoding.decoder.name=kenlm \
decoding.decoder.lexicon=$lexicon \
decoding.decoder.lmpath=$lm_model \
decoding.write_sentences=false \
decoding.unique_wer_file=true \
dataset.gen_subset=dev_other
```
|
COCO-LM/fairseq/examples/speech_recognition/hydra/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/hydra/README.md",
"repo_id": "COCO-LM",
"token_count": 482
}
| 178 |
[[Back]](..)
# S2T Example: Speech Recognition (ASR) on LibriSpeech
[LibriSpeech](https://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a de-facto standard English ASR
benchmark. We provide competitive
vanilla [Transformer](https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) baselines.
## Data preparation
Download and preprocess LibriSpeech data with
```bash
# additional Python packages for S2T data processing/model training
pip install pandas torchaudio sentencepiece
python examples/speech_to_text/prep_librispeech_data.py \
--output-root ${LS_ROOT} --vocab-type unigram --vocab-size 10000
```
where `LS_ROOT` is the root path for downloaded data as well as generated files (manifest, features, vocabulary and
data configuration).
[Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_vocab_unigram10000.zip) our vocabulary files
if you want to use our pre-trained models.
## Training
```bash
fairseq-train ${LS_ROOT} --save-dir ${SAVE_DIR} \
--config-yaml config.yaml --train-subset train --valid-subset dev \
--num-workers 4 --max-tokens 40000 --max-update 300000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --share-decoder-input-output-embed \
--optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 10.0 --seed 1 --update-freq 8
```
where `SAVE_DIR` is the checkpoint root path. Here we use `--arch s2t_transformer_s` (31M parameters) as example.
For better performance, you may switch to `s2t_transformer_m` (71M, with `--lr 1e-3`) or `s2t_transformer_l`
(268M, with `--lr 5e-4`). We set `--update-freq 8` to simulate 8 GPUs with 1 GPU. You may want to update it accordingly
when using more than 1 GPU.
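For a rough sense of scale: with `--max-tokens 40000` and `--update-freq 8` on a single GPU, each optimizer step accumulates gradients over roughly 8 × 40,000 = 320,000 tokens, which matches 8 GPUs running with `--update-freq 1`.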
## Inference & Evaluation
Average the last 10 checkpoints and evaluate on the 4 splits
(`dev-clean`, `dev-other`, `test-clean` and `test-other`):
```bash
CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
python scripts/average_checkpoints.py --inputs ${SAVE_DIR} \
--num-epoch-checkpoints 10 \
--output "${SAVE_DIR}/${CHECKPOINT_FILENAME}"
for SUBSET in dev-clean dev-other test-clean test-other; do
fairseq-generate ${LS_ROOT} --config-yaml config.yaml --gen-subset ${SUBSET} \
--task speech_to_text --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \
--max-tokens 50000 --beam 5 --scoring wer
done
```
## Interactive Decoding
Launch the interactive console via
```bash
fairseq-interactive ${LS_ROOT} --config-yaml config.yaml --task speech_to_text \
--path ${SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5
```
Type in WAV/FLAC/OGG audio paths (one per line) after the prompt.
## Results
| --arch | Params | dev-clean | dev-other | test-clean | test-other | Model |
|---|---|---|---|---|---|---|
| s2t_transformer_s | 30M | 3.8 | 8.9 | 4.4 | 9.0 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_s.pt) |
| s2t_transformer_m | 71M | 3.2 | 8.0 | 3.4 | 7.9 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_m.pt) |
| s2t_transformer_l | 268M | 3.0 | 7.5 | 3.2 | 7.5 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_l.pt) |
[[Back]](..)
|
COCO-LM/fairseq/examples/speech_to_text/docs/librispeech_example.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/docs/librispeech_example.md",
"repo_id": "COCO-LM",
"token_count": 1197
}
| 179 |
# Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)
This page includes instructions for reproducing results from the paper [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](https://arxiv.org/abs/1902.07816).
## Download data
First, follow the [instructions to download and preprocess the WMT'17 En-De dataset](../translation#prepare-wmt14en2desh).
Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
## Train a model
Then we can train a mixture of experts model using the `translation_moe` task.
Use the `--method` flag to choose the MoE variant; we support hard mixtures with a learned or uniform prior (`--method hMoElp` and `hMoEup`, respectively) and soft mixures (`--method sMoElp` and `sMoEup`).
The model is trained with online responsibility assignment and shared parameterization.
The following command will train a `hMoElp` model with `3` experts:
```bash
fairseq-train --ddp-backend='legacy_ddp' \
data-bin/wmt17_en_de \
--max-update 100000 \
--task translation_moe --user-dir examples/translation_moe/translation_moe_src \
--method hMoElp --mean-pool-gating-network \
--num-experts 3 \
--arch transformer_wmt_en_de --share-all-embeddings \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 4000 \
--lr 0.0007 \
--dropout 0.1 --weight-decay 0.0 --criterion cross_entropy \
--max-tokens 3584
```
## Translate
Once a model is trained, we can generate translations from different experts using the `--gen-expert` option.
For example, to generate from expert 0:
```bash
fairseq-generate data-bin/wmt17_en_de \
--path checkpoints/checkpoint_best.pt \
--beam 1 --remove-bpe \
--task translation_moe --user-dir examples/translation_moe/translation_moe_src \
--method hMoElp --mean-pool-gating-network \
--num-experts 3 \
--gen-expert 0
```
## Evaluate
First download a tokenized version of the WMT'14 En-De test set with multiple references:
```bash
wget dl.fbaipublicfiles.com/fairseq/data/wmt14-en-de.extra_refs.tok
```
Next apply BPE on the fly and run generation for each expert:
```bash
BPE_CODE=examples/translation/wmt17_en_de/code
for EXPERT in $(seq 0 2); do \
cat wmt14-en-de.extra_refs.tok \
| grep ^S | cut -f 2 \
| fairseq-interactive data-bin/wmt17_en_de \
--path checkpoints/checkpoint_best.pt \
--beam 1 \
--bpe subword_nmt --bpe-codes $BPE_CODE \
--buffer-size 500 --max-tokens 6000 \
--task translation_moe --user-dir examples/translation_moe/translation_moe_src \
--method hMoElp --mean-pool-gating-network \
--num-experts 3 \
--gen-expert $EXPERT ; \
done > wmt14-en-de.extra_refs.tok.gen.3experts
```
Finally, use `score.py` to compute pairwise BLEU and average oracle BLEU:
```bash
python examples/translation_moe/score.py --sys wmt14-en-de.extra_refs.tok.gen.3experts --ref wmt14-en-de.extra_refs.tok
# pairwise BLEU: 48.26
# #refs covered: 2.11
# multi-reference BLEU (leave-one-out): 59.46
```
This matches row 3 from Table 7 in the paper.
## Citation
```bibtex
@article{shen2019mixture,
title = {Mixture Models for Diverse Machine Translation: Tricks of the Trade},
author = {Tianxiao Shen and Myle Ott and Michael Auli and Marc'Aurelio Ranzato},
journal = {International Conference on Machine Learning},
year = 2019,
}
```
|
COCO-LM/fairseq/examples/translation_moe/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/translation_moe/README.md",
"repo_id": "COCO-LM",
"token_count": 1289
}
| 180 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import argparse
import glob
import os
import random
import soundfile
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"root", metavar="DIR", help="root directory containing flac files to index"
)
parser.add_argument(
"--valid-percent",
default=0.01,
type=float,
metavar="D",
help="percentage of data to use as validation set (between 0 and 1)",
)
parser.add_argument(
"--dest", default=".", type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="flac", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--path-must-contain",
default=None,
type=str,
metavar="FRAG",
help="if set, path must contain this substring for a file to be included in the manifest",
)
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.0
if not os.path.exists(args.dest):
os.makedirs(args.dest)
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, "**/*." + args.ext)
rand = random.Random(args.seed)
with open(os.path.join(args.dest, "train.tsv"), "w") as train_f, open(
os.path.join(args.dest, "valid.tsv"), "w"
) as valid_f:
print(dir_path, file=train_f)
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print(
"{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest
)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
|
COCO-LM/fairseq/examples/wav2vec/wav2vec_manifest.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/wav2vec/wav2vec_manifest.py",
"repo_id": "COCO-LM",
"token_count": 991
}
| 181 |
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <torch/torch.h> // @manual=//caffe2:torch_extension
#include <pybind11/detail/common.h>
#include <pybind11/pybind11.h>
#include <vector>
#include <algorithm>
#include <cstdint>
#include <iosfwd>
#include <memory>
#include <new>
#include <string>
#include <utility>
using namespace ::std;
vector<vector<uint32_t>> edit_distance2_with_dp(
vector<uint32_t>& x,
vector<uint32_t>& y) {
uint32_t lx = x.size();
uint32_t ly = y.size();
vector<vector<uint32_t>> d(lx + 1, vector<uint32_t>(ly + 1));
for (uint32_t i = 0; i < lx + 1; i++) {
d[i][0] = i;
}
for (uint32_t j = 0; j < ly + 1; j++) {
d[0][j] = j;
}
for (uint32_t i = 1; i < lx + 1; i++) {
for (uint32_t j = 1; j < ly + 1; j++) {
d[i][j] =
min(min(d[i - 1][j], d[i][j - 1]) + 1,
d[i - 1][j - 1] + 2 * (x.at(i - 1) == y.at(j - 1) ? 0 : 1));
}
}
return d;
}
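// Worked example (a sketch): with x = {1, 2, 3} and y = {1, 3}, insert/delete
// cost 1 and substitution cost 2, the recurrence above yields d[3][2] = 1
// (keep 1, delete 2, keep 3).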
vector<vector<uint32_t>> edit_distance2_backtracking(
vector<vector<uint32_t>>& d,
vector<uint32_t>& x,
vector<uint32_t>& y,
uint32_t terminal_symbol) {
vector<uint32_t> seq;
vector<vector<uint32_t>> edit_seqs(x.size() + 2, vector<uint32_t>());
/*
edit_seqs:
cells 0..x.size() hold the insertion sequences;
the last cell holds the per-token delete/keep sequence over x (1 = delete, 0 = keep)
*/
if (x.size() == 0) {
edit_seqs.at(0) = y;
return edit_seqs;
}
uint32_t i = d.size() - 1;
uint32_t j = d.at(0).size() - 1;
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) {
seq.push_back(1); // insert
seq.push_back(y.at(j - 1));
j--;
} else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) {
seq.push_back(2); // delete
seq.push_back(x.at(i - 1));
i--;
} else {
seq.push_back(3); // keep
seq.push_back(x.at(i - 1));
i--;
j--;
}
}
uint32_t prev_op, op, s, word;
prev_op = 0, s = 0;
for (uint32_t k = 0; k < seq.size() / 2; k++) {
op = seq.at(seq.size() - 2 * k - 2);
word = seq.at(seq.size() - 2 * k - 1);
if (prev_op != 1) {
s++;
}
if (op == 1) // insert
{
edit_seqs.at(s - 1).push_back(word);
} else if (op == 2) // delete
{
edit_seqs.at(x.size() + 1).push_back(1);
} else {
edit_seqs.at(x.size() + 1).push_back(0);
}
prev_op = op;
}
for (uint32_t k = 0; k < edit_seqs.size(); k++) {
if (edit_seqs[k].size() == 0) {
edit_seqs[k].push_back(terminal_symbol);
}
}
return edit_seqs;
}
vector<vector<uint32_t>> edit_distance2_backtracking_with_delete(
vector<vector<uint32_t>>& d,
vector<uint32_t>& x,
vector<uint32_t>& y,
uint32_t terminal_symbol,
uint32_t deletion_symbol) {
vector<uint32_t> seq;
vector<vector<uint32_t>> edit_seqs(x.size() + 1, vector<uint32_t>());
/*
edit_seqs:
cells 0..x.size() hold the insertion sequences; deletions are encoded
inline as `deletion_symbol` entries rather than in a separate final cell
*/
if (x.size() == 0) {
edit_seqs.at(0) = y;
return edit_seqs;
}
uint32_t i = d.size() - 1;
uint32_t j = d.at(0).size() - 1;
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) {
seq.push_back(1); // insert
seq.push_back(y.at(j - 1));
j--;
} else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) {
seq.push_back(2); // delete
seq.push_back(x.at(i - 1));
i--;
} else {
seq.push_back(3); // keep
seq.push_back(x.at(i - 1));
i--;
j--;
}
}
uint32_t prev_op, op, s, word;
prev_op = 0, s = 0;
for (uint32_t k = 0; k < seq.size() / 2; k++) {
op = seq.at(seq.size() - 2 * k - 2);
word = seq.at(seq.size() - 2 * k - 1);
if (prev_op != 1) {
s++;
}
if (op == 1) // insert
{
edit_seqs.at(s - 1).push_back(word);
} else if (op == 2) // delete
{
edit_seqs.at(s - 1).push_back(deletion_symbol);
}
prev_op = op;
}
for (uint32_t k = 0; k < edit_seqs.size(); k++) {
if (edit_seqs.at(k).size() == 0) {
edit_seqs.at(k).push_back(terminal_symbol);
}
}
return edit_seqs;
}
vector<uint32_t> compute_ed2(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys) {
vector<uint32_t> distances(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
distances.at(i) = d.at(xs.at(i).size()).at(ys.at(i).size());
}
return distances;
}
vector<vector<vector<uint32_t>>> suggested_ed2_path(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys,
uint32_t terminal_symbol) {
vector<vector<vector<uint32_t>>> seq(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
seq.at(i) =
edit_distance2_backtracking(d, xs.at(i), ys.at(i), terminal_symbol);
}
return seq;
}
vector<vector<vector<uint32_t>>> suggested_ed2_path_with_delete(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys,
uint32_t terminal_symbol,
uint32_t deletion_symbol) {
vector<vector<vector<uint32_t>>> seq(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
seq.at(i) = edit_distance2_backtracking_with_delete(
d, xs.at(i), ys.at(i), terminal_symbol, deletion_symbol);
}
return seq;
}
PYBIND11_MODULE(libnat, m) {
m.def("compute_ed2", &compute_ed2, "compute_ed2");
m.def("suggested_ed2_path", &suggested_ed2_path, "suggested_ed2_path");
m.def(
"suggested_ed2_path_with_delete",
&suggested_ed2_path_with_delete,
"suggested_ed2_path_with_delete");
}
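// Hedged Python usage once this extension is built as `libnat`
// (arguments are positional; no py::arg names are declared above):
//   import libnat
//   libnat.compute_ed2([[1, 2, 3]], [[1, 3]])            # -> [1]
//   libnat.suggested_ed2_path([[1, 2, 3]], [[1, 3]], 0)  # 0 = terminal symbol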
|
COCO-LM/fairseq/fairseq/clib/libnat/edit_dist.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/clib/libnat/edit_dist.cpp",
"repo_id": "COCO-LM",
"token_count": 2924
}
| 182 |
# @package _group_
quantize_targets: true
final_dim: 256
encoder_layerdrop: 0.05
dropout_input: 0.1
dropout_features: 0.1
feature_grad_mult: 0.1
|
COCO-LM/fairseq/fairseq/config/model/wav2vec2/wav2vec2_base.yaml/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/config/model/wav2vec2/wav2vec2_base.yaml",
"repo_id": "COCO-LM",
"token_count": 61
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_ranking")
class SentenceRankingCriterion(FairseqCriterion):
def __init__(self, task, ranking_head_name, save_predictions, num_classes):
super().__init__(task)
self.ranking_head_name = ranking_head_name
if save_predictions is not None:
self.prediction_h = open(save_predictions, "w")
else:
self.prediction_h = None
self.num_classes = num_classes
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
parser.add_argument('--ranking-head-name',
default='sentence_classification_head',
help='name of the ranking head to use')
# fmt: on
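# Hedged CLI sketch (flag names per add_args above; num_classes is supplied
# by the task, not by this criterion):
#   --criterion sentence_ranking --ranking-head-name sentence_classification_head \
#   --save-predictions /tmp/preds.tsv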
def forward(self, model, sample, reduce=True):
"""Compute ranking loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.ranking_head_name in model.classification_heads
), "model must provide sentence ranking head for --criterion=sentence_ranking"
scores = []
for idx in range(self.num_classes):
score, _ = model(
**sample["net_input{idx}".format(idx=idx + 1)],
classification_head_name=self.ranking_head_name,
)
scores.append(score)
logits = torch.cat(scores, dim=1)
sample_size = logits.size(0)
if "target" in sample:
targets = model.get_targets(sample, [logits]).view(-1)
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
else:
targets = None
loss = torch.tensor(0.0, requires_grad=True)
if self.prediction_h is not None:
preds = logits.argmax(dim=1)
for i, (id, pred) in enumerate(zip(sample["id"].tolist(), preds.tolist())):
if targets is not None:
label = targets[i].item()
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
else:
print("{}\t{}".format(id, pred), file=self.prediction_h)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if targets is not None:
logging_output["ncorrect"] = (logits.argmax(dim=1) == targets).sum()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar(
"accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/fairseq/criterions/sentence_ranking.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/sentence_ranking.py",
"repo_id": "COCO-LM",
"token_count": 2081
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.nn.functional as F
from fairseq.data import BaseWrapperDataset
from fairseq.data.data_utils import get_buckets, get_bucketed_sizes
class BucketPadLengthDataset(BaseWrapperDataset):
"""
Bucket and pad item lengths to the nearest bucket size. This can be used to
reduce the number of unique batch shapes, which is important on TPUs since
each new batch shape requires a recompilation.
Args:
dataset (FairseqDataset): dataset to bucket
sizes (List[int]): all item sizes
num_buckets (int): number of buckets to create
pad_idx (int): padding symbol
left_pad (bool): if True, pad on the left; otherwise right pad
"""
def __init__(
self,
dataset,
sizes,
num_buckets,
pad_idx,
left_pad,
tensor_key=None,
):
super().__init__(dataset)
self.pad_idx = pad_idx
self.left_pad = left_pad
assert num_buckets > 0
self.buckets = get_buckets(sizes, num_buckets)
self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets)
self._tensor_key = tensor_key
def _set_tensor(self, item, val):
if self._tensor_key is None:
return val
item[self._tensor_key] = val
return item
def _get_tensor(self, item):
if self._tensor_key is None:
return item
return item[self._tensor_key]
def _pad(self, tensor, bucket_size, dim=-1):
num_pad = bucket_size - tensor.size(dim)
return F.pad(
tensor,
(num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad),
value=self.pad_idx,
)
def __getitem__(self, index):
item = self.dataset[index]
bucket_size = self._bucketed_sizes[index]
tensor = self._get_tensor(item)
padded = self._pad(tensor, bucket_size)
return self._set_tensor(item, padded)
@property
def sizes(self):
return self._bucketed_sizes
def num_tokens(self, index):
return self._bucketed_sizes[index]
def size(self, index):
return self._bucketed_sizes[index]
|
COCO-LM/fairseq/fairseq/data/bucket_pad_length_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/bucket_pad_length_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1024
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset
class BlockPairDataset(FairseqDataset):
"""Break a Dataset of tokens into sentence pair blocks for next sentence
prediction as well as masked language model.
High-level logics are:
1. break input tensor to tensor blocks
2. pair the blocks with 50% next sentence and 50% random sentence
3. return paired blocks as well as related segment labels
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes: array of sentence lengths
dictionary: dictionary for the task
block_size: maximum block size
break_mode: mode for breaking the corpus into block pairs. Currently we support
2 modes:
doc: respect document boundaries and each part of the pair should belong to one document
none: don't respect any boundary and cut tokens evenly
short_seq_prob: probability for generating shorter block pairs
doc_break_size: Size for empty line separating documents. Typically 1 if
the sentences have eos, 0 otherwise.
"""
def __init__(
self,
dataset,
dictionary,
sizes,
block_size,
break_mode="doc",
short_seq_prob=0.1,
doc_break_size=1,
):
super().__init__()
self.dataset = dataset
self.pad = dictionary.pad()
self.eos = dictionary.eos()
self.cls = dictionary.cls()
self.mask = dictionary.mask()
self.sep = dictionary.sep()
self.break_mode = break_mode
self.dictionary = dictionary
self.short_seq_prob = short_seq_prob
self.block_indices = []
assert len(dataset) == len(sizes)
if break_mode == "doc":
cur_doc = []
for sent_id, sz in enumerate(sizes):
assert doc_break_size == 0 or sz != 0, (
"when doc_break_size is non-zero, we expect documents to be"
"separated by a blank line with a single eos."
)
# empty line as document separator
if sz == doc_break_size:
if len(cur_doc) == 0:
continue
self.block_indices.append(cur_doc)
cur_doc = []
else:
cur_doc.append(sent_id)
max_num_tokens = block_size - 3 # Account for [CLS], [SEP], [SEP]
self.sent_pairs = []
self.sizes = []
for doc_id, doc in enumerate(self.block_indices):
self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
elif break_mode is None or break_mode == "none":
# each block should have half of the block size since we are constructing block pair
sent_length = (block_size - 3) // 2
total_len = sum(dataset.sizes)
length = math.ceil(total_len / sent_length)
def block_at(i):
start = i * sent_length
end = min(start + sent_length, total_len)
return (start, end)
sent_indices = np.array([block_at(i) for i in range(length)])
sent_sizes = np.array([e - s for s, e in sent_indices])
dataset_index = self._sent_to_dataset_index(sent_sizes)
# pair sentences
self._pair_sentences(dataset_index)
else:
raise ValueError("Invalid break_mode: " + break_mode)
def _pair_sentences(self, dataset_index):
"""
Give a list of evenly cut blocks/sentences, pair these sentences with 50%
consecutive sentences and 50% random sentences.
This is used for none break mode
"""
# pair sentences
for sent_id, sent in enumerate(dataset_index):
next_sent_label = (
1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0
)
if next_sent_label:
next_sent = dataset_index[sent_id + 1]
else:
next_sent = dataset_index[
self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1])
]
self.sent_pairs.append((sent, next_sent, next_sent_label))
# The current blocks don't include the special tokens but the
# sizes already account for this
self.sizes.append(3 + sent[3] + next_sent[3])
def _sent_to_dataset_index(self, sent_sizes):
"""
Build index mapping block indices to the underlying dataset indices
"""
dataset_index = []
ds_idx, ds_remaining = -1, 0
for to_consume in sent_sizes:
sent_size = to_consume
if ds_remaining == 0:
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
start_ds_idx = ds_idx
start_offset = sent_sizes[ds_idx] - ds_remaining
while to_consume > ds_remaining:
to_consume -= ds_remaining
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
ds_remaining -= to_consume
dataset_index.append(
(
start_ds_idx, # starting index in dataset
start_offset, # starting offset within starting index
ds_idx, # ending index in dataset
sent_size, # sentence length
)
)
assert ds_remaining == 0
assert ds_idx == len(self.dataset) - 1
return dataset_index
def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
"""
Go through a single document and generate sentence pairs from it
"""
current_chunk = []
current_length = 0
curr = 0
# To provide more randomness, we decrease target seq length for parts of
# samples (10% by default). Note that max_num_tokens is the hard threshold
# for batching and will never be changed.
target_seq_length = max_num_tokens
if np.random.random() < self.short_seq_prob:
target_seq_length = np.random.randint(2, max_num_tokens)
# loop through all sentences in document
while curr < len(doc):
sent_id = doc[curr]
current_chunk.append(sent_id)
current_length = sum(sizes[current_chunk])
# split chunk and generate pair when exceed target_seq_length or
# finish the loop
if curr == len(doc) - 1 or current_length >= target_seq_length:
# split the chunk into 2 parts
a_end = 1
if len(current_chunk) > 2:
a_end = np.random.randint(1, len(current_chunk) - 1)
sent_a = current_chunk[:a_end]
len_a = sum(sizes[sent_a])
# generate next sentence label, note that if there is only 1 sentence
# in current chunk, label is always 0
next_sent_label = (
1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0
)
if not next_sent_label:
# if next sentence label is 0, sample sent_b from a random doc
target_b_length = target_seq_length - len_a
rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
random_doc = self.block_indices[rand_doc_id]
random_start = np.random.randint(0, len(random_doc))
sent_b = []
len_b = 0
for j in range(random_start, len(random_doc)):
sent_b.append(random_doc[j])
len_b = sum(sizes[sent_b])
if len_b >= target_b_length:
break
# put the unused second part of the chunk back so it is processed later
num_unused_segments = len(current_chunk) - a_end
curr -= num_unused_segments
else:
# if next sentence label is 1, use the second part of the chunk as sent_b
sent_b = current_chunk[a_end:]
len_b = sum(sizes[sent_b])
# currently sent_a and sent_b may be longer than max_num_tokens,
# truncate them and return block idx and offsets for them
sent_a, sent_b = self._truncate_sentences(
sent_a, sent_b, max_num_tokens
)
self.sent_pairs.append((sent_a, sent_b, next_sent_label))
self.sizes.append(3 + sent_a[3] + sent_b[3])
current_chunk = []
curr += 1
def _skip_sampling(self, total, skip_ids):
"""
Generate a random integer which is not in skip_ids. Sample range is [0, total)
TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later
"""
rand_id = np.random.randint(total - len(skip_ids))
return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)
def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
"""
Truncate a pair of sentences to keep the total length under max_num_tokens
Logic:
1. Truncate the longer sentence
2. Tokens to be truncated could be at the beginning or the end of the sentence
Returns:
Truncated sentences represented by dataset idx
"""
len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b])
front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
while True:
total_length = (
len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b
)
if total_length <= max_num_tokens:
break
if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b:
if np.random.rand() < 0.5:
front_cut_a += 1
else:
end_cut_a += 1
else:
if np.random.rand() < 0.5:
front_cut_b += 1
else:
end_cut_b += 1
# calculate ds indices as well as offsets and return
truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
return truncated_sent_a, truncated_sent_b
def _cut_sentence(self, sent, front_cut, end_cut):
"""
Cut a sentence based on the numbers of tokens to be cut from beginning and end
Represent the sentence as dataset idx and return
"""
start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0
target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut
while front_cut > 0:
if self.dataset.sizes[start_ds_idx] > front_cut:
offset += front_cut
break
else:
front_cut -= self.dataset.sizes[start_ds_idx]
start_ds_idx += 1
while end_cut > 0:
if self.dataset.sizes[end_ds_idx] > end_cut:
break
else:
end_cut -= self.dataset.sizes[end_ds_idx]
end_ds_idx -= 1
return start_ds_idx, offset, end_ds_idx, target_len
def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
"""
Fetch a block of tokens based on its dataset idx
"""
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
s, e = offset, offset + length
return buffer[s:e]
def __getitem__(self, index):
block1, block2, next_sent_label = self.sent_pairs[index]
block1 = self._fetch_block(*block1)
block2 = self._fetch_block(*block2)
return block1, block2, next_sent_label
def __len__(self):
return len(self.sizes)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
prefetch_idx = set()
for index in indices:
for block1, block2, _ in [self.sent_pairs[index]]:
for ds_idx in range(block1[0], block1[2] + 1):
prefetch_idx.add(ds_idx)
for ds_idx in range(block2[0], block2[2] + 1):
prefetch_idx.add(ds_idx)
self.dataset.prefetch(prefetch_idx)
|
COCO-LM/fairseq/fairseq/data/legacy/block_pair_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/legacy/block_pair_dataset.py",
"repo_id": "COCO-LM",
"token_count": 6450
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
def _flatten(dico, prefix=None):
"""Flatten a nested dictionary."""
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = prefix + "." if prefix is not None else ""
for k, v in dico.items():
if v is None:
continue
new_dico.update(_flatten(v, prefix + k))
elif isinstance(dico, list):
for i, v in enumerate(dico):
new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]"))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
def _unflatten(dico):
"""Unflatten a flattened dictionary into a nested dictionary."""
new_dico = OrderedDict()
for full_k, v in dico.items():
full_k = full_k.split(".")
node = new_dico
for k in full_k[:-1]:
if k.startswith("[") and k.endswith("]"):
k = int(k[1:-1])
if k not in node:
node[k] = OrderedDict()
node = node[k]
node[full_k[-1]] = v
return new_dico
class NestedDictionaryDataset(FairseqDataset):
def __init__(self, defn, sizes=None):
super().__init__()
self.defn = _flatten(defn)
self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes
first = None
for v in self.defn.values():
if not isinstance(
v,
(
FairseqDataset,
torch.utils.data.Dataset,
),
):
raise ValueError("Expected Dataset but found: {}".format(v.__class__))
first = first or v
if len(v) > 0:
assert len(v) == len(first), "dataset lengths must match"
self._len = len(first)
def __getitem__(self, index):
return OrderedDict((k, ds[index]) for k, ds in self.defn.items())
def __len__(self):
return self._len
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
if len(samples) == 0:
return {}
sample = OrderedDict()
for k, ds in self.defn.items():
try:
sample[k] = ds.collater([s[k] for s in samples])
except NotImplementedError:
sample[k] = default_collate([s[k] for s in samples])
return _unflatten(sample)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(s[index] for s in self.sizes)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if len(self.sizes) == 1:
return self.sizes[0][index]
else:
return tuple(s[index] for s in self.sizes)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return any(ds.supports_prefetch for ds in self.defn.values())
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
for ds in self.defn.values():
if getattr(ds, "supports_prefetch", False):
ds.prefetch(indices)
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values())
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.defn.values():
ds.set_epoch(epoch)
|
COCO-LM/fairseq/fairseq/data/nested_dictionary_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/nested_dictionary_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1898
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from fairseq.data import Dictionary, data_utils
from . import BaseWrapperDataset, LRUCacheDataset
class SpanDataset(BaseWrapperDataset):
"""
A wrapper Dataset for sampling a contiguous span from each item
"""
def __init__(
self,
dataset: torch.utils.data.Dataset,
seed: int = 1,
span: float = 0,
):
self.dataset = LRUCacheDataset(dataset)
self.span = span
self.epoch = 0
self.seed = seed
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.seed, self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, seed: int, epoch: int, index: int):
with data_utils.numpy_seed(self.seed, self.epoch, index):
item = self.dataset[index]
sz = len(item)
if self.span > 1:
span_length = min(int(self.span), sz)
else:
span_length = int(self.span * sz)
start_idx = np.random.randint(0, sz - span_length + 1)
new_item = item.clone()
# it seems [CLS] could be removed in span, is that expected?
return new_item[start_idx: start_idx + span_length]
|
COCO-LM/fairseq/fairseq/data/span_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/span_dataset.py",
"repo_id": "COCO-LM",
"token_count": 687
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import FairseqConfig
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose, initialize
from omegaconf import DictConfig, OmegaConf, open_dict
logger = logging.getLogger(__name__)
def eval_str_list(x, x_type=float):
if x is None:
return None
if isinstance(x, str):
if len(x) == 0:
return []
x = ast.literal_eval(x)
try:
return list(map(x_type, x))
except TypeError:
return [x_type(x)]
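# e.g. eval_str_list("[0.1, 0.2]", float) -> [0.1, 0.2]; a bare scalar
# string such as "0.1" also becomes [0.1]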
def interpret_dc_type(field_type):
if isinstance(field_type, str):
raise RuntimeError("field should be a type")
if field_type == Any:
return str
typestring = str(field_type)
if re.match(
r"(typing.|^)Union\[(.*), NoneType\]$", typestring
) or typestring.startswith("typing.Optional"):
return field_type.__args__[0]
return field_type
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: FairseqDataclass,
delete_default: bool = False,
) -> None:
"""convert a dataclass instance to tailing parser arguments"""
def argparse_name(name: str):
if name == "data":
# normally data is positional args
return name
if name == "_name":
# private member, skip
return None
return "--" + name.replace("_", "-")
def get_kwargs_from_dc(
dataclass_instance: FairseqDataclass, k: str
) -> Dict[str, Any]:
"""k: dataclass attributes"""
kwargs = {}
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
field_const = dataclass_instance._get_argparse_const(k)
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (
isinstance(inter_type, type)
and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))
) or ("List" in str(inter_type) or "Tuple" in str(inter_type)):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError(
"parsing of type " + str(inter_type) + " is not implemented"
)
if field_default is not MISSING:
kwargs["default"] = (
",".join(map(str, field_default))
if field_default is not None
else None
)
elif (
isinstance(inter_type, type) and issubclass(inter_type, Enum)
) or "Enum" in str(inter_type):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = (
"store_false" if field_default is True else "store_true"
)
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
kwargs["help"] = field_help
if field_const is not None:
kwargs["const"] = field_const
kwargs["nargs"] = "?"
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
field_type = dataclass_instance._get_type(k)
if field_name is None:
continue
elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass):
gen_parser_from_dataclass(parser, field_type(), delete_default)
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
field_args = [field_name]
alias = dataclass_instance._get_argparse_alias(k)
if alias is not None:
field_args.append(alias)
if "default" in kwargs:
if isinstance(kwargs["default"], str) and kwargs["default"].startswith(
"${"
):
if kwargs["help"] is None:
# this is a field with a name that will be added elsewhere
continue
else:
del kwargs["default"]
if delete_default and "default" in kwargs:
del kwargs["default"]
try:
parser.add_argument(*field_args, **kwargs)
except ArgumentError:
pass
def _set_legacy_defaults(args, cls):
"""Helper to set default arguments based on *add_args*."""
if not hasattr(cls, "add_args"):
return
import argparse
parser = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS, allow_abbrev=False
)
cls.add_args(parser)
# copied from argparse.py:
defaults = argparse.Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(defaults, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(defaults, action.dest, action.default)
for key, default_value in vars(defaults).items():
if not hasattr(args, key):
setattr(args, key, default_value)
def _override_attr(
sub_node: str, data_class: Type[FairseqDataclass], args: Namespace
) -> List[str]:
overrides = []
if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass):
return overrides
def get_default(f):
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
for k, v in data_class.__dataclass_fields__.items():
if k.startswith("_"):
# private member, skip
continue
val = get_default(v) if not hasattr(args, k) else getattr(args, k)
field_type = interpret_dc_type(v.type)
if (
isinstance(val, str)
and not val.startswith("${") # not interpolation
and field_type != str
and (
not inspect.isclass(field_type) or not issubclass(field_type, Enum)
) # not choices enum
):
# upgrade old models that stored complex parameters as string
val = ast.literal_eval(val)
if isinstance(val, tuple):
val = list(val)
v_type = getattr(v.type, "__origin__", None)
if (
(v_type is List or v_type is list or v_type is Optional)
# skip interpolation
and not (isinstance(val, str) and val.startswith("${"))
):
# if type is int but val is float, then we will crash later - try to convert here
if hasattr(v.type, "__args__"):
t_args = v.type.__args__
if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int):
val = list(map(t_args[0], val))
elif val is not None and (
field_type is int or field_type is bool or field_type is float
):
try:
val = field_type(val)
except Exception:
pass # ignore errors here, they are often from interpolation args
if val is None:
overrides.append("{}.{}=null".format(sub_node, k))
elif val == "":
overrides.append("{}.{}=''".format(sub_node, k))
elif isinstance(val, str):
val = val.replace("'", r"\'")
overrides.append("{}.{}='{}'".format(sub_node, k, val))
elif isinstance(val, FairseqDataclass):
overrides += _override_attr(f"{sub_node}.{k}", type(val), args)
elif isinstance(val, Namespace):
sub_overrides, _ = override_module_args(val)
for so in sub_overrides:
overrides.append(f"{sub_node}.{k}.{so}")
else:
overrides.append("{}.{}={}".format(sub_node, k, val))
return overrides
def migrate_registry(
name, value, registry, args, overrides, deletes, use_name_as_val=False
):
if value in registry:
overrides.append("{}={}".format(name, value))
overrides.append("{}._name={}".format(name, value))
overrides.extend(_override_attr(name, registry[value], args))
elif use_name_as_val and value is not None:
overrides.append("{}={}".format(name, value))
else:
deletes.append(name)
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
for k in FairseqConfig.__dataclass_fields__.keys():
overrides.extend(
_override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args)
)
if args is not None:
if hasattr(args, "task"):
from fairseq.tasks import TASK_DATACLASS_REGISTRY
migrate_registry(
"task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes
)
else:
deletes.append("task")
# these options will be set to "None" if they have not yet been migrated
# so we can populate them with the entire flat args
CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"}
from fairseq.registry import REGISTRIES
for k, v in REGISTRIES.items():
if hasattr(args, k):
migrate_registry(
k,
getattr(args, k),
v["dataclass_registry"],
args,
overrides,
deletes,
use_name_as_val=k not in CORE_REGISTRIES,
)
else:
deletes.append(k)
no_dc = True
if hasattr(args, "arch"):
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY
if args.arch in ARCH_MODEL_REGISTRY:
m_cls = ARCH_MODEL_REGISTRY[args.arch]
dc = getattr(m_cls, "__dataclass", None)
if dc is not None:
m_name = ARCH_MODEL_NAME_REGISTRY[args.arch]
overrides.append("model={}".format(m_name))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(_override_attr("model", dc, args))
no_dc = False
if no_dc:
deletes.append("model")
return overrides, deletes
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
"""Convert a flat argparse.Namespace to a structured DictConfig."""
# Here we are using field values provided in args to override counterparts inside config object
overrides, deletes = override_module_args(args)
# configs will be in fairseq/config after installation
config_path = os.path.join("..", "config")
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose("config", overrides=overrides, strict=False)
except Exception:
logger.error("Error when composing. Overrides: " + str(overrides))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(
OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
)
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
if cfg.task is None and getattr(args, "task", None):
cfg.task = Namespace(**vars(args))
from fairseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if cfg.model is None and getattr(args, "arch", None):
cfg.model = Namespace(**vars(args))
from fairseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if cfg.optimizer is None and getattr(args, "optimizer", None):
cfg.optimizer = Namespace(**vars(args))
from fairseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
cfg.lr_scheduler = Namespace(**vars(args))
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
_set_legacy_defaults(cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler])
cfg.lr_scheduler._name = args.lr_scheduler
if cfg.criterion is None and getattr(args, "criterion", None):
cfg.criterion = Namespace(**vars(args))
from fairseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(cfg, True)
return cfg
def populate_dataclass(
dataclass: FairseqDataclass,
args: Namespace,
) -> FairseqDataclass:
for k in dataclass.__dataclass_fields__.keys():
if k.startswith("_"):
# private member, skip
continue
if hasattr(args, k):
setattr(dataclass, k, getattr(args, k))
return dataclass
def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, Any]):
# this will be deprecated when we get rid of argparse and model_overrides logic
from fairseq.registry import REGISTRIES
with open_dict(cfg):
for k in cfg.keys():
# "k in cfg" will return false if its a "mandatory value (e.g. ???)"
if k in cfg and isinstance(cfg[k], DictConfig):
if k in overrides and isinstance(overrides[k], dict):
for ok, ov in overrides[k].items():
if isinstance(ov, dict) and cfg[k][ok] is not None:
overwrite_args_by_name(cfg[k][ok], ov)
else:
cfg[k][ok] = ov
else:
overwrite_args_by_name(cfg[k], overrides)
elif k in cfg and isinstance(cfg[k], Namespace):
for override_key, val in overrides.items():
setattr(cfg[k], override_key, val)
elif k in overrides:
if (
k in REGISTRIES
and overrides[k] in REGISTRIES[k]["dataclass_registry"]
):
cfg[k] = DictConfig(
REGISTRIES[k]["dataclass_registry"][overrides[k]]
)
overwrite_args_by_name(cfg[k], overrides)
cfg[k]._name = overrides[k]
else:
cfg[k] = overrides[k]
def merge_with_parent(dc: FairseqDataclass, cfg: FairseqDataclass):
merged_cfg = OmegaConf.merge(dc, cfg)
merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"]
OmegaConf.set_struct(merged_cfg, True)
return merged_cfg
|
COCO-LM/fairseq/fairseq/dataclass/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/dataclass/utils.py",
"repo_id": "COCO-LM",
"token_count": 8103
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from numbers import Number
from typing import Optional
import torch
from .meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = "tqdm",
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
azureml_logging: Optional[bool] = False,
):
if log_format is None:
log_format = default_log_format
if log_format == "tqdm" and not sys.stderr.isatty():
log_format = "simple"
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "none":
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == "simple":
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "tqdm":
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
if azureml_logging:
bar = AzureMLProgressBarWrapper(bar)
return bar
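# Hedged usage sketch:
#   bar = progress_bar(iterator, log_format="simple", epoch=1)
#   for i, sample in enumerate(bar):
#       bar.log({"loss": 1.23}, step=i + 1)
#   bar.print({"loss": 1.23})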
def build_progress_bar(
args,
iterator,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default: str = "tqdm",
no_progress_bar: str = "none",
):
"""Legacy wrapper that takes an argparse.Namespace."""
if getattr(args, "no_progress_bar", False):
default = no_progress_bar
if getattr(args, "distributed_rank", 0) == 0:
tensorboard_logdir = getattr(args, "tensorboard_logdir", None)
else:
tensorboard_logdir = None
return progress_bar(
iterator,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=tensorboard_logdir,
default_log_format=default,
)
def format_stat(stat):
if isinstance(stat, Number):
stat = "{:g}".format(stat)
elif isinstance(stat, AverageMeter):
stat = "{:.3f}".format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = "{:g}".format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = "{:g}".format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, "n", 0)
self.epoch = epoch
self.prefix = ""
if epoch is not None:
self.prefix += "epoch {:03d}".format(epoch)
if prefix is not None:
self.prefix += (" | " if self.prefix != "" else "") + prefix
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def update_config(self, config):
"""Log latest configuration."""
pass
def _str_commas(self, stats):
return ", ".join(key + "=" + stats[key].strip() for key in stats.keys())
def _str_pipes(self, stats):
return " | ".join(key + " " + stats[key].strip() for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if step > 0 and self.log_interval is not None and step % self.log_interval == 0:
update = (
self.epoch - 1 + (self.i + 1) / float(self.size)
if self.epoch is not None
else None
)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict(
[(tag + "_" + k, v) for k, v in self.stats.items()]
)
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix["epoch"] = epoch
if update is not None:
postfix["update"] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class NoopProgressBar(BaseProgressBar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
pass
class SimpleProgressBar(BaseProgressBar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if step > 0 and self.log_interval is not None and step % self.log_interval == 0:
stats = self._format_stats(stats)
postfix = self._str_commas(stats)
with rename_logger(logger, tag):
logger.info(
"{}: {:5d} / {:d} {}".format(
self.prefix, self.i + 1, self.size, postfix
)
)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info("{} | {}".format(self.prefix, postfix))
class TqdmProgressBar(BaseProgressBar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(
iterable,
self.prefix,
leave=False,
disable=(logger.getEffectiveLevel() > logging.INFO),
)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info("{} | {}".format(self.prefix, postfix))
try:
_tensorboard_writers = {}
from torch.utils.tensorboard import SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
except ImportError:
SummaryWriter = None
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard not found, please install with: pip install tensorboard"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text("sys.argv", " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
# TODO add hparams to Tensorboard
self.wrapped_bar.update_config(config)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or "")
if writer is None:
return
if step is None:
step = stats["num_updates"]
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
elif torch.is_tensor(stats[key]) and stats[key].numel() == 1:
writer.add_scalar(key, stats[key].item(), step)
writer.flush()
try:
import wandb
except ImportError:
wandb = None
class WandBProgressBarWrapper(BaseProgressBar):
"""Log to Weights & Biases."""
def __init__(self, wrapped_bar, wandb_project, run_name=None):
self.wrapped_bar = wrapped_bar
if wandb is None:
logger.warning("wandb not found, pip install wandb")
return
# reinit=False ensures that if wandb.init() is called multiple times
# within one process, it still references the same run
wandb.init(project=wandb_project, reinit=False, name=run_name)
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_wandb(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
if wandb is not None:
wandb.config.update(config)
self.wrapped_bar.update_config(config)
def _log_to_wandb(self, stats, tag=None, step=None):
if wandb is None:
return
if step is None:
step = stats["num_updates"]
prefix = "" if tag is None else tag + "/"
for key in stats.keys() - {"num_updates"}:
if isinstance(stats[key], AverageMeter):
wandb.log({prefix + key: stats[key].val}, step=step)
elif isinstance(stats[key], Number):
wandb.log({prefix + key: stats[key]}, step=step)
try:
from azureml.core import Run
except ImportError:
Run = None
class AzureMLProgressBarWrapper(BaseProgressBar):
"""Log to Azure ML"""
def __init__(self, wrapped_bar):
self.wrapped_bar = wrapped_bar
if Run is None:
logger.warning("azureml.core not found, pip install azureml-core")
return
self.run = Run.get_context()
def __exit__(self, *exc):
if Run is not None:
self.run.complete()
return False
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to AzureML"""
self._log_to_azureml(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats"""
self._log_to_azureml(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def update_config(self, config):
"""Log latest configuration."""
self.wrapped_bar.update_config(config)
def _log_to_azureml(self, stats, tag=None, step=None):
if Run is None:
return
if step is None:
step = stats['num_updates']
prefix = '' if tag is None else tag + '/'
for key in stats.keys() - {'num_updates'}:
name = prefix + key
if isinstance(stats[key], AverageMeter):
self.run.log_row(name=name, **{'step': step, key: stats[key].val})
elif isinstance(stats[key], Number):
self.run.log_row(name=name, **{'step': step, key: stats[key]})
|
COCO-LM/fairseq/fairseq/logging/progress_bar.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/logging/progress_bar.py",
"repo_id": "COCO-LM",
"token_count": 6660
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent, populate_dataclass
from hydra.core.config_store import ConfigStore
from .composite_encoder import CompositeEncoder
from .distributed_fairseq_model import DistributedFairseqModel
from .fairseq_decoder import FairseqDecoder
from .fairseq_encoder import FairseqEncoder
from .fairseq_incremental_decoder import FairseqIncrementalDecoder
from .fairseq_model import (
BaseFairseqModel,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqLanguageModel,
FairseqModel,
FairseqMultiModel,
)
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
__all__ = [
"BaseFairseqModel",
"CompositeEncoder",
"DistributedFairseqModel",
"FairseqDecoder",
"FairseqEncoder",
"FairseqEncoderDecoderModel",
"FairseqEncoderModel",
"FairseqIncrementalDecoder",
"FairseqLanguageModel",
"FairseqModel",
"FairseqMultiModel",
]
def build_model(cfg: FairseqDataclass, task):
model = None
model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)
if not model_type and len(cfg) == 1:
# this is hit if config object is nested in directory that is named after model type
model_type = next(iter(cfg))
if model_type in MODEL_DATACLASS_REGISTRY:
cfg = cfg[model_type]
else:
raise Exception(
"Could not infer model type from directory. Please add _name field to indicate model type. "
"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
if model_type in ARCH_MODEL_REGISTRY:
# case 1: legacy models
model = ARCH_MODEL_REGISTRY[model_type]
elif model_type in MODEL_DATACLASS_REGISTRY:
# case 2: config-driven models
model = MODEL_REGISTRY[model_type]
if model_type in MODEL_DATACLASS_REGISTRY:
# set defaults from dataclass. note that arch name and model name can be the same
dc = MODEL_DATACLASS_REGISTRY[model_type]
if isinstance(cfg, argparse.Namespace):
cfg = populate_dataclass(dc(), cfg)
else:
cfg = merge_with_parent(dc(), cfg)
assert model is not None, (
f"Could not infer model type from {cfg}. "
f"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
return model.build_model(cfg, task)
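
# Illustrative resolution walk-through for build_model() (assumed config shapes):
#   - cfg with _name="transformer_lm": resolved via MODEL_DATACLASS_REGISTRY,
#     with defaults merged in from the registered dataclass
#   - argparse.Namespace with arch="transformer_lm_gpt": resolved via the
#     legacy ARCH_MODEL_REGISTRY (--arch) route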
def register_model(name, dataclass=None):
"""
New model types can be added to fairseq with the :func:`register_model`
function decorator.
For example::
@register_model('lstm')
class LSTM(FairseqEncoderDecoderModel):
(...)
.. note:: All models must implement the :class:`BaseFairseqModel` interface.
Typically you will extend :class:`FairseqEncoderDecoderModel` for
sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
language modeling tasks.
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError("Cannot register duplicate model ({})".format(name))
if not issubclass(cls, BaseFairseqModel):
raise ValueError(
"Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__)
)
MODEL_REGISTRY[name] = cls
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
MODEL_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="model", node=node, provider="fairseq")
@register_model_architecture(name, name)
def noop(_):
pass
return cls
return register_model_cls
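
# A minimal sketch of the dataclass-driven registration route (hypothetical
# model, shown only for illustration):
#
#   @dataclass
#   class TinyLMConfig(FairseqDataclass):
#       embed_dim: int = field(default=64, metadata={"help": "embedding dim"})
#
#   @register_model("tiny_lm", dataclass=TinyLMConfig)
#   class TinyLM(FairseqLanguageModel):
#       @classmethod
#       def build_model(cls, cfg, task):
#           ...
#
# Registering with a dataclass also stores a Hydra config node under the
# "model" group, so the model becomes addressable as model=tiny_lm.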
def register_model_architecture(model_name, arch_name):
"""
New model architectures can be added to fairseq with the
:func:`register_model_architecture` function decorator. After registration,
model architectures can be selected with the ``--arch`` command-line
argument.
For example::
        @register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
        def lstm_luong_wmt_en_de(cfg):
            cfg.encoder_embed_dim = getattr(cfg, 'encoder_embed_dim', 1000)
(...)
The decorated function should take a single argument *cfg*, which is a
:class:`omegaconf.DictConfig`. The decorated function should modify these
arguments in-place to match the desired architecture.
Args:
model_name (str): the name of the Model (Model must already be
registered)
arch_name (str): the name of the model architecture (``--arch``)
"""
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError(
"Cannot register model architecture for unknown model type ({})".format(
model_name
)
)
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError(
"Cannot register duplicate model architecture ({})".format(arch_name)
)
if not callable(fn):
raise ValueError(
"Model architecture must be callable ({})".format(arch_name)
)
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.models." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group("Additional command-line arguments")
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
|
COCO-LM/fairseq/fairseq/models/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/__init__.py",
"repo_id": "COCO-LM",
"token_count": 3133
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from typing import Dict, List, Optional
import torch
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("hf_gpt2")
class HuggingFaceGPT2LanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--embed-dim', type=int, metavar='N',
help='embedding dimension')
parser.add_argument('--num-attention-heads', type=int, metavar='N',
help='num attention heads')
parser.add_argument('--num-layers', type=int, metavar='N',
help='num layers')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability for all fully connected layers '
'in the embeddings, encoder, and pooler')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
default_architecture(args)
return cls(HuggingFaceGPT2Decoder(args, task))
class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder):
def __init__(self, args, task):
try:
from transformers import GPT2Config, GPT2LMHeadModel
except ImportError:
raise ImportError(
"\n\nPlease install huggingface/transformers with:"
"\n\n pip install transformers"
)
super().__init__(task.target_dictionary)
config = GPT2Config(
vocab_size=len(task.target_dictionary),
n_positions=args.max_target_positions + 1,
n_ctx=args.max_target_positions,
n_embd=args.embed_dim,
n_layer=args.num_layers,
n_head=args.num_attention_heads,
resid_pdrop=args.dropout,
embd_pdrop=args.dropout,
attn_pdrop=args.attention_dropout,
layer_norm_epsilon=1e-6,
)
self.model = GPT2LMHeadModel(config)
# set zero embedding for padding symbol
self.pad_idx = task.target_dictionary.pad()
self.model.transformer.wte.weight.data[self.pad_idx].zero_()
self.model.transformer.wpe.weight.data[0].zero_()
def forward(
self,
prev_output_tokens,
src_lengths=None,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
features = self.extract_features(prev_output_tokens, incremental_state)
lm_logits = self.model.lm_head(features)
return (lm_logits,)
def extract_features(
self,
prev_output_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
):
if incremental_state:
past = self.get_incremental_state("past")
else:
past = None
# don't attend to padding symbols
attention_mask = prev_output_tokens.ne(self.pad_idx).int()
# set position ids to exclude padding symbols
position_ids = attention_mask * (
torch.arange(1, 1 + prev_output_tokens.size(1))
.to(prev_output_tokens)
.repeat(prev_output_tokens.size(0), 1)
)
outputs = self.model.transformer(
input_ids=prev_output_tokens,
past=past,
attention_mask=attention_mask,
position_ids=position_ids,
)
last_hidden_states = outputs[0]
if incremental_state:
self.set_incremental_state(incremental_state, "past", outputs[1])
return last_hidden_states
def max_positions(self):
return self.model.config.n_positions - 1
@register_model_architecture("hf_gpt2", "hf_gpt2")
def default_architecture(args):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
args.embed_dim = getattr(args, "embed_dim", 768)
args.num_attention_heads = getattr(args, "num_attention_heads", 12)
args.num_layers = getattr(args, "num_layers", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
@register_model_architecture("hf_gpt2", "hf_gpt2_medium")
def hf_gpt2_medium(args):
args.embed_dim = getattr(args, "embed_dim", 1024)
args.num_attention_heads = getattr(args, "num_attention_heads", 16)
args.num_layers = getattr(args, "num_layers", 24)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_large")
def hf_gpt2_large(args):
args.embed_dim = getattr(args, "embed_dim", 1280)
args.num_attention_heads = getattr(args, "num_attention_heads", 20)
args.num_layers = getattr(args, "num_layers", 36)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_xl")
def hf_gpt2_xl(args):
args.embed_dim = getattr(args, "embed_dim", 1600)
args.num_attention_heads = getattr(args, "num_attention_heads", 25)
args.num_layers = getattr(args, "num_layers", 48)
default_architecture(args)
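
# Illustrative CLI usage (a sketch; assumes a preprocessed LM dataset at
# data-bin/wikitext-103 and the transformers package installed):
#
#   fairseq-train data-bin/wikitext-103 \
#       --task language_modeling --arch hf_gpt2 \
#       --optimizer adam --lr 0.0005 \
#       --tokens-per-sample 1024 --max-tokens 4096
#
# The hf_gpt2_{medium,large,xl} variants above only change width, head count
# and depth before delegating to default_architecture().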
|
COCO-LM/fairseq/fairseq/models/huggingface/hf_gpt2.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/huggingface/hf_gpt2.py",
"repo_id": "COCO-LM",
"token_count": 2626
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.models.nat import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_skip,
_skip_encoder_out,
)
class _EnsembleModelEncoder(object):
def __init__(self, models):
self.models = models
def reorder_encoder_out(self, encoder_outs, new_order):
encoder_outs = [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
return encoder_outs
class BasicEnsembleModel(torch.nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.bos = self.models[0].decoder.dictionary.bos()
self.eos = self.models[0].decoder.dictionary.eos()
self.pad = self.models[0].decoder.dictionary.pad()
self.unk = self.models[0].decoder.dictionary.unk()
self.encoder = _EnsembleModelEncoder(self.models)
def has_encoder(self):
return hasattr(self.models[0], "encoder")
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, encoder_input):
if not self.has_encoder():
return None
return [model.forward_encoder(encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, *inputs):
raise NotImplementedError
def initialize_output_tokens(self, *inputs):
raise NotImplementedError
class EnsembleLevT(BasicEnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
@torch.no_grad()
def forward_decoder(
self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs
):
# LevT ensembling
# A pipeline of three steps: deletion, placeholder, and word insertion.
# We need to average scores in each step in a pipeline way because of dependence.
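        # Averaging is done in log space: for M models,
        #   log((1/M) * sum_i p_i) = logsumexp_i(log p_i) - log(M),
        # which is the torch.logsumexp(...) - math.log(len(self.models))
        # pattern used in forward_word_del / forward_mask_ins / forward_word_ins.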
# deletion
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
bsz = output_tokens.size(0)
if max_ratio is None:
            max_lens = output_tokens.new_full((bsz,), 255)
else:
if not encoder_outs[0]["encoder_padding_mask"]:
src_lens = (
encoder_outs[0]["encoder_out"][0].new(bsz)
.fill_(encoder_outs[0]["encoder_out"][0].size(1))
)
else:
src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
# do not delete tokens if it is <s> </s>
can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip the deletion step when nothing can be deleted
output_tokens, output_scores, attn = self.forward_word_del(
encoder_outs,
output_tokens,
output_scores,
attn,
can_del_word,
)
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
output_tokens, output_scores = self.forward_mask_ins(
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
)
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
output_tokens, output_scores, attn = self.forward_word_ins(
encoder_outs,
output_tokens,
output_scores,
attn,
can_ins_word,
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=None,
)
def forward_word_del(
self, encoder_outs, output_tokens, output_scores, attn, can_del_word
):
word_del_score_avg = []
word_del_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_del_out, word_del_attn = model.decoder.forward_word_del(
_skip(output_tokens, can_del_word),
_skip_encoder_out(model.encoder, encoder_out, can_del_word),
)
word_del_score = F.log_softmax(word_del_out, 2)
word_del_score_avg.append(word_del_score)
word_del_attn_avg.append(word_del_attn)
word_del_score_avg = torch.logsumexp(
torch.stack(word_del_score_avg, dim=0), dim=0
) - math.log(len(self.models))
word_del_pred = word_del_score_avg.max(-1)[1].bool()
if word_del_attn_avg[0] is not None:
            word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0).mean(dim=0)
else:
word_del_attn_avg = None
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn_avg,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
return output_tokens, output_scores, attn
def forward_mask_ins(
self,
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
):
mask_ins_score_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
mask_ins_out, _ = model.decoder.forward_mask_ins(
_skip(output_tokens, can_ins_mask),
_skip_encoder_out(model.encoder, encoder_out, can_ins_mask),
)
mask_ins_score = F.log_softmax(mask_ins_out, 2)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] -= eos_penalty
mask_ins_score_avg.append(mask_ins_score)
mask_ins_score_avg = torch.logsumexp(
torch.stack(mask_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
mask_ins_pred = mask_ins_score_avg.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
return output_tokens, output_scores
def forward_word_ins(
self, encoder_outs, output_tokens, output_scores, attn, can_ins_word
):
word_ins_score_avg = []
word_ins_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_ins_out, word_ins_attn = model.decoder.forward_word_ins(
_skip(output_tokens, can_ins_word),
_skip_encoder_out(model.encoder, encoder_out, can_ins_word),
)
word_ins_score = F.log_softmax(word_ins_out, 2)
word_ins_score_avg.append(word_ins_score)
word_ins_attn_avg.append(word_ins_attn)
word_ins_score_avg = torch.logsumexp(
torch.stack(word_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
if word_ins_attn_avg[0] is not None:
            word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0).mean(dim=0)
else:
word_ins_attn_avg = None
word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score_max,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
        attn = _fill(attn, can_ins_word, word_ins_attn_avg, 0.0)
return output_tokens, output_scores, attn
def initialize_output_tokens(self, encoder_outs, src_tokens):
# LevT doesn't do length prediction.
return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
|
COCO-LM/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py",
"repo_id": "COCO-LM",
"token_count": 4769
}
| 193 |
from .squad_head import * # noqa
|
COCO-LM/fairseq/fairseq/models/squad/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/squad/__init__.py",
"repo_id": "COCO-LM",
"token_count": 12
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn.modules.utils import _single
from torch import Tensor
class ConvTBC(torch.nn.Module):
"""1D convolution over an input of shape (time x batch x channel)
The implementation uses gemm to perform the convolution. This implementation
is faster than cuDNN for small kernel sizes.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(
torch.Tensor(self.kernel_size[0], in_channels, out_channels)
)
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_normal_(self.weight)
nn.init.zeros_(self.bias)
def conv_tbc(self, input: Tensor):
return torch.conv_tbc(
input.contiguous(), self.weight, self.bias, self.padding[0]
)
def forward(self, input: Tensor):
return self.conv_tbc(input)
def __repr__(self):
s = (
"{name}({in_channels}, {out_channels}, kernel_size={kernel_size}"
", padding={padding}"
)
if self.bias is None:
s += ", bias=False"
s += ")"
return s.format(name=self.__class__.__name__, **self.__dict__)
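
# Shape sketch (illustrative): ConvTBC consumes and produces
# (time, batch, channel) tensors directly, so no transposes are needed.
#
#   conv = ConvTBC(in_channels=256, out_channels=512, kernel_size=3, padding=1)
#   x = torch.randn(50, 8, 256)   # T x B x C
#   y = conv(x)                   # -> (50, 8, 512)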
|
COCO-LM/fairseq/fairseq/modules/conv_tbc.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/conv_tbc.py",
"repo_id": "COCO-LM",
"token_count": 701
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""
import math
import torch
import torch.nn as nn
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
    # safe to use fp16/bf16 here: PyTorch's fused GELU computes internally in fp32
return torch.nn.functional.gelu(x)
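
# Quick comparison of the two variants (illustrative):
#
#   x = torch.randn(4)
#   torch.allclose(gelu(x), gelu_accurate(x), atol=1e-3)  # True
#
# gelu() dispatches to PyTorch's exact erf-based implementation, while
# gelu_accurate() is the tanh approximation from the paper above.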
|
COCO-LM/fairseq/fairseq/modules/gelu.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/gelu.py",
"repo_id": "COCO-LM",
"token_count": 295
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
try:
    import fused_softmax_dropout_cuda
except ImportError:
    fused_softmax_dropout_cuda = None
    print("failed to import fused_softmax_dropout_cuda, please install fused_ops", file=sys.stderr)
class SoftmaxDropout(torch.autograd.Function):
    @staticmethod
    def forward(ctx, is_training, heads, inputs, dropout_prob):
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        dropout_results, dropout_mask, softmax_results = fused_softmax_dropout_cuda.forward(
            is_training, heads, inputs, dropout_prob
        )
        if is_training:
            ctx.save_for_backward(
                heads_t, softmax_results, dropout_mask, dropout_prob_t
            )
            return dropout_results.detach()
        return softmax_results.detach()

    @staticmethod
    def backward(ctx, output_grads):
        heads_t, softmax_results, dropout_mask, dropout_prob_t = ctx.saved_tensors
        input_grads = fused_softmax_dropout_cuda.backward(
            heads_t[0], output_grads, softmax_results, dropout_mask, dropout_prob_t[0]
        )
        return None, None, input_grads, None
fast_softmax_dropout_func = SoftmaxDropout.apply
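
# Assumed semantics of the fused kernel wrapped above (a readability sketch,
# not the CUDA implementation itself):
#
#   probs = F.softmax(inputs, dim=-1)
#   out = F.dropout(probs, p=dropout_prob, training=is_training)
#
# Fusing both steps avoids materializing the intermediate probability tensor;
# at inference time the un-dropped softmax output is returned.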
class SelfMultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
scaling_factor=1,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = (self.head_dim * scaling_factor) ** -0.5
self.in_proj = nn.Linear(embed_dim, embed_dim * 3, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.onnx_trace = False
self.tpu = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def forward(
self,
query,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
attn_bias: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
        q, k, v = self.in_proj(query).chunk(3, dim=-1)
q = (
q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1) * self.scaling
)
if k is not None:
k = (
k.contiguous().view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous().view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not self.tpu:
attn_weights.masked_fill_(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if attn_bias is not None:
attn_weights += attn_bias
if before_softmax:
return attn_weights, v
attn_probs = fast_softmax_dropout_func(self.training, self.num_heads, attn_weights, self.dropout)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
ret_attn_weights = None
if need_weights:
            # note: these are the pre-softmax attention scores, since the fused
            # softmax+dropout kernel does not expose the normalized probabilities
            ret_attn_weights = attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                ret_attn_weights = ret_attn_weights.mean(dim=0)
return attn, ret_attn_weights
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
# self.dropout_module = FairseqDropout(
# dropout, module_name=self.__class__.__name__
# )
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
        # COCO-LM edit: use fused in_proj parameters instead of separate q/k/v Linear modules
if self.qkv_same_dim:
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
else:
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
# self.k_proj = quant_noise(
# nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
# )
# self.v_proj = quant_noise(
# nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
# )
# self.q_proj = quant_noise(
# nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
# )
# self.out_proj = quant_noise(
# nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
# )
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
        self.enable_torch_version = hasattr(F, "multi_head_attention_forward")
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
else:
nn.init.xavier_uniform_(self.k_proj_weight)
nn.init.xavier_uniform_(self.v_proj_weight)
nn.init.xavier_uniform_(self.q_proj_weight)
# if self.qkv_same_dim:
# # Empirically observed the convergence to be much better with
# # the scaled initialization
# nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
# nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
# nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
# else:
# nn.init.xavier_uniform_(self.k_proj.weight)
# nn.init.xavier_uniform_(self.v_proj.weight)
# nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj.bias, 0.)
# if self.out_proj.bias is not None:
# nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv:
if self.qkv_same_dim:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
self.in_proj_weight,
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask)
else:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
torch.empty([0]),
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
# if (
# not self.onnx_trace
# and not is_tpu # don't use PyTorch version on TPUs
# and incremental_state is None
# and not static_kv
# # A workaround for quantization to work. Otherwise JIT compilation
# # treats bias in linear module as method.
# and not torch.jit.is_scripting()
# ):
# assert key is not None and value is not None
# return F.multi_head_attention_forward(
# query,
# key,
# value,
# self.embed_dim,
# self.num_heads,
# torch.empty([0]),
# torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
# self.bias_k,
# self.bias_v,
# self.add_zero_attn,
# self.dropout_module.p,
# self.out_proj.weight,
# self.out_proj.bias,
# self.training or self.dropout_module.apply_during_inference,
# key_padding_mask,
# need_weights,
# attn_mask,
# use_separate_proj_weight=True,
# q_proj_weight=self.q_proj.weight,
# k_proj_weight=self.k_proj.weight,
# v_proj_weight=self.v_proj.weight,
# )
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
        if self.self_attention:
            # q/k/v come from slices of the fused in_proj parameters
            q, k, v = self._in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self._in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self._in_proj_k(key)
                v = self._in_proj_v(key)
        else:
            assert key is not None and value is not None
            q = self._in_proj_q(query)
            k = self._in_proj_k(key)
            v = self._in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
        # dropout_module was removed in the fused-parameter edit; apply functional dropout
        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
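
    # Fused-parameter projection helpers for the slow path above, modeled on
    # the legacy fairseq implementation: with qkv_same_dim the single
    # in_proj_weight is sliced into q/k/v blocks, otherwise the separate
    # *_proj_weight parameters are used. (Added because the fused-parameter
    # edit in __init__ removed the q_proj/k_proj/v_proj submodules that
    # forward() referenced.)
    def _in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def _in_proj_q(self, query):
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        bias = self.in_proj_bias
        if bias is not None:
            bias = bias[: self.embed_dim]
        return F.linear(query, self.q_proj_weight, bias)

    def _in_proj_k(self, key):
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        bias = self.in_proj_bias
        if bias is not None:
            bias = bias[self.embed_dim : 2 * self.embed_dim]
        return F.linear(key, self.k_proj_weight, bias)

    def _in_proj_v(self, value):
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        bias = self.in_proj_bias
        if bias is not None:
            bias = bias[2 * self.embed_dim :]
        return F.linear(value, self.v_proj_weight, bias)

    def _in_proj(self, input, start=0, end=None):
        weight = self.in_proj_weight[start:end, :]
        bias = self.in_proj_bias
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)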
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
|
COCO-LM/fairseq/fairseq/modules/multihead_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/multihead_attention.py",
"repo_id": "COCO-LM",
"token_count": 16513
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from ..ops import emulate_int
class IntConv2d(_ConvNd):
"""
Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.
Args:
- standard nn.Conv2d parameters
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network; this is implemented with
          the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
p=0,
bits=8,
method="histogram",
update_step=1000,
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(IntConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_pair(0),
groups,
bias,
padding_mode,
)
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def _conv_forward(self, input, weight):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
self.bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input,
weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # recompute scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = self._conv_forward(input, weight)
return output
def extra_repr(self):
return (
"in_channels={}, out_channels={}, kernel_size={}, stride={}, "
"padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
"bits={}, method={}".format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.p,
self.bits,
self.method,
)
)
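
# Illustrative usage (a sketch; scale and zero_point are recomputed lazily
# inside forward(), so the module drops in like a regular nn.Conv2d):
#
#   conv = IntConv2d(3, 16, kernel_size=3, padding=1, p=0.5, bits=8)
#   conv.train()
#   y = conv(torch.randn(2, 3, 32, 32))   # QuantNoise on a random half of weights
#   conv.eval()
#   y = conv(torch.randn(2, 3, 32, 32))   # fully quantized (p is forced to 1)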
|
COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py",
"repo_id": "COCO-LM",
"token_count": 2199
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections.abc import Iterable
from itertools import repeat
import torch
import torch.nn as nn
def _pair(v):
if isinstance(v, Iterable):
assert len(v) == 2, "len(v) != 2"
return v
return tuple(repeat(v, 2))
def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
# N x C x H x W
# N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
x = conv_op(x)
# N x C x H x W
x = x.transpose(1, 2)
# N x H x C x W
bsz, seq = x.size()[:2]
per_channel_dim = x.size()[3]
# bsz: N, seq: H, CxW the rest
return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim
class VGGBlock(torch.nn.Module):
"""
    VGG-motivated CNN module, cf. https://arxiv.org/pdf/1409.1556.pdf
Args:
in_channels: (int) number of input channels (typically 1)
out_channels: (int) number of output channels
conv_kernel_size: convolution channels
pooling_kernel_size: the size of the pooling window to take a max over
num_conv_layers: (int) number of convolution layers
input_dim: (int) input dimension
conv_stride: the stride of the convolving kernel.
Can be a single number or a tuple (sH, sW) Default: 1
padding: implicit paddings on both sides of the input.
Can be a single number or a tuple (padH, padW). Default: None
layer_norm: (bool) if layer norm is going to be applied. Default: False
Shape:
Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
"""
def __init__(
self,
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim,
conv_stride=1,
padding=None,
layer_norm=False,
):
assert (
input_dim is not None
), "Need input_dim for LayerNorm and infer_conv_output_dim"
super(VGGBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_kernel_size = _pair(conv_kernel_size)
self.pooling_kernel_size = _pair(pooling_kernel_size)
self.num_conv_layers = num_conv_layers
self.padding = (
tuple(e // 2 for e in self.conv_kernel_size)
if padding is None
else _pair(padding)
)
self.conv_stride = _pair(conv_stride)
self.layers = nn.ModuleList()
for layer in range(num_conv_layers):
conv_op = nn.Conv2d(
in_channels if layer == 0 else out_channels,
out_channels,
self.conv_kernel_size,
stride=self.conv_stride,
padding=self.padding,
)
self.layers.append(conv_op)
if layer_norm:
conv_output_dim, per_channel_dim = infer_conv_output_dim(
conv_op, input_dim, in_channels if layer == 0 else out_channels
)
self.layers.append(nn.LayerNorm(per_channel_dim))
input_dim = per_channel_dim
self.layers.append(nn.ReLU())
if self.pooling_kernel_size is not None:
pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
self.layers.append(pool_op)
self.total_output_dim, self.output_dim = infer_conv_output_dim(
pool_op, input_dim, out_channels
)
def forward(self, x):
        for layer in self.layers:
            x = layer(x)
return x
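
# Illustrative usage (a sketch): one block over 80-dim log-mel features.
#
#   block = VGGBlock(
#       in_channels=1, out_channels=32, conv_kernel_size=3,
#       pooling_kernel_size=2, num_conv_layers=2, input_dim=80, layer_norm=True,
#   )
#   x = torch.randn(8, 1, 100, 80)   # B x C x T x feat
#   y = block(x)                     # max-pooled along both T and feat
#   # block.output_dim / block.total_output_dim expose the flattened sizes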
|
COCO-LM/fairseq/fairseq/modules/vggblock.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/vggblock.py",
"repo_id": "COCO-LM",
"token_count": 1898
}
| 199 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("lamb")
class FairseqLAMB(LegacyFairseqOptimizer):
"""LAMB optimizer."""
def __init__(self, args, params):
super().__init__(args)
try:
from apex.optimizers import FusedLAMB
self._optimizer = FusedLAMB(params, **self.optimizer_config)
except ImportError:
raise ImportError("Please install apex to use LAMB optimizer")
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B',
help='betas for LAMB optimizer')
parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D',
help='epsilon for LAMB optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"betas": eval(self.args.lamb_betas),
"eps": self.args.lamb_eps,
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return False
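
# Illustrative CLI usage (assumes apex with FusedLAMB installed):
#
#   fairseq-train ... --optimizer lamb \
#       --lamb-betas '(0.9, 0.999)' --lamb-eps 1e-8 --weight-decay 0.01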
|
COCO-LM/fairseq/fairseq/optim/fused_lamb.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/fused_lamb.py",
"repo_id": "COCO-LM",
"token_count": 794
}
| 200 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
|
COCO-LM/fairseq/fairseq/pdb.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/pdb.py",
"repo_id": "COCO-LM",
"token_count": 518
}
| 201 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
Dictionary,
IdDataset,
LMContextWindowDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
TruncatedDictionary,
data_utils,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import LegacyFairseqTask, register_task
from omegaconf import II
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
logger = logging.getLogger(__name__)
@dataclass
class LanguageModelingConfig(FairseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
output_dictionary_size: int = field(
default=-1, metadata={"help": "limit the size of output dictionary"}
)
self_target: bool = field(default=False, metadata={"help": "include self target"})
future_target: bool = field(
default=False, metadata={"help": "include future target"}
)
past_target: bool = field(default=False, metadata={"help": "include past target"})
add_bos_token: bool = field(
default=False, metadata={"help": "prepend beginning of sentence token (<s>)"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
data_buffer_size: int = II("dataset.data_buffer_size")
tpu: bool = II("common.tpu")
use_plasma_view: bool = II("common.use_plasma_view")
plasma_path: str = II("common.plasma_path")
@register_task("language_modeling", dataclass=LanguageModelingConfig)
class LanguageModelingTask(LegacyFairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@classmethod
def setup_dictionary(cls, args, **kwargs):
dictionary = None
output_dictionary = None
if args.data:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
return (dictionary, output_dictionary)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if getattr(args, "exclude_self_target", False):
args.self_target = False
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
"Unsupported language modeling target: {}".format(target)
)
return model
def load_dataset(
self, split: str, epoch=1, combine=False, **kwargs
) -> MonolingualDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each process has its own copy of the raw data (likely to be an np.memmap)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(f"Dataset not found: {split} ({split_path})")
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
use_plasma_view=self.args.use_plasma_view,
split_path=split_path,
plasma_path=self.args.plasma_path,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
self.datasets[split] = MonolingualDataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
targets=self.targets,
add_bos_token=self.args.add_bos_token,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad())
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False
),
},
sizes=[np.array(src_lengths)],
)
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the language_modeling task is not supported"
)
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
# ensures that every evaluated token has access to a context of at least
# this size, if possible
context_window: int = 0,
):
if context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=self.args.tokens_per_sample,
context_window=context_window,
pad_idx=self.source_dictionary.pad(),
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
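# Hedged illustration (not fairseq API): the token layout produced by
# build_dataset_for_inference above, in plain Python. With --add-bos-token
# unset, eos is prepended to the input and pad appended to the target, so
# input position i predicts target position i.
def _toy_inference_layout(src, eos=2, pad=1, bos=0, add_bos_token=False):
    stripped = src[:-1] if src and src[-1] == eos else list(src)
    net_input = [bos if add_bos_token else eos] + stripped
    target = stripped + [pad]
    return net_input, target
# _toy_inference_layout([5, 6, 7, 2]) -> ([2, 5, 6, 7], [5, 6, 7, 1])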
|
COCO-LM/fairseq/fairseq/tasks/language_modeling.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/language_modeling.py",
"repo_id": "COCO-LM",
"token_count": 6013
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Implements tracking of constraints for a beam item.
A list of constraints is given as a list of one or more token
sequences, each of length at least one token. For example, for an input sentence
> Die maschinelle Übersetzung ist schwer zu kontrollieren.
We could have the constraints:
* to influence
* hard
There are two implementations:
* OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints.
* UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints.
The difference is that in the first, the constraints are assumed to be
in order; the algorithm will permit zero or more tokens between them.
In the second, the constraints are not ordered, so many orderings will
be explored.
The same sequence can be present any number of times, and will appear
that many times in the output.
"""
from collections import Counter
from typing import List, Optional, Set, Tuple
import torch
class ConstraintState:
def __init__(self):
pass
def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor:
"""Takes a list of list of constraints in tensor form (a list of
tensor constraints for each sentence) and transforms it into a
packed Tensor. For example, here is a batch of size 3 with 3, 0,
and 1 constraints:
[ [ [3 1 2], [3], [4 5 6 7], ]
[],
[ [1 8 9 10 1 4 11 12], ]
]
Its corresponding packed structure is:
[ [ 3 3 1 2 0 3 0 4 5 6 7 0],
[ 0 0 0 0 0 0 0 0 0 0 0 0],
[ 1 1 8 9 10 1 4 11 12 0 0 0] ]
The packed tensor has shape (batch size, maxlen), where
maxlen is defined below. Each row contains concatenated
constraint tokens for that sentence, with 0 appended after
each constraint. The first item in each row is the number
of constraints for that sentence. So maxlen is the maximum
of
(number of constraints) + (sum length of constraints) + 1.
across all sentences in the batch.
"""
# The maximum word length of concatenated constraints for any sentence
max_constraints_len = 1
for sentence_constraints in batch_constraints:
if len(sentence_constraints):
            # number of constraints, plus sum of constraint lengths, plus a zero after each
constraints_len = (
1
+ sum([c.size(0) for c in sentence_constraints])
+ len(sentence_constraints)
)
max_constraints_len = max(max_constraints_len, constraints_len)
batch_size = len(batch_constraints)
constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long()
for i, sentence_constraints in enumerate(batch_constraints):
constraints_tensor[i, 0] = len(sentence_constraints)
offset = 1
for j, constraint in enumerate(sentence_constraints):
this_len = constraint.size(0)
constraints_tensor[i, offset : offset + this_len] = constraint
offset += this_len + 1
return constraints_tensor.long()
def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Transforms *one row* of a packed constraint tensor (e.g., for one
sentence in the batch) into a list of constraint tensors.
"""
constraint_list = []
num_constraints = constraint_tensor[0]
constraints = constraint_tensor.tolist()
offset = 1
for i in range(num_constraints):
where = constraints.index(0, offset)
constraint_list.append(constraint_tensor[offset:where])
offset = where + 1
return constraint_list
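def _pack_unpack_roundtrip_example():
    # Hedged sketch (not part of the original file): round-trips the batch
    # from the pack_constraints docstring through pack and unpack.
    batch = [
        [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
        [],
        [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
    ]
    packed = pack_constraints(batch)
    assert packed.shape == (3, 12)  # 1 count slot + tokens + one 0 per constraint
    unpacked = unpack_constraints(packed[0])
    assert [t.tolist() for t in unpacked] == [[3, 1, 2], [3], [4, 5, 6, 7]]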
class ConstraintNode:
"""
Represents a node in a trie managing unordered constraints.
"""
def __init__(self, token: int = None, parent=None):
# The token associate with this node (None for the root)
self.token = int(token) if token is not None else None
# The parent (None at the root)
self.parent = parent
# Whether this node is a completed constraint
self.terminal = 0
# List of child nodes
self.children = {}
# The cumulative number of constraints from this point in the
# trie forward
self.num_constraints = 0
@property
def id(self):
return self.token
def __str__(self):
term = self.terminal != 0
return f"[{self.token}].{term}#{self.num_constraints}"
def __getitem__(self, key: int):
return self.children.get(key, None)
def next_tokens(self) -> Set[int]:
"""The set of child labels."""
return set(self.children.keys())
@staticmethod
def create(constraints: List[List[int]]):
root = ConstraintNode()
for sequence in constraints:
root.add_sequence(sequence)
return root
@staticmethod
def print_graph(node: "ConstraintNode"):
if len(node.children) == 0:
return str(node)
else:
s = f"({node}"
for child in node.children.values():
s += " " + ConstraintNode.print_graph(child)
s += ")"
return s
def token_counts(self) -> Counter:
"""Returns a counter of the number of times each token is used
in a constraint.
"""
token_counts = Counter()
kids = list(self.children.values())
while len(kids) > 0:
kid = kids.pop()
token_counts[kid.id] += kid.num_constraints
kids += list(kid.children.values())
return token_counts
def tokens(self) -> Set[int]:
"""Returns the set of tokens in constraints."""
return set(self.token_counts().keys())
def add_sequence(self, sequence: List[int]):
"""Adds a constraint, represented as a list of integers, to
the trie."""
assert len(sequence) > 0
token = int(sequence[0])
if token not in self.children:
self.children[token] = ConstraintNode(token, parent=self)
node = self.children[token]
if len(sequence) == 1:
node.terminal += 1
node.num_constraints += 1
parent = node.parent
while parent is not None:
parent.num_constraints += 1
parent = parent.parent
else:
node.add_sequence(sequence[1:])
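def _constraint_trie_example():
    # Hedged sketch (not part of the original file): the trie built for the
    # unordered constraints [[1, 2], [1, 3], [4]].
    root = ConstraintNode.create([[1, 2], [1, 3], [4]])
    assert root.num_constraints == 3     # three complete constraints below root
    assert root.next_tokens() == {1, 4}  # the constraints' first tokens
    assert root.token_counts()[1] == 2   # token 1 is shared by two constraints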
class UnorderedConstraintState(ConstraintState):
"""
Records progress through the set of constraints for each item in the beam
using a trie.
"""
def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None):
self.node = node
if copy_from is None:
# The root node
self.root = node
# The set of states in the graph that have been completed
self.completed = Counter()
            # Counter of trie nodes that have been generated (visited) so far
self.generated = Counter()
# The list of tokens we need to generate
self.needed_tokens = self.root.tokens()
else:
self.completed = Counter(copy_from.completed)
self.generated = Counter(copy_from.generated)
self.root = copy_from.root
# Mark the node as generated
if self.node != self.root:
self.generated[node] += 1
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
constraint_trie_root = ConstraintNode.create(constraint_list)
return UnorderedConstraintState(constraint_trie_root)
def __str__(self):
gen_str = ",".join([str(node) for node in self.generated])
return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}"
def __copy__(self):
copied_state = UnorderedConstraintState(self.node, copy_from=self)
return copied_state
def copy(self):
return self.__copy__()
@property
def name(self):
if self.node.id is None:
return "ROOT"
else:
return str(self.node.id)
@property
def is_root(self):
return self.node == self.root
@property
def bank(self):
return sum(self.generated.values())
@property
def num_completed(self):
"""The number of constraints (not constraint tokens) that are completed.
In addition to the already-completed states, we need to account for the
current state, which might get marked as completed when another token
is generated.
"""
in_final = self.node.terminal and self.completed[self.node] < self.node.terminal
return sum(self.completed.values()) + in_final
@property
def finished(self):
return self.root.num_constraints - self.num_completed == 0
@property
def token_counts(self):
return self.root.token_counts()
@property
def tokens(self):
return self.root.tokens()
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
if self.node != self.root:
return self.root.next_tokens().union(self.node.next_tokens())
else:
return self.root.next_tokens()
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
next_state = None
child = self.node[token]
if child is not None and self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
def rewind():
"""If we're mid-trie and an "illegal" token is chosen next, we need
to reset our state to the root state. However, along the way, we need
to check whether a prefix of the current trie state represents a state
we could mark as completed.
"""
node = self.node
while node != self.root:
if node.terminal and self.completed[node] < node.terminal:
next_state.completed[node] += 1
return
next_state.generated[node] -= 1
node = node.parent
# Fall off the graph, check the root
if next_state is None and token in self.root.next_tokens():
child = self.root[token]
# We can only traverse this edge if it's not saturated
if self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
else:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
elif next_state is None:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
return next_state
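def _unordered_advance_example():
    # Hedged sketch (not part of the original file): tracking the single
    # constraint [5, 6] through UnorderedConstraintState.advance.
    packed = pack_constraints([[torch.tensor([5, 6])]])[0]
    state = UnorderedConstraintState.create(packed)
    state = state.advance(5)  # enters the trie
    assert not state.finished
    state = state.advance(6)  # reaches the terminal node
    assert state.finished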
class ConstraintSequence:
def __init__(self, sequences: List[List[int]]):
"""Represents a set of possibly multitoken constraints by
concatenating them and internally recording the end points.
"""
self.sequences = []
self.endpoints = []
self.num_tokens = 0
self.tokens = set()
for sequence in sequences:
for token in sequence:
self.tokens.add(token)
self.num_tokens += len(sequence)
self.endpoints += [False for x in range(len(sequence) - 1)] + [True]
self.sequences += sequence
def __getitem__(self, key: int):
return self.sequences[key]
def __len__(self):
return len(self.sequences)
def __str__(self):
return str(self.sequences)
class OrderedConstraintState(ConstraintState):
"""
Records progress through the set of linear nonbranching constraints with gaps.
"""
def __init__(self, sequence: ConstraintSequence, state: int = -1):
self.sequence = sequence
self.state = state
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
return OrderedConstraintState(ConstraintSequence(constraint_list), -1)
def __str__(self):
return f"{self.state}/{self.bank}x{self.num_completed}"
def __copy__(self):
return OrderedConstraintState(self.sequence, self.state)
def copy(self):
return self.__copy__()
@property
def num_completed(self):
if self.state == -1:
return 0
count = len(
list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1]))
)
return count
@property
def is_root(self):
return self.state == -1
@property
def name(self):
if self.state == -1:
return "ROOT"
else:
return str(self.sequence[self.state])
@property
def bank(self) -> int:
return self.state + 1
@property
def finished(self):
return self.state + 1 == len(self.sequence)
@property
def token_counts(self):
return self.sequence.token_counts()
@property
def tokens(self):
return self.sequence.tokens
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
tokens = set()
if self.state > 0:
tokens.add(self.sequence[0])
if not self.finished:
tokens.add(self.sequence[self.state + 1])
return tokens
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
# print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="")
if self.finished:
# Accept anything
next_state = self.copy()
elif self.sequence[self.state + 1] == token:
# Advance to the next token
next_state = OrderedConstraintState(self.sequence, self.state + 1)
elif self.sequence.endpoints[self.state]:
# Accept anything between constraints (*)
next_state = self.copy()
elif token == self.sequence[0]:
# Start over having generated the first token
next_state = OrderedConstraintState(self.sequence, 0)
else:
# Start over from the root
next_state = OrderedConstraintState(self.sequence, -1)
return next_state
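def _ordered_advance_example():
    # Hedged sketch (not part of the original file): OrderedConstraintState
    # walking constraints [7] then [8, 9], with a gap token in between.
    packed = pack_constraints([[torch.tensor([7]), torch.tensor([8, 9])]])[0]
    state = OrderedConstraintState.create(packed)
    state = state.advance(7)  # completes the first constraint
    state = state.advance(3)  # arbitrary token between constraints; state holds
    state = state.advance(8)
    state = state.advance(9)
    assert state.finished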
|
COCO-LM/fairseq/fairseq/token_generation_constraints.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/token_generation_constraints.py",
"repo_id": "COCO-LM",
"token_count": 6770
}
| 203 |
#include <torch/extension.h>
void fused_adam_cuda(at::Tensor & p, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int bias_correction, float decay);
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
void adam(at::Tensor & p, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int bias_correction, float decay) {
CHECK_INPUT(p);
CHECK_INPUT(m);
CHECK_INPUT(v);
CHECK_INPUT(g);
int64_t num_elem = p.numel();
AT_ASSERTM(m.numel() == num_elem, "number of elements in m and p tensors should be equal");
AT_ASSERTM(v.numel() == num_elem, "number of elements in v and p tensors should be equal");
AT_ASSERTM(g.numel() == num_elem, "number of elements in g and p tensors should be equal");
fused_adam_cuda(p, m, v, g, lr, beta1, beta2, eps, grad_scale, step, bias_correction, decay);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("adam", &adam, "Adam optimized CUDA implementation.");
}
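A hedged sketch of calling a binding like the one above from Python, assuming
the extension was compiled (e.g. via torch.utils.cpp_extension) under the
hypothetical module name "fused_adam"; the argument order mirrors the C++
signature:
import torch
import fused_adam  # hypothetical name for the compiled extension
p = torch.randn(1024, device="cuda")
m = torch.zeros_like(p)
v = torch.zeros_like(p)
g = torch.randn_like(p)
# adam(p, m, v, g, lr, beta1, beta2, eps, grad_scale, step, bias_correction, decay)
fused_adam.adam(p, m, v, g, 1e-3, 0.9, 0.999, 1e-8, 1.0, 1, 1, 0.0)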
|
COCO-LM/fairseq/fused_ops/csrc/adam/interface.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/adam/interface.cpp",
"repo_id": "COCO-LM",
"token_count": 516
}
| 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
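def _hub_usage_example():
    # Hedged sketch (not part of the original file): the globals exposed above
    # let torch.hub load any name from a model's hub_models(); the model name
    # here is one of fairseq's published hub entries.
    import torch
    en2de = torch.hub.load(
        "pytorch/fairseq",
        "transformer.wmt19.en-de",
        tokenizer="moses",
        bpe="fastbpe",
    )
    return en2de.eval()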
|
COCO-LM/fairseq/hubconf.py/0
|
{
"file_path": "COCO-LM/fairseq/hubconf.py",
"repo_id": "COCO-LM",
"token_count": 807
}
| 205 |
-- Copyright (c) Facebook, Inc. and its affiliates.
--
-- This source code is licensed under the MIT license found in the
-- LICENSE file in the root directory of this source tree.
--
-- Usage: convert_dictionary.lua <dict.th7>
require 'fairseq'
require 'torch'
require 'paths'
if #arg < 1 then
print('usage: convert_dictionary.lua <dict.th7>')
os.exit(1)
end
if not paths.filep(arg[1]) then
  print('error: file does not exist: ' .. arg[1])
os.exit(1)
end
dict = torch.load(arg[1])
dst = paths.basename(arg[1]):gsub('.th7', '.txt')
assert(dst:match('.txt$'))
f = io.open(dst, 'w')
for idx, symbol in ipairs(dict.index_to_symbol) do
if idx > dict.cutoff then
break
end
f:write(symbol)
f:write(' ')
f:write(dict.index_to_freq[idx])
f:write('\n')
end
f:close()
|
COCO-LM/fairseq/scripts/convert_dictionary.lua/0
|
{
"file_path": "COCO-LM/fairseq/scripts/convert_dictionary.lua",
"repo_id": "COCO-LM",
"token_count": 314
}
| 206 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import signal
import time
import unittest
import torch
from torch import nn
from fairseq.distributed import DistributedTimeoutWrapper
class ModuleWithDelay(nn.Module):
def __init__(self, delay):
super().__init__()
self.delay = delay
def forward(self, x):
time.sleep(self.delay)
return x
class TestDistributedTimeoutWrapper(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_no_timeout(self):
module = DistributedTimeoutWrapper(ModuleWithDelay(1), 0, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
def test_timeout_safe(self):
module = DistributedTimeoutWrapper(ModuleWithDelay(1), 10, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
def test_timeout_killed(self):
with self.assertRaises(KeyboardInterrupt):
module = DistributedTimeoutWrapper(ModuleWithDelay(5), 1, signal.SIGINT)
module(torch.rand(5))
module.stop_timeout()
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/distributed/test_distributed_timeout_wrapper.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/distributed/test_distributed_timeout_wrapper.py",
"repo_id": "COCO-LM",
"token_count": 525
}
| 207 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import json
import os
import random
import sys
import tempfile
import unittest
from io import StringIO
from typing import List, Dict
import torch
from fairseq import options
from fairseq_cli import eval_lm, train, validate
from tests.utils import (
create_dummy_data,
generate_main,
preprocess_lm_data,
preprocess_summarization_data,
preprocess_translation_data,
create_laser_data_and_config_json,
train_translation_model,
)
try:
import transformers # noqa
has_hf_transformers = True
except ImportError:
has_hf_transformers = False
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--dataset-impl", "raw"])
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"]
)
generate_main(data_dir, ["--dataset-impl", "raw"])
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_update_freq") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"]
)
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_max_positions") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
["--max-target-positions", "5"],
)
self.assertTrue(
"skip this example with --skip-invalid-size-inputs-valid-test"
in str(context.exception)
)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--max-target-positions",
"5",
"--skip-invalid-size-inputs-valid-test",
],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_sampling") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(
data_dir,
[
"--sampling",
"--temperature",
"2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topk",
"3",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topp",
"0.2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--diversity-rate",
"0.5",
"--beam",
"6",
],
)
with self.assertRaises(ValueError):
generate_main(
data_dir,
[
"--diverse-beam-groups",
"4",
"--match-source-len",
],
)
generate_main(data_dir, ["--prefix-size", "2"])
generate_main(data_dir, ["--retain-dropout"])
def test_eval_bleu(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--eval-bleu",
"--eval-bleu-print-samples",
"--eval-bleu-remove-bpe",
"--eval-bleu-detok",
"space",
"--eval-bleu-args",
'{"beam": 4, "min_len": 10}',
],
)
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm_wiseman_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm",
[
"--encoder-layers",
"2",
"--encoder-bidirectional",
"--encoder-hidden-size",
"16",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--decoder-layers",
"2",
],
)
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
run_validation=True,
)
generate_main(data_dir)
def test_multilingual_transformer(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_transformer_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
arch="multilingual_transformer",
task="multilingual_translation",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"multilingual_translation",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
@unittest.skipIf(
sys.platform.lower() == "darwin", "skip latent depth test on MacOS"
)
def test_multilingual_translation_latent_depth(self):
# test with latent depth in encoder, decoder, or both
encoder_latent_layer = [[], ["--encoder-latent-layer"]]
decoder_latent_layer = [[], ["--decoder-latent-layer"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_latent_layer)):
for j in range(len(decoder_latent_layer)):
if i == 0 and j == 0:
continue
enc_ll_flag = encoder_latent_layer[i]
dec_ll_flag = decoder_latent_layer[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_translation_latent_depth_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="latent_multilingual_transformer",
task="multilingual_translation_latent_depth",
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--share-encoders",
"--share-decoders",
"--sparsity-weight",
"0.1",
]
+ enc_ll_flag
+ dec_ll_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
]
+ enc_ll_flag
+ dec_ll_flag,
)
generate_main(
data_dir,
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--task",
"multilingual_translation_latent_depth",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ll_flag
+ dec_ll_flag,
)
def test_translation_multi_simple_epoch(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_translation_multi_simple_epoch_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_no_vepoch(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_dicts(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_src_tgt_dict_spec(self):
# test the specification of explicit --src-dict and --tgt-dict
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--source-dict",
f"{data_dir}/dict.in.txt",
"--target-dict",
f"{data_dir}/dict.out.txt",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_cross_self_attention"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--no-cross-attention",
"--cross-self-attention",
],
run_validation=True,
)
generate_main(data_dir, extra_flags=[])
def test_transformer_pointer_generator(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_pointer_generator"
) as data_dir:
create_dummy_data(data_dir)
preprocess_summarization_data(data_dir)
train_translation_model(
data_dir,
"transformer_pointer_generator",
extra_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--alignment-layer",
"-1",
"--alignment-heads",
"1",
"--source-position-markers",
"0",
],
run_validation=True,
extra_valid_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
],
)
generate_main(
data_dir,
extra_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
],
)
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"lightweight",
"--decoder-conv-type",
"lightweight",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"dynamic",
"--decoder-conv-type",
"dynamic",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"cmlm_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"0",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
# def test_nat_crf_transformer(self):
# with contextlib.redirect_stdout(StringIO()):
# with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir:
# create_dummy_data(data_dir)
# preprocess_translation_data(data_dir, ['--joined-dictionary'])
# train_translation_model(data_dir, 'nacrf_transformer', [
# '--apply-bert-init', '--criterion',
# 'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
# '--length-loss-factor', '0.1',
# '--word-ins-loss-factor', '0.5',
# '--crf-lowrank-approx', '1',
# '--crf-beam-approx', '1'
# ], task='translation_lev')
# generate_main(data_dir, [
# '--task', 'translation_lev',
# '--iter-decode-max-iter', '0',
# '--iter-decode-eos-penalty', '0',
# '--print-step',
# ])
def test_iterative_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_iterative_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"iterative_nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--stochastic-approx",
"--dae-ratio",
"0.5",
"--train-step",
"3",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_insertion_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"insertion_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"random_mask",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_mixture_of_experts(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_moe") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(
data_dir,
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--gen-expert",
"0",
],
)
def test_alignment(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_laser_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir:
laser_config_file = create_laser_data_and_config_json(data_dir)
train_translation_model(
laser_config_file.name,
"laser_lstm",
[
"--user-dir",
"examples/laser/laser_src",
"--weighting-alpha",
"0.3",
"--encoder-bidirectional",
"--encoder-hidden-size",
"512",
"--encoder-layers",
"5",
"--decoder-layers",
"1",
"--encoder-embed-dim",
"320",
"--decoder-embed-dim",
"320",
"--decoder-lang-embed-dim",
"32",
"--save-dir",
data_dir,
"--disable-validation",
],
task="laser",
lang_flags=[],
)
def test_laser_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir:
laser_config_file = create_laser_data_and_config_json(data_dir)
train_translation_model(
laser_config_file.name,
"laser_transformer",
[
"--user-dir",
"examples/laser/laser_src",
"--weighting-alpha",
"0.3",
"--encoder-embed-dim",
"320",
"--decoder-embed-dim",
"320",
"--decoder-lang-embed-dim",
"32",
"--save-dir",
data_dir,
"--disable-validation",
],
task="laser",
lang_flags=[],
)
def test_alignment_full_context(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
"--full-context-alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_transformer_layerdrop(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--encoder-layerdrop",
"0.01",
"--decoder-layerdrop",
"0.01",
],
)
generate_main(data_dir)
generate_main(
data_dir,
[
"--model-overrides",
"{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}",
],
)
class TestStories(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
"--encoder-layers",
"[(128, 3)] * 2",
"--decoder-layers",
"[(128, 3)] * 2",
"--decoder-attention",
"True",
"--encoder-attention",
"False",
"--gated-attention",
"True",
"--self-attention",
"True",
"--project-input",
"True",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--multihead-self-attention-nheads",
"2",
]
train_translation_model(data_dir, "fconv_self_att_wp", config)
generate_main(data_dir)
# fusion model
os.rename(
os.path.join(data_dir, "checkpoint_last.pt"),
os.path.join(data_dir, "pretrained.pt"),
)
config.extend(
[
"--pretrained",
"True",
"--pretrained-checkpoint",
os.path.join(data_dir, "pretrained.pt"),
"--save-dir",
os.path.join(data_dir, "fusion_model"),
]
)
train_translation_model(data_dir, "fconv_self_att_wp", config)
class TestLanguageModeling(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"fconv_lm",
[
"--decoder-layers",
"[(850, 3)] * 2 + [(1024,4)]",
"--decoder-embed-dim",
"280",
"--optimizer",
"nag",
"--lr",
"0.1",
],
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--add-bos-token", '--nval', '1'],
run_validation=True,
)
eval_lm_main(data_dir)
eval_lm_main(data_dir, extra_flags=["--context-window", "25"])
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm_with_adaptive_softmax(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_lm_with_adaptive_softmax"
) as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
[
"--add-bos-token",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lightconv_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm_residuals(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token", "--residuals"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
@unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing")
def test_transformer_xl_bptt_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
task_flags = [
"--user-dir",
"examples/truncated_bptt",
"--task",
"truncated_bptt_lm",
"--batch-size",
"2",
"--tokens-per-sample",
"50",
]
train_language_model(
data_dir=data_dir,
arch="transformer_xl",
extra_flags=task_flags
+ [
"--n-layer",
"2",
],
task="truncated_bptt_lm",
run_validation=True,
extra_valid_flags=task_flags,
)
eval_lm_main(data_dir, extra_flags=task_flags)
# Train with activation offloading
train_language_model(
data_dir=data_dir,
arch="transformer_xl",
extra_flags=task_flags
+ [
"--n-layer",
"2",
"--offload-activations",
],
task="truncated_bptt_lm",
run_validation=True,
extra_valid_flags=task_flags,
)
class TestMaskedLanguageModel(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_legacy_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(data_dir, "masked_lm")
def test_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"]
)
def test_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes)
def test_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_linformer_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir,
"linformer_roberta_base",
extra_flags=[
"--user-dir",
"examples/linformer/linformer_src",
"--encoder-layers",
"2",
],
)
def test_linformer_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=["--user-dir", "examples/linformer/linformer_src"],
)
def test_linformer_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def test_linformer_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(
data_dir,
arch="masked_lm",
extra_args=("--encoder-learned-pos",) if learned_pos_emb else (),
)
with tempfile.TemporaryDirectory(
"test_mlm_translation"
) as translation_dir:
create_dummy_data(translation_dir)
preprocess_translation_data(
translation_dir, extra_flags=["--joined-dictionary"]
)
# Train transformer with data_dir/checkpoint_last.pt
train_translation_model(
translation_dir,
arch="transformer_from_pretrained_xlm",
extra_flags=[
"--decoder-layers",
"1",
"--decoder-embed-dim",
"32",
"--decoder-attention-heads",
"1",
"--decoder-ffn-embed-dim",
"32",
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
"--pretrained-xlm-checkpoint",
"{}/checkpoint_last.pt".format(data_dir),
"--activation-fn",
"gelu",
"--max-source-positions",
"500",
"--max-target-positions",
"500",
]
+ (
["--encoder-learned-pos", "--decoder-learned-pos"]
if learned_pos_emb
else []
)
+ (["--init-encoder-only"] if encoder_only else []),
task="translation_from_pretrained_xlm",
)
def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(True, False)
def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(False, False)
def test_pretrained_masked_lm_for_translation_encoder_only(self):
self._test_pretrained_masked_lm_for_translation(True, True)
def test_r4f_roberta(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=[
"--user-dir",
"examples/rxf/rxf_src",
"--criterion",
"sentence_prediction_r3f",
"--spectral-norm-classification-head",
],
)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--stop-min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"legacy_masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
"--num-workers",
"0",
]
+ list(extra_args),
)
train.main(train_args)
class TestOptimizers(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_optimizers(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_optimizers") as data_dir:
# Use just a bit of data and tiny model to keep this test runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"]
last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt")
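# Remove any stale checkpoint between runs so each optimizer trains from
# scratch instead of resuming from the previous optimizer's weights.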
for optimizer in optimizers:
if os.path.exists(last_checkpoint):
os.remove(last_checkpoint)
train_translation_model(
data_dir,
"lstm",
[
"--required-batch-size-multiple",
"1",
"--encoder-layers",
"1",
"--encoder-hidden-size",
"32",
"--decoder-layers",
"1",
"--optimizer",
optimizer,
],
)
generate_main(data_dir)
def read_last_log_entry(
logs: List[logging.LogRecord], logger_name: str
) -> Dict[str, float]:
for x in reversed(logs):
if x.name == logger_name:
return json.loads(x.message)
raise ValueError(f"No entries from {logger_name} found in captured logs")
class TestActivationCheckpointing(unittest.TestCase):
base_flags = [
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--restore-file",
"x.pt",
"--log-format",
"json",
"--log-interval",
"1",
"--max-update",
"2",
]
def _train(self, data_dir, extra_flags):
with self.assertLogs() as logs:
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
self.base_flags + extra_flags,
run_validation=True,
extra_valid_flags=["--log-format", "json"],
)
return logs.records
def test_activation_offloading_does_not_change_metrics(self):
"""Neither ----checkpoint-activations nor --offload-activations should change loss"""
with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir:
with self.assertLogs():
create_dummy_data(data_dir, num_examples=20)
preprocess_translation_data(data_dir)
offload_logs = self._train(data_dir, ["--offload-activations"])
baseline_logs = self._train(data_dir, [])
assert len(baseline_logs) == len(offload_logs)
baseline_valid_stats = read_last_log_entry(baseline_logs, "valid")
offload_valid_stats = read_last_log_entry(offload_logs, "valid")
baseline_train_stats = read_last_log_entry(baseline_logs, "train")
offload_train_stats = read_last_log_entry(offload_logs, "train")
assert (
baseline_train_stats["train_loss"] == offload_train_stats["train_loss"]
)
assert (
baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"]
)
def test_activation_checkpointing_does_not_change_metrics(self):
"""--checkpoint-activations should not change loss"""
with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir:
with self.assertLogs():
create_dummy_data(data_dir, num_examples=20)
preprocess_translation_data(data_dir)
ckpt_logs = self._train(data_dir, ["--checkpoint-activations"])
baseline_logs = self._train(data_dir, [])
assert len(baseline_logs) == len(ckpt_logs)
baseline_train_stats = read_last_log_entry(baseline_logs, "train")
ckpt_train_stats = read_last_log_entry(ckpt_logs, "train")
assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"]
baseline_valid_stats = read_last_log_entry(baseline_logs, "valid")
ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid")
assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"]
def create_dummy_roberta_head_data(
data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False
):
input_dir = "input0"
def _create_dummy_data(filename):
random_data = torch.rand(num_examples * maxlen)
input_data = 97 + torch.floor(26 * random_data).int()
if regression:
output_data = torch.rand((num_examples, num_classes))
else:
output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int()
with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in:
label_filename = filename + ".label" if regression else filename + ".out"
with open(os.path.join(data_dir, "label", label_filename), "w") as f_out:
offset = 0
for i in range(num_examples):
# write example input
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, input_data[offset : offset + ex_len]))
print(ex_str, file=f_in)
# write example label
if regression:
class_str = " ".join(map(str, output_data[i].numpy()))
print(class_str, file=f_out)
else:
class_str = "class{}".format(output_data[i])
print(class_str, file=f_out)
offset += ex_len
os.mkdir(os.path.join(data_dir, input_dir))
os.mkdir(os.path.join(data_dir, "label"))
_create_dummy_data("train")
_create_dummy_data("valid")
_create_dummy_data("test")
def train_masked_lm(data_dir, arch, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"masked_lm",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"masked_lm",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"sentence_prediction",
data_dir,
"--arch",
arch,
"--encoder-layers",
"2",
"--num-classes",
str(num_classes),
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"sentence_prediction",
"--max-tokens",
"500",
"--max-positions",
"500",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def train_language_model(
data_dir,
arch,
extra_flags=None,
run_validation=False,
extra_valid_flags=None,
task="language_modeling",
):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_valid_flags or []),
)
validate.main(validate_args)
def eval_lm_main(data_dir, extra_flags=None):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
eval_lm.main(eval_lm_args)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_binaries.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_binaries.py",
"repo_id": "COCO-LM",
"token_count": 47182
}
| 208 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import MonolingualDataset
from fairseq.tasks.language_modeling import LanguageModelingTask, LanguageModelingConfig
from tests import utils as test_utils
class TestLMContextWindow(unittest.TestCase):
def test_eval_dataloader(self):
dictionary = test_utils.dummy_dictionary(10)
assert len(dictionary) == 14 # 4 extra special symbols
assert dictionary.pad() == 1
dataset = test_utils.TestDataset([
torch.tensor([4, 5, 6, 7], dtype=torch.long),
torch.tensor([8, 9, 10, 11], dtype=torch.long),
torch.tensor([12, 13], dtype=torch.long),
])
dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)
config = LanguageModelingConfig(tokens_per_sample=4)
task = LanguageModelingTask(config, dictionary)
eval_dataloader = task.eval_lm_dataloader(
dataset=dataset,
batch_size=1,
context_window=2,
)
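# The assertions below exercise the context-window behavior: every batch after
# the first carries the last `context_window` tokens of the previous sample as
# extra context in src_tokens, and those positions are masked with the pad
# index (1) in the target so they do not contribute to the loss.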
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
assert batch["target"][0].tolist() == [1, 1, 12, 13]
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_lm_context_window.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_lm_context_window.py",
"repo_id": "COCO-LM",
"token_count": 788
}
| 209 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
]
)
right_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
]
)
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
left_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
]
)
left_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
]
)
right_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
]
)
right_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
]
)
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def test_clip_grad_norm_(self):
params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, 0.0)
params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
for p in params:
p.grad = torch.full((5,), fill_value=2.0)
grad_norm = utils.clip_grad_norm_(params, 1.0)
exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, exp_grad_norm)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
def test_resolve_max_positions_with_tuple(self):
resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
self.assertEqual(resolved, (2000, 100, 2000))
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_utils.py",
"repo_id": "COCO-LM",
"token_count": 1891
}
| 210 |
""" Official evaluation script for SQuAD version 2.0.
Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question IDs to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
class EVAL_OPTS():
def __init__(self, data_file, pred_file, out_file="",
na_prob_file="na_prob.json", na_prob_thresh=1.0,
out_image_dir=None, verbose=False):
self.data_file = data_file
self.pred_file = pred_file
self.out_file = out_file
self.na_prob_file = na_prob_file
self.na_prob_thresh = na_prob_thresh
self.out_image_dir = out_image_dir
self.verbose = verbose
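# A minimal usage sketch (file names are hypothetical placeholders):
#
#   opts = EVAL_OPTS(data_file='dev-v2.0.json', pred_file='predictions.json')
#   results = main(opts)  # returns an OrderedDict of metrics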
OPTS = None
def parse_args():
parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
help='Model estimates of probability of no answer.')
parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
help='Predict "" if no-answer probability exceeds this (default = 1.0).')
parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
help='Save precision-recall curves to directory.')
parser.add_argument('--verbose', '-v', action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
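# Example: normalize_answer('The Cat!') returns 'cat'
# (lowercased, punctuation stripped, articles removed, whitespace collapsed).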
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
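# Worked example: gold 'the cat sat', pred 'cat sat down'.
# After normalization the gold tokens are ['cat', 'sat'] (article removed) and
# the prediction tokens are ['cat', 'sat', 'down'], so num_same = 2,
# precision = 2/3, recall = 2/2 = 1.0, and F1 = 0.8.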
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i+1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plot_pr_curve(precisions, recalls, out_image, title)
return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_exact.png'),
title='Precision-Recall curve for Exact Match score')
pr_f1 = make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_f1.png'),
title='Precision-Recall curve for F1 score')
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
merge_eval(main_eval, pr_exact, 'pr_exact')
merge_eval(main_eval, pr_f1, 'pr_f1')
merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x) / float(len(x))
plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title('Histogram of no-answer probability: %s' % name)
plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
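# find_best_thresh sweeps candidate no-answer thresholds: it starts from the
# score obtained by predicting "" everywhere (cur_score = num_no_ans), then
# walks questions in increasing order of no-answer probability, switching each
# one to its predicted answer and tracking the running score; the threshold
# achieving the maximum is returned together with the corresponding accuracy.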
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]: continue
has_ans_cnt += 1
if qid not in scores: continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
main_eval['has_ans_exact'] = has_ans_exact
main_eval['has_ans_f1'] = has_ans_f1
def main(OPTS):
with open(OPTS.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
with open(OPTS.pred_file) as f:
preds = json.load(f)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
na_probs = json.load(f)
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, preds)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, OPTS.out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
if OPTS.out_file:
with open(OPTS.out_file, 'w') as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
return out_eval
if __name__ == '__main__':
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main(OPTS)
|
COCO-LM/huggingface/utils_squad_evaluate.py/0
|
{
"file_path": "COCO-LM/huggingface/utils_squad_evaluate.py",
"repo_id": "COCO-LM",
"token_count": 5684
}
| 211 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Xiaoyi Dong
# ------------------------------------------
import glob
import operator
import os
import logging
import torch
from timm.utils.model import unwrap_model, get_state_dict
import shutil
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples ordered from best to worst
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
#os.unlink(last_save_path) # required for Windows support.
os.remove(last_save_path)
os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
#os.link(last_save_path, save_path)
shutil.copyfile(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
#os.link(last_save_path, best_save_path)
shutil.copyfile(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index <= 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
if len(files):
return files[0]
else:
return ''
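# A minimal usage sketch (names and values below are illustrative only):
#
#   saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output',
#                           decreasing=True)  # e.g. tracking validation loss
#   for epoch in range(num_epochs):
#       val_loss = train_one_epoch_and_validate()
#       best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_loss)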
|
CSWin-Transformer/checkpoint_saver.py/0
|
{
"file_path": "CSWin-Transformer/checkpoint_saver.py",
"repo_id": "CSWin-Transformer",
"token_count": 2948
}
| 212 |
datadir: /data/CMIP6/AWI-ESM
name: 10m_u_component_of_wind
cmip_name: uas
era_name: u10
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/AWI-ESM/config_10m_u_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/AWI-ESM/config_10m_u_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 72
}
| 213 |
datadir: /data/CMIP6/HAMMOZ
name: 2m_temperature
cmip_name: tas
era_name: t2m
run: r1i1p1f1
version: v20190628
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/HAMMOZ/config_2m_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/HAMMOZ/config_2m_temperature.yml",
"repo_id": "ClimaX",
"token_count": 73
}
| 214 |
datadir: /data/CMIP6/TaiESM1
server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP
name: 2m_temperature
cmip_name: tas
era_name: t2m
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/TaiESM1/config_2m_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/TaiESM1/config_2m_temperature.yml",
"repo_id": "ClimaX",
"token_count": 105
}
| 215 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# credits: https://github.com/ashleve/lightning-hydra-template/blob/main/src/models/mnist_module.py
from typing import Any
import torch
from pytorch_lightning import LightningModule
from torchvision.transforms import transforms
from climax.arch import ClimaX
from climax.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from climax.utils.metrics import (
lat_weighted_acc,
lat_weighted_mse,
lat_weighted_mse_val,
lat_weighted_rmse,
)
from climax.utils.pos_embed import interpolate_pos_embed
class GlobalForecastModule(LightningModule):
"""Lightning module for global forecasting with the ClimaX model.
Args:
net (ClimaX): ClimaX model.
pretrained_path (str, optional): Path to pre-trained checkpoint.
lr (float, optional): Learning rate.
beta_1 (float, optional): Beta 1 for AdamW.
beta_2 (float, optional): Beta 2 for AdamW.
weight_decay (float, optional): Weight decay for AdamW.
warmup_epochs (int, optional): Number of warmup epochs.
max_epochs (int, optional): Number of total epochs.
warmup_start_lr (float, optional): Starting learning rate for warmup.
eta_min (float, optional): Minimum learning rate.
"""
def __init__(
self,
net: ClimaX,
pretrained_path: str = "",
lr: float = 5e-4,
beta_1: float = 0.9,
beta_2: float = 0.99,
weight_decay: float = 1e-5,
warmup_epochs: int = 10000,
max_epochs: int = 200000,
warmup_start_lr: float = 1e-8,
eta_min: float = 1e-8,
):
super().__init__()
self.save_hyperparameters(logger=False, ignore=["net"])
self.net = net
if len(pretrained_path) > 0:
self.load_pretrained_weights(pretrained_path)
def load_pretrained_weights(self, pretrained_path):
if pretrained_path.startswith("http"):
checkpoint = torch.hub.load_state_dict_from_url(pretrained_path)
else:
checkpoint = torch.load(pretrained_path, map_location=torch.device("cpu"))
print("Loading pre-trained checkpoint from: %s" % pretrained_path)
checkpoint_model = checkpoint["state_dict"]
# interpolate positional embedding
interpolate_pos_embed(self.net, checkpoint_model, new_size=self.net.img_size)
state_dict = self.state_dict()
if self.net.parallel_patch_embed:
if "token_embeds.proj_weights" not in checkpoint_model.keys():
raise ValueError(
"Pretrained checkpoint does not have token_embeds.proj_weights for parallel processing. Please convert the checkpoints first or disable parallel patch_embed tokenization."
)
# checkpoint_keys = list(checkpoint_model.keys())
for k in list(checkpoint_model.keys()):
if "channel" in k:
checkpoint_model[k.replace("channel", "var")] = checkpoint_model[k]
del checkpoint_model[k]
for k in list(checkpoint_model.keys()):
if k not in state_dict.keys() or checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# load pre-trained model
msg = self.load_state_dict(checkpoint_model, strict=False)
print(msg)
def set_denormalization(self, mean, std):
self.denormalization = transforms.Normalize(mean, std)
def set_lat_lon(self, lat, lon):
self.lat = lat
self.lon = lon
def set_pred_range(self, r):
self.pred_range = r
def set_val_clim(self, clim):
self.val_clim = clim
def set_test_clim(self, clim):
self.test_clim = clim
def training_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
loss_dict, _ = self.net.forward(x, y, lead_times, variables, out_variables, [lat_weighted_mse], lat=self.lat)
loss_dict = loss_dict[0]
for var in loss_dict.keys():
self.log(
"train/" + var,
loss_dict[var],
on_step=True,
on_epoch=False,
prog_bar=True,
)
loss = loss_dict["loss"]
return loss
def validation_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
if self.pred_range < 24:
log_postfix = f"{self.pred_range}_hours"
else:
days = int(self.pred_range / 24)
log_postfix = f"{days}_days"
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse, lat_weighted_acc],
lat=self.lat,
clim=self.val_clim,
log_postfix=log_postfix,
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"val/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def test_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables = batch
if self.pred_range < 24:
log_postfix = f"{self.pred_range}_hours"
else:
days = int(self.pred_range / 24)
log_postfix = f"{days}_days"
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse, lat_weighted_acc],
lat=self.lat,
clim=self.test_clim,
log_postfix=log_postfix,
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"test/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def configure_optimizers(self):
decay = []
no_decay = []
for name, m in self.named_parameters():
if "var_embed" in name or "pos_embed" in name or "time_pos_embed" in name:
no_decay.append(m)
else:
decay.append(m)
optimizer = torch.optim.AdamW(
[
{
"params": decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": self.hparams.weight_decay,
},
{
"params": no_decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": 0,
},
]
)
lr_scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
self.hparams.warmup_epochs,
self.hparams.max_epochs,
self.hparams.warmup_start_lr,
self.hparams.eta_min,
)
scheduler = {"scheduler": lr_scheduler, "interval": "step", "frequency": 1}
return {"optimizer": optimizer, "lr_scheduler": scheduler}
|
ClimaX/src/climax/global_forecast/module.py/0
|
{
"file_path": "ClimaX/src/climax/global_forecast/module.py",
"repo_id": "ClimaX",
"token_count": 3936
}
| 216 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# Position embedding utils
# --------------------------------------------------------
import numpy as np
import torch
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size_h, grid_size_w, cls_token=False):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size_h, dtype=np.float32)
grid_w = np.arange(grid_size_w, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size_h, grid_size_w])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=np.float64)  # np.float was removed in NumPy >= 1.24
omega /= embed_dim / 2.0
omega = 1.0 / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
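# Shape example: get_1d_sincos_pos_embed_from_grid(8, np.arange(4)) returns a
# (4, 8) array: 4 sine values followed by 4 cosine values per position.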
# --------------------------------------------------------
# Interpolate position embeddings for high-resolution
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model, new_size=(64, 128)):
if "net.pos_embed" in checkpoint_model:
pos_embed_checkpoint = checkpoint_model["net.pos_embed"]
embedding_size = pos_embed_checkpoint.shape[-1]
orig_num_patches = pos_embed_checkpoint.shape[-2]
patch_size = model.patch_size
w_h_ratio = 2
orig_h = int((orig_num_patches // w_h_ratio) ** 0.5)
orig_w = w_h_ratio * orig_h
orig_size = (orig_h, orig_w)
new_size = (new_size[0] // patch_size, new_size[1] // patch_size)
# print (orig_size)
# print (new_size)
if orig_size[0] != new_size[0]:
print("Interpolate PEs from %dx%d to %dx%d" % (orig_size[0], orig_size[1], new_size[0], new_size[1]))
pos_tokens = pos_embed_checkpoint.reshape(-1, orig_size[0], orig_size[1], embedding_size).permute(
0, 3, 1, 2
)
new_pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size[0], new_size[1]), mode="bicubic", align_corners=False
)
new_pos_tokens = new_pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
checkpoint_model["net.pos_embed"] = new_pos_tokens
def interpolate_channel_embed(checkpoint_model, new_len):
if "net.channel_embed" in checkpoint_model:
channel_embed_checkpoint = checkpoint_model["net.channel_embed"]
old_len = channel_embed_checkpoint.shape[1]
if new_len <= old_len:
checkpoint_model["net.channel_embed"] = channel_embed_checkpoint[:, :new_len]
|
ClimaX/src/climax/utils/pos_embed.py/0
|
{
"file_path": "ClimaX/src/climax/utils/pos_embed.py",
"repo_id": "ClimaX",
"token_count": 1756
}
| 217 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from util.util import feature_normalize, mse_loss
class ContextualLoss_forward(nn.Module):
'''
input is Al, Bl, channel = 1, range ~ [0, 255]
'''
def __init__(self, opt):
super(ContextualLoss_forward, self).__init__()
self.opt = opt
return None
def forward(self, X_features, Y_features, h=0.1, feature_centering=True):
'''
X_features&Y_features are are feature vectors or feature 2d array
h: bandwidth
return the per-sample loss
'''
batch_size = X_features.shape[0]
feature_depth = X_features.shape[1]
feature_size = X_features.shape[2]
# to normalized feature vectors
if feature_centering:
if self.opt.PONO:
X_features = X_features - Y_features.mean(dim=1).unsqueeze(dim=1)
Y_features = Y_features - Y_features.mean(dim=1).unsqueeze(dim=1)
else:
X_features = X_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
Y_features = Y_features - Y_features.view(batch_size, feature_depth, -1).mean(dim=-1).unsqueeze(dim=-1).unsqueeze(dim=-1)
# X_features = X_features - Y_features.mean(dim=1).unsqueeze(dim=1)
# Y_features = Y_features - Y_features.mean(dim=1).unsqueeze(dim=1)
X_features = feature_normalize(X_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size * feature_size
Y_features = feature_normalize(Y_features).view(batch_size, feature_depth, -1) # batch_size * feature_depth * feature_size * feature_size
# X_features = F.unfold(
# X_features, kernel_size=self.opt.match_kernel, stride=1, padding=int(self.opt.match_kernel // 2)) # batch_size * feature_depth_new * feature_size^2
# Y_features = F.unfold(
# Y_features, kernel_size=self.opt.match_kernel, stride=1, padding=int(self.opt.match_kernel // 2)) # batch_size * feature_depth_new * feature_size^2
# cosine distance = 1 - similarity
X_features_permute = X_features.permute(0, 2, 1) # batch_size * feature_size^2 * feature_depth
d = 1 - torch.matmul(X_features_permute, Y_features) # batch_size * feature_size^2 * feature_size^2
# normalized distance: dij_bar
# d_norm = d
d_norm = d / (torch.min(d, dim=-1, keepdim=True)[0] + 1e-3) # batch_size * feature_size^2 * feature_size^2
# pairwise affinity
w = torch.exp((1 - d_norm) / h)
A_ij = w / torch.sum(w, dim=-1, keepdim=True)
# contextual loss per sample
CX = torch.mean(torch.max(A_ij, dim=-1)[0], dim=1)
loss = -torch.log(CX)
# contextual loss per batch
# loss = torch.mean(loss)
return loss
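# A minimal usage sketch (shapes illustrative; `opt` must at least expose the
# boolean `PONO` attribute consumed in forward()):
#
#   criterion = ContextualLoss_forward(opt)
#   x_feat = torch.randn(2, 256, 16, 16)  # batch x channels x H x W
#   y_feat = torch.randn(2, 256, 16, 16)
#   per_sample_loss = criterion(x_feat, y_feat)  # tensor of shape (2,)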
|
CoCosNet-v2/models/networks/ContextualLoss.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/ContextualLoss.py",
"repo_id": "CoCosNet-v2",
"token_count": 1301
}
| 218 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self, parser):
BaseOptions.initialize(self, parser)
# for displays
parser.add_argument('--display_freq', type=int, default=2000, help='frequency of showing training results on screen')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
# for training
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate. This is NOT the total #epochs. Total #epochs is niter + niter_decay')
parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.')
# for discriminators
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
parser.add_argument('--netD', type=str, default='multiscale', help='(n_layers|multiscale|image)')
parser.add_argument('--no_TTUR', action='store_true', help='Use TTUR training scheme')
parser.add_argument('--real_reference_probability', type=float, default=0.0, help='self-supervised training probability')
parser.add_argument('--hard_reference_probability', type=float, default=0.0, help='hard reference training probability')
# training loss weights
parser.add_argument('--weight_warp_self', type=float, default=0.0, help='push warp self to ref')
parser.add_argument('--weight_warp_cycle', type=float, default=0.0, help='push warp cycle to ref')
parser.add_argument('--weight_novgg_featpair', type=float, default=10.0, help='in no vgg setting, use pair feat loss in domain adaptation')
parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
parser.add_argument('--weight_gan', type=float, default=10.0, help='weight of all loss in stage1')
parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
parser.add_argument('--weight_ganFeat', type=float, default=10.0, help='weight for feature matching loss')
parser.add_argument('--which_perceptual', type=str, default='4_2', help='relu5_2 or relu4_2')
parser.add_argument('--weight_perceptual', type=float, default=0.001)
parser.add_argument('--weight_vgg', type=float, default=10.0, help='weight for vgg loss')
parser.add_argument('--weight_contextual', type=float, default=1.0, help='ctx loss weight')
parser.add_argument('--weight_fm_ratio', type=float, default=1.0, help='vgg fm loss weight comp with ctx loss')
self.isTrain = True
return parser
|
CoCosNet-v2/options/train_options.py/0
|
{
"file_path": "CoCosNet-v2/options/train_options.py",
"repo_id": "CoCosNet-v2",
"token_count": 1340
}
| 219 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d
import torch.nn.utils.spectral_norm as spectral_norm
try:
import apex
from apex import amp
except ImportError:
print('apex not found')
# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
if opt.eqlr_sn:
layer = equal_lr(layer)
else:
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'sync_batch':
if opt.apex:
norm_layer = apex.parallel.SyncBatchNorm(get_out_channel(layer), affine=True)
else:
norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
def PositionalNorm2d(x, epsilon=1e-5):
# x: B*C*W*H normalize in C dim
mean = x.mean(dim=1, keepdim=True)
std = x.var(dim=1, keepdim=True).add(epsilon).sqrt()
output = (x - mean) / std
return output
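# Example: for x of shape (B, C, H, W), PositionalNorm2d normalizes every
# spatial location independently across the channel dimension, so each
# position's C-vector leaves with zero mean and unit variance (up to epsilon).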
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc, PONO=False, use_apex=False):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
self.pad_type = 'nozero'
if PONO:
self.param_free_norm = PositionalNorm2d
elif param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
if use_apex:
self.param_free_norm = apex.parallel.SyncBatchNorm(norm_nc, affine=False)
else:
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
if self.pad_type != 'zero':
self.mlp_shared = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=0),
nn.ReLU()
)
self.pad = nn.ReflectionPad2d(pw)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
else:
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap, similarity_map=None):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
actv = self.mlp_shared(segmap)
if self.pad_type != 'zero':
gamma = self.mlp_gamma(self.pad(actv))
beta = self.mlp_beta(self.pad(actv))
else:
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
if similarity_map is not None:
similarity_map = F.interpolate(similarity_map, size=gamma.size()[2:], mode='nearest')
gamma = gamma * similarity_map
beta = beta * similarity_map
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
class SPADE_TwoPath(nn.Module):
def __init__(self, config_text, norm_nc, label_nc_example, label_nc_imagine, PONO=False, use_apex=False):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
self.pad_type = 'nozero'
if PONO:
self.param_free_norm = PositionalNorm2d
elif param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
if use_apex:
self.param_free_norm = apex.parallel.SyncBatchNorm(norm_nc, affine=False)
else:
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
if self.pad_type != 'zero':
self.mlp_shared_example = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(label_nc_example, nhidden, kernel_size=ks, padding=0),
nn.ReLU()
)
self.pad = nn.ReflectionPad2d(pw)
self.mlp_gamma_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
self.mlp_beta_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
self.mlp_shared_imagine = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(label_nc_imagine, nhidden, kernel_size=ks, padding=0),
nn.ReLU()
)
self.mlp_gamma_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
self.mlp_beta_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0)
else:
self.mlp_shared_example = nn.Sequential(
nn.Conv2d(label_nc_example, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_shared_imagine = nn.Sequential(
nn.Conv2d(label_nc_imagine, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, warpmap, segmap, similarity_map):
similarity_map = similarity_map.detach()
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
warpmap = F.interpolate(warpmap, size=x.size()[2:], mode='nearest')
actv_example = self.mlp_shared_example(warpmap)
actv_imagine = self.mlp_shared_imagine(segmap)
if self.pad_type != 'zero':
gamma_example = self.mlp_gamma_example(self.pad(actv_example))
beta_example = self.mlp_beta_example(self.pad(actv_example))
gamma_imagine = self.mlp_gamma_imagine(self.pad(actv_imagine))
beta_imagine = self.mlp_beta_imagine(self.pad(actv_imagine))
else:
gamma_example = self.mlp_gamma_example(actv_example)
beta_example = self.mlp_beta_example(actv_example)
gamma_imagine = self.mlp_gamma_imagine(actv_imagine)
beta_imagine = self.mlp_beta_imagine(actv_imagine)
similarity_map = F.interpolate(similarity_map, size=x.size()[2:], mode='nearest')
gamma = gamma_example * similarity_map + gamma_imagine * (1 - similarity_map)
beta = beta_example * similarity_map + beta_imagine * (1 - similarity_map)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * np.sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
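# Usage sketch (assumption: standard equalized-learning-rate wrapping, as in
# StyleGAN; the wrapped layer below is hypothetical):
#   conv = equal_lr(nn.Conv2d(64, 128, kernel_size=3, padding=1))
# After wrapping, conv.weight is recomputed on every forward pass as
# conv.weight_orig * sqrt(2 / fan_in), equalizing the effective learning
# rate across layers with different fan-in.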
|
CoCosNet/models/networks/normalization.py/0
|
{
"file_path": "CoCosNet/models/networks/normalization.py",
"repo_id": "CoCosNet",
"token_count": 5275
}
| 220 |
"""
Evaluation metrics to measure functional correctness of traces.
"""
text_identifier_num = 0
gold_identifier_num = 0
correct_identifier_num = 0
def get_output_from_trace(text):
output_list = []
parse_loc = []
start_len = 0
while True:
num = text.find("<line>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
start_len = 0
while True:
num = text.find("<output>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
# add 0 and len(text)
parse_loc.append(0)
parse_loc.append(len(text))
parse_loc = list(set(parse_loc))
parse_loc.sort()
for i, loc in enumerate(parse_loc):
if i == 0: continue
# remove the last incomplete sentence in gold
if i == len(parse_loc)-1:
if "</state>" not in text[parse_loc[i-1]:loc]:
continue
if "<output>" in text[parse_loc[i-1]:loc]:
my_output = text[parse_loc[i-1]+len("<output> "):loc].strip()
if "_____event" in my_output:
my_output = my_output[0:my_output.find("_____event")].strip()
if len(my_output) > 0:
output_list.append(my_output)
return output_list
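# Illustrative trace fragment (format inferred from the tag parsing above):
#   t = "<line> 1 <state> a : 1 </state> <output> 3 <line> 2 <state> a : 2 </state>"
#   get_output_from_trace(t)  # -> ["3"]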
def parse_text_into_sent(text):
text_list = []
parse_loc = []
start_len = 0
while True:
num = text.find("<line>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
start_len = 0
while True:
num = text.find("<output>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
# add 0 and len(text)
parse_loc.append(0)
parse_loc.append(len(text))
parse_loc = list(set(parse_loc))
parse_loc.sort()
for i, loc in enumerate(parse_loc):
if i == 0: continue
# remove the last incomplete sentence in text
if i == len(parse_loc)-1:
if "</state>" not in text[parse_loc[i-1]:loc]:
continue
text_list.append(text[parse_loc[i-1]:loc])
return text_list
def parse_gold_into_sent(text):
text_list = []
parse_loc = []
start_len = 0
while True:
num = text.find("<line>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
start_len = 0
while True:
num = text.find("<output>",start_len)
if num == -1: break
parse_loc.append(num)
start_len = num + 1
# add 0 and len(text)
parse_loc.append(0)
parse_loc.append(len(text))
parse_loc = list(set(parse_loc))
parse_loc.sort()
for i, loc in enumerate(parse_loc):
if i == 0: continue
# remove the last incomplete sentence in gold
if i == len(parse_loc)-1:
if "</state>" not in text[parse_loc[i-1]:loc]:
continue
text_list.append(text[parse_loc[i-1]:loc])
return text_list
def dict_same_key_value_num(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
same_num = 0
for key in intersect_keys:
if d1[key] == d2[key]:
same_num += 1
return same_num
def same_sent(text,gold):
if "<output>" in text or "<output>" in gold:
if text == gold: return True
else: return False
if "<state>" not in text or "<state>" not in gold:
if text == gold: return True
else: return False
text_sep = text
text_linenum_info = text[0:text.find("<state>")].strip()
text_sep = text_sep[text_sep.find("<state>"):].strip()
if text_sep.startswith("<state>"):
text_sep = text_sep[len("<state>"):]
if text_sep.endswith("</state>"):
text_sep = text_sep[:-len("</state>")]
text_sep = text_sep.split("<dictsep>")
text_dict = {}
for state in text_sep:
if ":" in state:
text_dict[state[0:state.index(":")].strip()] = state[state.index(":")+1:].strip()
gold_sep = gold
gold_linenum_info = gold[0:gold.find("<state>")].strip()
gold_sep = gold_sep[gold_sep.find("<state>"):].strip()
if gold_sep.startswith("<state>"):
gold_sep = gold_sep[len("<state>"):]
if gold_sep.endswith("</state>"):
gold_sep = gold_sep[:-len("</state>")]
gold_sep = gold_sep.split("<dictsep>")
gold_dict = {}
for state in gold_sep:
if ":" in state:
gold_dict[state[0:state.index(":")].strip()] = state[state.index(":")+1:].strip()
global correct_identifier_num
if text_linenum_info == gold_linenum_info:
correct_identifier_num += dict_same_key_value_num(text_dict,gold_dict)
if text_linenum_info == gold_linenum_info and text_dict == gold_dict: return True
return False
def get_identifier_num(text_list):
res = 0
for text in text_list:
text_sep = text
text_linenum_info = text[0:text.find("<state>")].strip()
text_sep = text_sep[text_sep.find("<state>"):].strip()
if text_sep.startswith("<state>"):
text_sep = text_sep[len("<state>"):]
if text_sep.endswith("</state>"):
text_sep = text_sep[:-len("</state>")]
text_sep = text_sep.split("<dictsep>")
text_dict = {}
for state in text_sep:
if ":" in state:
text_dict[state[0:state.index(":")].strip()] = state[state.index(":")+1:].strip()
res += len(text_dict)
return res
# Compute metrics on the Tutorial or CodeNetMut datasets.
def compute_metrics(preds, golds):
assert(len(preds) == len(golds))
em_num = 0
total_num = 0
output_same_num = 0
gold_has_output_num = 0
precision_list_line = []
recall_list_line = []
precision_list_id = []
recall_list_id = []
right_all_num = 0
text_all_num = 0
gold_all_num = 0
right_id_all_num = 0
text_id_all_num = 0
gold_id_all_num = 0
global correct_identifier_num
for i, pred in enumerate(preds):
text = pred.strip()
gold = golds[i].strip()
total_num += 1
gold_output = get_output_from_trace(gold)
predict_output = get_output_from_trace(text)
if len(gold_output) > 0:
gold_has_output_num += 1
if len(gold_output) > 0 and gold_output == predict_output:
output_same_num += 1
text_list = parse_text_into_sent(text)
gold_list = parse_gold_into_sent(gold)
text_sent_num = len(text_list)
gold_sent_num = len(gold_list)
same_sent_num = 0
text_identifier_num = 0
gold_identifier_num = 0
correct_identifier_num = 0
for i in range(0,gold_sent_num):
if i < text_sent_num and same_sent(text_list[i],gold_list[i]) == True:
same_sent_num += 1
text_identifier_num = get_identifier_num(text_list)
gold_identifier_num = get_identifier_num(gold_list)
precision_tmp = same_sent_num/text_sent_num if text_sent_num != 0 else 0
recall_tmp = same_sent_num/gold_sent_num if gold_sent_num != 0 else 0
precision_list_line.append(precision_tmp)
recall_list_line.append(recall_tmp)
right_all_num += same_sent_num
text_all_num += text_sent_num
gold_all_num += gold_sent_num
precision_id = correct_identifier_num/text_identifier_num if text_identifier_num != 0 else 0
recall_id = correct_identifier_num/gold_identifier_num if gold_identifier_num != 0 else 0
precision_list_id.append(precision_id)
recall_list_id.append(recall_id)
right_id_all_num += correct_identifier_num
text_id_all_num += text_identifier_num
gold_id_all_num += gold_identifier_num
if same_sent_num == gold_sent_num and text_sent_num == gold_sent_num:
em_num += 1
metric_list = []
output_acc = output_same_num /gold_has_output_num
em = em_num/total_num
metric_list.append(round(100 * output_acc, 2))
metric_list.append(round(100 * em, 2))
line_micro_precision = right_all_num/text_all_num
line_macro_precision = sum(precision_list_line)/len(precision_list_line)
line_micro_recall = right_all_num/gold_all_num
line_macro_recall = sum(recall_list_line)/len(recall_list_line)
line_f1 = 2 * line_micro_precision * line_micro_recall / (line_micro_precision + line_micro_recall)
metric_list.append(round(100 * line_micro_precision, 2))
metric_list.append(round(100 * line_micro_recall, 2))
metric_list.append(round(100 * line_f1, 2))
id_micro_precision = right_id_all_num/text_id_all_num
id_macro_precision = sum(precision_list_id)/len(precision_list_id)
id_micro_recall = right_id_all_num/gold_id_all_num
id_macro_recall = sum(recall_list_id)/len(recall_list_id)
id_f1 = 2 * id_micro_precision * id_micro_recall / (id_micro_precision + id_micro_recall)
metric_list.append(round(100 * id_micro_precision, 2))
metric_list.append(round(100 * id_micro_recall, 2))
metric_list.append(round(100 * id_f1, 2))
return metric_list
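# Usage sketch (hypothetical traces in the format above). Identical predictions
# and golds score 100.0 on every returned metric, provided at least one gold
# trace contains an <output> segment (gold_has_output_num must be nonzero):
#   t = "<line> 1 <state> a : 1 </state> <output> 1 <line> 2 <state> a : 2 </state>"
#   compute_metrics([t], [t])  # -> [100.0, 100.0, ..., 100.0]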
# Evaluation metrics to measure correctness of single-line traces, especially designed for the SingleLine dataset.
def compute_singleline_metrics(pred_list, gold_list):
assert(len(pred_list) == len(gold_list))
em_num = 0
total_num = 0
precision_list_id = []
recall_list_id = []
right_id_all_num = 0
text_id_all_num = 0
gold_id_all_num = 0
for i, pred in enumerate(pred_list):
text = pred.strip()
gold = gold_list[i].strip()
total_num += 1
text_sep = text
if text_sep.startswith("<state>"):
text_sep = text_sep[len("<state>"):]
if text_sep.endswith("</state>"):
text_sep = text_sep[:-len("</state>")]
text_sep = text_sep.split("<dictsep>")
text_dict = {}
for state in text_sep:
if ":" in state:
                text_dict[state[0:state.index(":")].strip()] = state[state.index(":")+1:].strip()  # split on the first ":" only, so values containing ":" survive
gold_sep = gold
if gold_sep.startswith("<state>"):
gold_sep = gold_sep[len("<state>"):]
if gold_sep.endswith("</state>"):
gold_sep = gold_sep[:-len("</state>")]
gold_sep = gold_sep.split("<dictsep>")
gold_dict = {}
for state in gold_sep:
if ":" in state:
                gold_dict[state[0:state.index(":")].strip()] = state[state.index(":")+1:].strip()  # split on the first ":" only, so values containing ":" survive
correct_id_num = dict_same_key_value_num(text_dict,gold_dict)
text_identifier_num = len(text_dict)
gold_identifier_num = len(gold_dict)
precision_id = correct_id_num/text_identifier_num if text_identifier_num != 0 else 0
recall_id = correct_id_num/gold_identifier_num if gold_identifier_num != 0 else 0
precision_list_id.append(precision_id)
recall_list_id.append(recall_id)
right_id_all_num += correct_id_num
text_id_all_num += text_identifier_num
gold_id_all_num += gold_identifier_num
if text_dict == gold_dict:
em_num += 1
metric_list = []
em = em_num/total_num
metric_list.append(round(100 * em, 2))
id_micro_precision = right_id_all_num/text_id_all_num
id_macro_precision = sum(precision_list_id)/len(precision_list_id)
id_micro_recall = right_id_all_num/gold_id_all_num
id_macro_recall = sum(recall_list_id)/len(recall_list_id)
id_f1 = 2 * id_micro_precision * id_micro_recall / (id_micro_precision + id_micro_recall)
metric_list.append(round(100 * id_micro_precision, 2))
metric_list.append(round(100 * id_micro_recall, 2))
metric_list.append(round(100 * id_f1, 2))
return metric_list
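# Usage sketch: identical single-line states give perfect scores.
#   compute_singleline_metrics(["<state> a : 1 </state>"], ["<state> a : 1 </state>"])
#   # -> [100.0, 100.0, 100.0, 100.0]  (EM, id micro-precision, id micro-recall, id F1)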
|
CodeBERT/CodeExecutor/inference/metric.py/0
|
{
"file_path": "CodeBERT/CodeExecutor/inference/metric.py",
"repo_id": "CodeBERT",
"token_count": 5504
}
| 221 |
import os, json
import torch
import logging
import argparse
import random
import numpy as np
from tqdm import tqdm
import multiprocessing
import time
from itertools import cycle
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from models import build_or_load_gen_model
from configs import add_args, set_seed, set_dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from utils import CommentGenDataset, SimpleGenDataset
from evaluator.smooth_bleu import bleu_fromstr
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_loader(data_file, args, tokenizer, pool):
def fn(features):
return features
logger.info(f"Start data file {data_file}.")
if args.raw_input:
dataset = SimpleGenDataset(tokenizer, pool, args, data_file)
else:
dataset = CommentGenDataset(tokenizer, pool, args, data_file)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=args.eval_batch_size, num_workers=args.cpu_count, collate_fn=fn)
logger.info(f"Finish data files {data_file}.")
return dataset, sampler, dataloader
def eval_epoch_bleu(args, eval_dataloader, model, tokenizer):
logger.info(f" ***** Running bleu evaluation on {args.eval_file} *****")
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
if hasattr(model, "module"):
model = model.module
pred_ids, ex_ids = [], []
for step, examples in tqdm(enumerate(eval_dataloader, 1)):
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(args.local_rank)
ids = [ex.example_id for ex in examples]
source_mask = source_ids.ne(tokenizer.pad_id)
preds = model.generate(source_ids,
attention_mask=source_mask,
use_cache=True,
num_beams=args.beam_size,
early_stopping=True,
max_length=args.max_target_length)
top_preds = list(preds.cpu().numpy())
pred_ids.extend(top_preds)
if args.break_cnt > 0 and len(pred_ids) >= args.break_cnt:
break
# [2:] to remove beginning '<s>' '<msg>'
pred_nls = [tokenizer.decode(id[2:], skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids]
valid_file = args.eval_file
out_file = args.out_file
outdics = []
golds = []
with open(valid_file, "r") as f:
for line in f:
outdics.append(json.loads(line))
golds.append(outdics[-1]["msg"])
outdics = outdics[:len(pred_nls)]
golds = golds[:len(pred_nls)]
with open(os.path.join(args.model_name_or_path, "preds.txt"), "w", encoding="utf-8") as f:
for pred in pred_nls:
f.write(pred.strip() + "\n")
with open(os.path.join(args.model_name_or_path, "golds.txt"), "w", encoding="utf-8") as f:
for gold in golds:
f.write(gold.strip() + "\n")
with open(out_file, "w", encoding="utf-8") as f:
for i, outdic in enumerate(outdics):
outdic["gen"] = pred_nls[i]
f.write(json.dumps(outdic) + "\n")
bleu = bleu_fromstr(pred_nls, golds, rmstop=False)
return bleu
def main(args):
dist.init_process_group(backend="nccl")
local_rank = dist.get_rank() % args.gpu_per_node
args.global_rank = local_rank + args.node_index * args.gpu_per_node
args.local_rank = local_rank
args.world_size = dist.get_world_size()
logger.warning("Process rank: %s, global rank: %s, world size: %s, bs: %s",
args.local_rank, args.global_rank, \
torch.distributed.get_world_size(), \
args.eval_batch_size)
torch.cuda.set_device(local_rank)
set_seed(args)
config, model, tokenizer = build_or_load_gen_model(args)
model = DDP(model.cuda(), device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
pool = multiprocessing.Pool(args.cpu_count)
data_file = args.eval_file
set_seed(args)
    _, _, dataloader = get_loader(data_file, args, tokenizer, pool) # WARNING: this is an iterator, to save memory
model.eval()
bleu = eval_epoch_bleu(args, dataloader, model, tokenizer)
logger.warning(f"BLEU: {bleu}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = add_args(parser)
args.cpu_count = multiprocessing.cpu_count()
# remove long tokenization warning. ref: https://github.com/huggingface/transformers/issues/991
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
logger.info(args)
main(args)
logger.info("Test finished.")
# torch.multiprocessing.spawn(main, args=(args,), nprocs=torch.cuda.device_count())
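# Example launch (hypothetical paths; the exact flags are defined in
# configs.add_args, so treat this as a sketch):
#   torchrun --nproc_per_node=4 run_infer_msg.py \
#       --model_name_or_path ../checkpoint --eval_file test.jsonl \
#       --out_file preds.jsonl --eval_batch_size 16 --beam_size 10 \
#       --max_target_length 128 --gpu_per_node 4 --node_index 0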
|
CodeBERT/CodeReviewer/code/run_infer_msg.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/run_infer_msg.py",
"repo_id": "CodeBERT",
"token_count": 2234
}
| 222 |
# Code Translation
## Task Definition
Code translation aims to migrate legacy software from one programming language on a platform to another.
Given a piece of Java (C#) code, the task is to translate the code into C# (Java) version.
Models are evaluated by BLEU scores and accuracy (exact match).
## Dataset
The dataset is collected from several public repos, including Lucene (http://lucene.apache.org/), POI (http://poi.apache.org/), JGit (https://github.com/eclipse/jgit/) and Antlr (https://github.com/antlr/).
We collect both the Java and C# versions of the code and find the parallel functions. After removing duplicates and functions with empty bodies, we split the whole dataset into training, validation and test sets.
### Data Format
The dataset is in the "data" folder. Each line of the files is a function, and the suffix of the file indicates the programming language. You can get data using the following command:
```
unzip data.zip
```
### Data Statistics
Data statistics of the dataset are shown in the below table:
| | #Examples |
| ----- | :-------: |
| Train | 10,300 |
| Valid | 500 |
| Test | 1,000 |
## Pipeline-GraphCodeBERT
### Dependency
- pip install torch
- pip install transformers
- pip install tree_sitter
### Tree-sitter (optional)
If the pre-built file "parser/my-languages.so" doesn't work for you, please rebuild it with the following commands:
```shell
cd parser
bash build.sh
cd ..
```
### Fine-tune
We use 4 × V100-16G GPUs to fine-tune. Taking Java-to-C# translation as an example:
```shell
source=java
target=cs
lr=1e-4
batch_size=32
beam_size=10
source_length=320
target_length=256
output_dir=saved_models/$source-$target/
train_file=data/train.java-cs.txt.$source,data/train.java-cs.txt.$target
dev_file=data/valid.java-cs.txt.$source,data/valid.java-cs.txt.$target
epochs=100
pretrained_model=microsoft/graphcodebert-base
mkdir -p $output_dir
python run.py \
--do_train \
--do_eval \
--model_type roberta \
--source_lang $source \
--model_name_or_path $pretrained_model \
--tokenizer_name microsoft/graphcodebert-base \
--config_name microsoft/graphcodebert-base \
--train_filename $train_file \
--dev_filename $dev_file \
--output_dir $output_dir \
--max_source_length $source_length \
--max_target_length $target_length \
--beam_size $beam_size \
--train_batch_size $batch_size \
--eval_batch_size $batch_size \
--learning_rate $lr \
--num_train_epochs $epochs 2>&1| tee $output_dir/train.log
```
### Inference
We use full test data for inference.
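Note that this block reuses `$source`, `$target`, `$output_dir`, `$pretrained_model`, `$source_length`, `$target_length` and `$beam_size` from the fine-tuning step. If you run inference in a fresh shell, re-export them first (Java-to-C# values shown):
```shell
source=java
target=cs
beam_size=10
source_length=320
target_length=256
pretrained_model=microsoft/graphcodebert-base
output_dir=saved_models/$source-$target/
```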
```shell
batch_size=64
dev_file=data/valid.java-cs.txt.$source,data/valid.java-cs.txt.$target
test_file=data/test.java-cs.txt.$source,data/test.java-cs.txt.$target
load_model_path=$output_dir/checkpoint-best-bleu/pytorch_model.bin #checkpoint for test
python run.py \
--do_test \
--model_type roberta \
--source_lang $source \
--model_name_or_path $pretrained_model \
--tokenizer_name microsoft/graphcodebert-base \
--config_name microsoft/graphcodebert-base \
--load_model_path $load_model_path \
--dev_filename $dev_file \
--test_filename $test_file \
--output_dir $output_dir \
--max_source_length $source_length \
--max_target_length $target_length \
--beam_size $beam_size \
--eval_batch_size $batch_size 2>&1| tee $output_dir/test.log
```
## Result
The results on the test set are shown as below:
Java to C#:
| Method | BLEU | Acc (100%) |
| -------------- | :-------: | :--------: |
| Naive copy | 18.54 | 0.0 |
| PBSMT | 43.53 | 12.5 |
| Transformer | 55.84 | 33.0 |
| RoBERTa (code) | 77.46 | 56.1 |
| CodeBERT | 79.92 | 59.0 |
| GraphCodeBERT | **80.58** | **59.4** |
C# to Java:
| Method | BLEU | Acc (100%) |
| -------------- | :-------: | :--------: |
| Naive copy | 18.69 | 0.0 |
| PBSMT | 40.06 | 16.1 |
| Transformer | 50.47 | 37.9 |
| RoBERTa (code) | 71.99 | 57.9 |
| CodeBERT | 72.14 | 58.0 |
| GraphCodeBERT | **72.64** | **58.8** |
|
CodeBERT/GraphCodeBERT/translation/README.md/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/translation/README.md",
"repo_id": "CodeBERT",
"token_count": 1538
}
| 223 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import json
from tqdm import tqdm
def files(path):
g = os.walk(path)
file=[]
for path,dir_list,file_list in g:
for file_name in file_list:
file.append(os.path.join(path, file_name))
return file
cont=0
with open("train.jsonl",'w') as f:
    for i in tqdm(range(1,65),total=64):  # classes 1-64 -> train split
items=files("ProgramData/{}".format(i))
for item in items:
js={}
js['label']=item.split('/')[1]
js['index']=str(cont)
js['code']=open(item,encoding='latin-1').read()
f.write(json.dumps(js)+'\n')
cont+=1
with open("valid.jsonl",'w') as f:
    for i in tqdm(range(65,81),total=16):  # classes 65-80 -> valid split
items=files("ProgramData/{}".format(i))
for item in items:
js={}
js['label']=item.split('/')[1]
js['index']=str(cont)
js['code']=open(item,encoding='latin-1').read()
f.write(json.dumps(js)+'\n')
cont+=1
with open("test.jsonl",'w') as f:
    for i in tqdm(range(81,105),total=24):  # classes 81-104 -> test split (POJ-104 has 104 classes)
items=files("ProgramData/{}".format(i))
for item in items:
js={}
js['label']=item.split('/')[1]
js['index']=str(cont)
js['code']=open(item,encoding='latin-1').read()
f.write(json.dumps(js)+'\n')
cont+=1
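# Each emitted line is a JSON object of the form (illustrative):
#   {"label": "12", "index": "0", "code": "int main() { ... }"}
# where "label" is the class-folder name under ProgramData.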
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/dataset/preprocess.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/dataset/preprocess.py",
"repo_id": "CodeBERT",
"token_count": 774
}
| 224 |
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
import sys, math, re, xml.sax.saxutils
import subprocess
import os
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
    s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] = counts.get(ngram, 0)+1
return counts
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
for comps in allcomps:
for key in ['testlen','reflen']:
totalcomps[key] += comps[key]
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i ==0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
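# Usage sketch: smoothed BLEU for one candidate against its references.
#   bleu(["the cat sat on the mat"], "the cat sat on the mat")[0]  # -> 1.0
# Index 0 is the overall smoothed BLEU; indices 1..4 are per-n-gram scores.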
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
gf = open(goldfile, 'r')
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in gf:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
#m1 is the reference map
#m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [ score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print (bleuFromMaps(goldMap, predictionMap)[0])
|
CodeBERT/UniXcoder/downstream-tasks/code-summarization/bleu.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-summarization/bleu.py",
"repo_id": "CodeBERT",
"token_count": 2960
}
| 225 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003. """
import sys
import logging
import os
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import pdb
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
import scipy
from verifier_metrics import VerifierMetrics
import utils_io
import shutil
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
from deberta_model import DebertaV2ForTokenClassification
import pdb
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
dataset_name: str = field(
metadata={"help": "Name of the dataset to be run"}
)
previous_run_dir: Optional[str] = field(
        default=None, metadata={"help": "Directory of a previous run from which to load a checkpoint"}
)
previous_run_epoch: Optional[int] = field(
        default=1, metadata={"help": "1-based epoch index of the checkpoint to load from previous_run_dir"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
task_type: Optional[str] = field(
default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
test_data: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
data_labels: Optional[str] = field(
default="labels.txt",
metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
)
max_seq_length: int = field(
default=512,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
alpha: Optional[float] = field(
default=0.0, metadata={"help": "help"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
module = import_module("tasks")
try:
token_classification_task_clazz = getattr(module, model_args.task_type)
token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
    # Prepare the token-classification (verifier) task
labels = token_classification_task.get_labels(data_args.data_labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.previous_run_dir is not None:
ckpt_path_list = [x for x in os.listdir(model_args.previous_run_dir) if "checkpoint" in x]
ckpt_path_list = sorted(ckpt_path_list, key=lambda x : int(x.split("-")[1]))
load_model_dir = ckpt_path_list[model_args.previous_run_epoch - 1] # index starts from 0
model_args.model_name_or_path = os.path.join(model_args.previous_run_dir, load_model_dir)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
# pdb.set_trace()
# code change begin
config.task_specific_params = {}
config.task_specific_params["solution_correct_loss_weight"] = 1.0
config.task_specific_params["solution_incorrect_loss_weight"] = 1.0
config.task_specific_params["step_correct_loss_weight"] = data_args.alpha
config.task_specific_params["step_incorrect_loss_weight"] = data_args.alpha
config.task_specific_params["other_label_loss_weight"] = 0.0
# code change end
print("alpha:", data_args.alpha)
print("alpha:", config.task_specific_params["step_correct_loss_weight"])
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
model = DebertaV2ForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# data_dir = data_args.train_data.replace("train.txt", "") # for debug use
data_dir = os.path.join(training_args.output_dir, "data/")
print("[data_dir]:", data_dir)
os.makedirs(data_dir, exist_ok=True)
shutil.copy(utils_io.get_file(data_args.train_data), data_dir)
print(f"train file copied to: {data_dir}")
shutil.copy(utils_io.get_file(data_args.test_data), data_dir + "dev.txt")
print(f"dev file copied to: {data_dir}")
shutil.copy(utils_io.get_file(data_args.test_data), data_dir)
print(f"test file copied to: {data_dir}")
shutil.copy(utils_io.get_file(data_args.data_labels), data_dir)
print(f"labels file copied to: {data_dir}")
# Get datasets
train_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
# save the texual sequences of eval dataset
eval_sequences = [tokenizer.decode(x.input_ids) for x in eval_dataset]
first_test_case_question = eval_sequences[0].split("&&")[-1].strip()
pred_num_per_case = 0
for i, seq in enumerate(eval_sequences[1:]):
if seq.split("&&")[-1].strip() == first_test_case_question:
pred_num_per_case += 1
else:
break
print("pred_num_per_case:", pred_num_per_case)
def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if j == 1: # only pick the second index
# if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
def get_solution_logits(predictions: np.ndarray):
scores = []
for i in range(predictions.shape[0]):
solution_correct_index = config.label2id["SOLUTION-CORRECT"]
score = scipy.special.softmax(predictions[i][1])[solution_correct_index].item()
scores.append(score)
return scores
# gsm8k_metric = datasets.load_metric("./gsm8k_verifier_metrics")
metric = VerifierMetrics(
eval_sequences=eval_sequences,
pred_num_per_case=pred_num_per_case,
dataset_name=model_args.dataset_name,
)
def compute_metrics(p: EvalPrediction) -> Dict:
scores = get_solution_logits(p.predictions)
return metric.compute(predictions=scores, references=scores)
# Data collator
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
data_collator=data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.test,
)
predictions, label_ids, metrics = trainer.predict(test_dataset)
preds_list, _ = align_predictions(predictions, label_ids)
output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
if trainer.is_world_process_zero():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_dir, "test.txt"), "r") as f:
token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
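# Example launch (hypothetical values; most flags are standard HF TrainingArguments):
#   python run_ner.py --model_name_or_path microsoft/deberta-v3-large \
#       --dataset_name gsm8k --train_data train.txt --test_data dev.txt \
#       --data_labels labels.txt --output_dir outputs/ \
#       --do_train --do_eval --alpha 0.1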
|
CodeT/DIVERSE/code/src/run_ner.py/0
|
{
"file_path": "CodeT/DIVERSE/code/src/run_ner.py",
"repo_id": "CodeT",
"token_count": 6324
}
| 226 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from make_window import MakeWindowWrapper
from build_vector import BuildVectorWrapper, BagOfWords
from search_code import CodeSearchWrapper
from build_prompt import BuildPromptWrapper
from utils import CONSTANTS, CodexTokenizer
def make_repo_window(repos, window_sizes, slice_sizes):
worker = MakeWindowWrapper(None, repos, window_sizes, slice_sizes)
worker.window_for_repo_files()
def run_RG1_and_oracle_method(benchmark, repos, window_sizes, slice_sizes):
# build code snippets for all the repositories
make_repo_window(repos, window_sizes, slice_sizes)
# build code snippets for vanilla retrieval-augmented approach and ground truth
MakeWindowWrapper(benchmark, repos, window_sizes, slice_sizes).window_for_baseline_and_ground()
# build vector for vanilla retrieval-augmented approach and ground truth
vectorizer = BagOfWords
BuildVectorWrapper(benchmark, vectorizer, repos, window_sizes, slice_sizes).vectorize_baseline_and_ground_windows()
# search code for vanilla retrieval-augmented approach and ground truth
CodeSearchWrapper('one-gram', benchmark, repos, window_sizes, slice_sizes).search_baseline_and_ground()
# build prompt for vanilla retrieval-augmented approach and ground truth
tokenizer = CodexTokenizer
mode = CONSTANTS.rg
output_file_path = 'prompts/rg-one-gram-ws-20-ss-2.jsonl'
BuildPromptWrapper('one-gram', benchmark, repos, window_sizes, slice_sizes, tokenizer).build_first_search_prompt(mode, output_file_path)
mode = CONSTANTS.gt
output_file_path = 'prompts/gt-one-gram-ws-20-ss-2.jsonl'
BuildPromptWrapper('one-gram', benchmark, repos, window_sizes, slice_sizes, tokenizer).build_first_search_prompt(mode, output_file_path)
def run_RepoCoder_method(benchmark, repos, window_sizes, slice_sizes, prediction_path):
mode = CONSTANTS.rgrg
MakeWindowWrapper(benchmark, repos, window_sizes, slice_sizes).window_for_prediction(mode, prediction_path)
vectorizer = BagOfWords
BuildVectorWrapper(benchmark, vectorizer, repos, window_sizes, slice_sizes).vectorize_prediction_windows(mode, prediction_path)
CodeSearchWrapper('one-gram', benchmark, repos, window_sizes, slice_sizes).search_prediction(mode, prediction_path)
tokenizer = CodexTokenizer
output_file_path = 'prompts/repocoder-one-gram-ws-20-ss-2.jsonl'
BuildPromptWrapper('one-gram', benchmark, repos, window_sizes, slice_sizes, tokenizer).build_prediction_prompt(mode, prediction_path, output_file_path)
if __name__ == '__main__':
repos = [
'huggingface_diffusers',
'nerfstudio-project_nerfstudio',
'awslabs_fortuna',
'huggingface_evaluate',
'google_vizier',
'alibaba_FederatedScope',
'pytorch_rl',
'opendilab_ACE',
]
window_sizes = [20]
slice_sizes = [2] # 20 / 2 = 10
# build prompt for the RG1 and oracle methods
run_RG1_and_oracle_method(CONSTANTS.api_benchmark, repos, window_sizes, slice_sizes)
# build prompt for the RepoCoder method
prediction_path = 'predictions/rg-one-gram-ws-20-ss-2_samples.0.jsonl'
run_RepoCoder_method(CONSTANTS.api_benchmark, repos, window_sizes, slice_sizes, prediction_path)
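# Typical two-stage flow (sketch): run_RG1_and_oracle_method builds
# prompts/rg-one-gram-ws-20-ss-2.jsonl; after querying a code LM with those
# prompts and saving its samples to the prediction_path above,
# run_RepoCoder_method builds the RepoCoder prompts from the model's own
# predictions.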
|
CodeT/RepoCoder/run_pipeline.py/0
|
{
"file_path": "CodeT/RepoCoder/run_pipeline.py",
"repo_id": "CodeT",
"token_count": 1228
}
| 227 |