text (string, 5–22M chars) | id (string, 12–177 chars) | metadata (dict) | __index_level_0__ (int64, 0–1.37k)
---|---|---|---|
resource "azurerm_network_interface" "internal" {
name = "internal-nic-${local.service_resource_name_suffix}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
tags = local.tre_user_resources_tags
ip_configuration {
name = "primary"
subnet_id = data.azurerm_subnet.services.id
private_ip_address_allocation = "Dynamic"
}
lifecycle { ignore_changes = [tags] }
}
resource "random_string" "username" {
length = 4
upper = true
lower = true
numeric = true
min_numeric = 1
min_lower = 1
special = false
}
resource "random_password" "password" {
length = 16
lower = true
min_lower = 1
upper = true
min_upper = 1
numeric = true
min_numeric = 1
special = true
min_special = 1
override_special = "_%@"
}
resource "azurerm_windows_virtual_machine" "windowsvm" {
name = local.vm_name
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
network_interface_ids = [azurerm_network_interface.internal.id]
size = local.vm_sizes[var.vm_size]
allow_extension_operations = true
admin_username = random_string.username.result
admin_password = random_password.password.result
custom_data = base64encode(data.template_file.download_review_data_script.rendered)
# set source_image_id/reference depending on the config for the selected image
source_image_id = local.selected_image_source_id
dynamic "source_image_reference" {
for_each = local.selected_image_source_refs
content {
publisher = source_image_reference.value["publisher"]
offer = source_image_reference.value["offer"]
sku = source_image_reference.value["sku"]
version = source_image_reference.value["version"]
}
}
os_disk {
name = "osdisk-${local.vm_name}"
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
identity {
type = "SystemAssigned"
}
tags = local.tre_user_resources_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_virtual_machine_extension" "config_script" {
name = "${azurerm_windows_virtual_machine.windowsvm.name}-vmextension"
virtual_machine_id = azurerm_windows_virtual_machine.windowsvm.id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.10"
tags = local.tre_user_resources_tags
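# On Windows VMs the rendered custom_data is written to C:\AzureData\CustomData.bin;
# this extension copies it to configure.ps1 and executes it.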
protected_settings = <<PROT
{
"commandToExecute": "powershell -ExecutionPolicy Unrestricted -NoProfile -NonInteractive -command \"cp c:/azuredata/customdata.bin c:/azuredata/configure.ps1; c:/azuredata/configure.ps1 \""
}
PROT
lifecycle { ignore_changes = [tags] }
}
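# Store the generated admin username and password together (newline separated) in the workspace Key Vault.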
resource "azurerm_key_vault_secret" "windowsvm_password" {
name = "${local.vm_name}-admin-credentials"
value = "${random_string.username.result}\n${random_password.password.result}"
key_vault_id = data.azurerm_key_vault.ws.id
tags = local.tre_user_resources_tags
lifecycle { ignore_changes = [tags] }
}
data "template_file" "download_review_data_script" {
template = file("${path.module}/download_review_data.ps1")
vars = {
airlock_request_sas_url = var.airlock_request_sas_url
}
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/windowsvm.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/windowsvm.tf",
"repo_id": "AzureTRE",
"token_count": 1531
}
| 124 |
output "ip" {
value = azurerm_network_interface.internal.private_ip_address
}
output "hostname" {
value = azurerm_linux_virtual_machine.linuxvm.name
}
output "azure_resource_id" {
value = azurerm_linux_virtual_machine.linuxvm.id
}
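# Guacamole connection URIs embed a base64-encoded identifier of the form "<name>\0<type>\0<datasource>";
# here the type is "c" (connection) and the data source is "azuretre".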
output "connection_uri" {
value = "https://${data.azurerm_linux_web_app.guacamole.default_hostname}/?/client/${textencodebase64("${azurerm_linux_virtual_machine.linuxvm.name}\u0000c\u0000azuretre", "UTF-8")}"
}
output "vm_username" {
value = random_string.username.result
}
output "vm_password_secret_name" {
value = local.vm_password_secret_name
}
output "keyvault_name" {
value = local.keyvault_name
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 245
}
| 125 |
#!/bin/bash
set -e
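# Parse the JSON query passed on stdin (as done by a Terraform external data source) into shell variables.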
eval "$(jq -r '@sh "firewall_name=\(.firewall_name) resource_group_name=\(.resource_group_name) collection_name_suffix=\(.collection_name_suffix)"')"
if NETWORK_RULES=$(az network firewall network-rule list -g $resource_group_name -f $firewall_name --collection-name "nrc-$collection_name_suffix" -o json); then
NETWORK_RULE_PRIORITY=$(echo "$NETWORK_RULES" | jq '.priority')
else
NETWORK_RULE_MAX_PRIORITY=$(az network firewall network-rule collection list -f $firewall_name -g $resource_group_name -o json --query 'not_null(max_by([],&priority).priority) || `100`')
NETWORK_RULE_PRIORITY=$(($NETWORK_RULE_MAX_PRIORITY+1))
fi
if APPLICATION_RULES=$(az network firewall application-rule list -g $resource_group_name -f $firewall_name --collection-name "arc-$collection_name_suffix" -o json); then
APPLICATION_RULE_PRIORITY=$(echo "$APPLICATION_RULES" | jq '.priority')
else
APPLICATION_RULE_MAX_PRIORITY=$(az network firewall application-rule collection list -f $firewall_name -g $resource_group_name -o json --query 'not_null(max_by([],&priority).priority) || `100`')
APPLICATION_RULE_PRIORITY=$(($APPLICATION_RULE_MAX_PRIORITY+1))
fi
# Safely produce a JSON object containing the result value.
jq -n --arg network_rule_priority "$NETWORK_RULE_PRIORITY" --arg application_rule_priority "$APPLICATION_RULE_PRIORITY" '{ "network_rule_priority":$network_rule_priority, "application_rule_priority":$application_rule_priority }'
|
AzureTRE/templates/workspace_services/innereye/terraform/get_firewall_priorities.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/terraform/get_firewall_priorities.sh",
"repo_id": "AzureTRE",
"token_count": 518
}
| 126 |
[Environment]::SetEnvironmentVariable("AZURE_STORAGE_CONNECTION_STRING", "${MLFlow_Connection_String}", "Machine")
pip install mlflow==1.24.0
pip install azure-storage-blob==12.10.0
pip install azure-identity==1.8.0
|
AzureTRE/templates/workspace_services/mlflow/mlflow-vm-config/windows/template_config.ps1/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/mlflow-vm-config/windows/template_config.ps1",
"repo_id": "AzureTRE",
"token_count": 81
}
| 127 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
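# Build a single OHDSI WebAPI source "daimon" JSON object for the given daimon type and table qualifier.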
function build_daimon_object() {
local DAIMON_TYPE=$1
local VALUE=$2
echo '{
"tableQualifier": "'"$VALUE"'",
"priority": 0,
"sourceDaimonId": null,
"daimonType": "'"$DAIMON_TYPE"'"
}'
}
# Login
login_response=$(curl "https://${OHDSI_WEB_API_URL}/WebAPI/user/login/db" \
--data-raw "login=$OHDSI_WEB_API_USER&password=$OHDSI_WEB_API_PASSWORD" \
--compressed -i)
token=$(echo "$login_response" | grep -i bearer: | sed 's/Bearer: //' | tr -d '[:space:]')
# Build the request payload
JSON_PAYLOAD="{}"
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.dialect = $DIALECT' --arg DIALECT "$DIALECT")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.name = $SOURCE_NAME' --arg SOURCE_NAME "$SOURCE_NAME")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.key = $SOURCE_KEY' --arg SOURCE_KEY "$SOURCE_KEY")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.connectionString = $CONNECTION_STRING' --arg CONNECTION_STRING "$CONNECTION_STRING")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.username = $USERNAME' --arg USERNAME "$USERNAME")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.password = $PASSWORD' --arg PASSWORD "$PASSWORD")
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.krbAuthMethod = "password"')
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.krbAdminServer = null')
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons = []')
if [[ -v DAIMON_CDM ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_CDM]' --argjson DAIMON_CDM "$(build_daimon_object "CDM" "${DAIMON_CDM}")")
fi
if [[ -v DAIMON_VOCABULARY ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_VOCABULARY]' --argjson DAIMON_VOCABULARY "$(build_daimon_object "Vocabulary" "${DAIMON_VOCABULARY}")")
fi
if [[ -v DAIMON_RESULTS ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_RESULTS]' --argjson DAIMON_RESULTS "$(build_daimon_object "Results" "${DAIMON_RESULTS}")")
fi
if [[ -v DAIMON_CEM ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_CEM]' --argjson DAIMON_CEM "$(build_daimon_object "CEM" "${DAIMON_CEM}")")
fi
if [[ -v DAIMON_CEM_RESULTS ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_CEM_RESULTS]' --argjson DAIMON_CEM_RESULTS "$(build_daimon_object "CEMResults" "${DAIMON_CEM_RESULTS}")")
fi
if [[ -v DAIMON_TEMP ]]; then
JSON_PAYLOAD=$(echo "$JSON_PAYLOAD" | jq '.daimons += [$DAIMON_TEMP]' --argjson DAIMON_TEMP "$(build_daimon_object "Temp" "${DAIMON_TEMP}")")
fi
# Add the data source
curl -v "https://${OHDSI_WEB_API_URL}/WebAPI/source/" \
-H "Authorization: Bearer ${token}" \
-H 'Content-Type: multipart/form-data; boundary=----WebKitFormBoundary2C72lleJPQ9UH4DL' \
--data-raw $'------WebKitFormBoundary2C72lleJPQ9UH4DL\r\nContent-Disposition: form-data; name="keyfile"\r\n\r\nundefined\r\n------WebKitFormBoundary2C72lleJPQ9UH4DL\r\nContent-Disposition: form-data; name="source"; filename="blob"\r\nContent-Type: application/json\r\n\r\n'"${JSON_PAYLOAD}"$'\r\n------WebKitFormBoundary2C72lleJPQ9UH4DL--\r\n' \
--compressed
|
AzureTRE/templates/workspace_services/ohdsi/scripts/add_data_source.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/scripts/add_data_source.sh",
"repo_id": "AzureTRE",
"token_count": 1329
}
| 128 |
define([], function () {
var configLocal = {};
// clearing local storage otherwise source cache will obscure the override settings
localStorage.clear();
// WebAPI
configLocal.api = {
name: 'OHDSI',
url: "${OHDSI_WEBAPI_URL}"
};
configLocal.cohortComparisonResultsEnabled = false;
configLocal.userAuthenticationEnabled = true;
configLocal.plpResultsEnabled = false;
configLocal.authProviders = [
{
"name": "OpenID",
"url": "user/login/openid",
"ajax": false,
"icon": "fab fa-openid"
},
{
"name": "Local Security Test DB",
"url": "user/login/db",
"ajax": true,
"icon": "fa fa-database",
"isUseCredentialsForm": true
}
];
return configLocal;
});
|
AzureTRE/templates/workspace_services/ohdsi/terraform/config_local.tftpl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/config_local.tftpl",
"repo_id": "AzureTRE",
"token_count": 290
}
| 129 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/airlock_import_review/template_schema.json",
"type": "object",
"title": "Airlock Import Review Workspace",
"description": "This workspace template is intended to conduct Airlock Data Import reviews from.",
"required": [
"auth_type",
"address_space_size"
],
"authorizedRoles": [],
"properties": {
"app_service_plan_sku": {
"type": "string",
"title": "App Service Plan SKU",
"description": "The SKU that will be used when deploying an Azure App Service Plan.",
"default": "P1v3",
"enum": [
"P1v3",
"P1v2",
"S1"
]
},
"address_space_size": {
"type": "string",
"title": "Address space size",
"description": "Network address size (small, medium, large or custom) to be used by the workspace.",
"default": "small",
"enum": [
"small",
"medium",
"large",
"custom"
]
},
"address_spaces": {
"type": "array",
"title": "Address spaces",
"description": "Network address space to be used by the workspace.",
"updateable": true
},
"auth_type": {
"type": "string",
"title": "Workspace Authentication Type",
"description": "",
"default": "Automatic",
"enum": [
"Automatic",
"Manual"
],
"updateable": true
}
},
"allOf": [
{
"if": {
"properties": {
"address_space_size": {
"enum": [
"custom"
]
}
},
"required": [
"address_space_size"
]
},
"then": {
"properties": {
"address_space": {
"type": "string",
"title": "Address space",
"description": "Network address space to be used by the workspace if 'Address space size' is custom."
}
},
"required": [
"address_space"
]
}
},
{
"if": {
"properties": {
"auth_type": {
"const": "Manual"
}
},
"required": [
"auth_type"
]
},
"then": {
"properties": {
"client_id": {
"type": "string",
"title": "Application (Client) ID",
"description": "The AAD Application Registration ID for the workspace.",
"updateable": true
},
"client_secret": {
"type": "string",
"title": "Application (Client) Secret",
"description": "The AAD Application Registration secret for the workspace. This value will be stored in the Workspace Key Vault.",
"sensitive": true,
"updateable": true
}
},
"required": [
"client_id"
]
},
"else": {
"properties": {
"create_aad_groups": {
"type": "boolean",
"title": "Create AAD Groups for each workspace role",
"description": "Create AAD Groups for the workspace roles. If this is set to true, the workspace will create new AAD Groups.",
"default": false,
"updateable": true
},
"aad_redirect_uris": {
"type": "array",
"title": "AAD Redirect URIs",
"description": "Redirect URIs for the AAD app in Automatic Auth mode",
"updateable": true,
"items": {
"title": "items",
"type": "object",
"required": [
"name",
"value"
],
"properties": {
"name": {
"title": "name",
"type": "string",
"description": "Redirect URI Name",
"examples": [
"My Redirect URI"
],
"pattern": "^.*$"
},
"value": {
"title": "value",
"type": "string",
"description": "Redirect URI Value",
"examples": [
"https://a-domain-name.com/oauth/"
]
}
}
}
}
}
}
}
],
"actions": [],
"customActions": [],
"pipeline": null,
"uiSchema": {
"aad_redirect_uris": {
"classNames": "tre-hidden"
},
"address_spaces": {
"classNames": "tre-hidden"
},
"ui:order": [
"display_name",
"description",
"overview",
"app_service_plan_sku",
"address_space_size",
"address_spaces",
"auth_type",
"create_aad_groups",
"client_id",
"client_secret",
"*"
]
}
}
|
AzureTRE/templates/workspaces/airlock-import-review/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspaces/airlock-import-review/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 2550
}
| 130 |
locals {
core_resource_group_name = "rg-${var.tre_id}"
workspace_resource_name_suffix = "${var.tre_id}-ws-${var.short_workspace_id}"
import_approved_sys_topic_name = "evgt-airlock-import-approved-${local.workspace_resource_name_suffix}"
export_inprogress_sys_topic_name = "evgt-airlock-export-inprog-${local.workspace_resource_name_suffix}"
export_rejected_sys_topic_name = "evgt-airlock-export-rejected-${local.workspace_resource_name_suffix}"
export_blocked_sys_topic_name = "evgt-airlock-export-blocked-${local.workspace_resource_name_suffix}"
blob_created_topic_name = "airlock-blob-created"
# STorage AirLock IMport APProved
import_approved_storage_name = lower(replace("stalimapp${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
# STorage AirLock EXport INTernal
export_internal_storage_name = lower(replace("stalexint${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
# STorage AirLock EXport InProgress
export_inprogress_storage_name = lower(replace("stalexip${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
# STorage AirLock EXport REJected
export_rejected_storage_name = lower(replace("stalexrej${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
# STorage AirLock EXport BLOCKED
export_blocked_storage_name = lower(replace("stalexblocked${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
airlock_blob_data_contributor = [
azurerm_storage_account.sa_import_approved.id,
azurerm_storage_account.sa_export_internal.id,
azurerm_storage_account.sa_export_inprogress.id,
azurerm_storage_account.sa_export_rejected.id,
azurerm_storage_account.sa_export_blocked.id
]
api_sa_data_contributor = [
azurerm_storage_account.sa_import_approved.id,
azurerm_storage_account.sa_export_internal.id,
azurerm_storage_account.sa_export_inprogress.id
]
}
|
AzureTRE/templates/workspaces/base/terraform/airlock/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/airlock/locals.tf",
"repo_id": "AzureTRE",
"token_count": 724
}
| 131 |
# For recommended Azure private DNS zone names see https://docs.microsoft.com/azure/private-link/private-endpoint-dns#azure-services-dns-zone-configuration
# To enable connecting to Azure Monitor from within a workspace VNET (where traffic is restricted), we need to have an Azure Monitor Private Link Scope (AMPLS) that is connected to a Private Endpoint within the VNET.
# An AMPLS can be connected to multiple Private Endpoints and multiple Azure Monitor resources, but [an AMPLS can only connect to up to 10 Private Endpoints](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/private-link-design#consider-ampls-limits) so the suggestion is to deploy an AMPLS per workspace for simplicity.
# Because there are some shared endpoints (i.e. not resource-specific), a [single AMPLS should be used for all VNETs that share the same DNS](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/private-link-security#azure-monitor-private-links-rely-on-your-dns). Currently, we have separate VNETs for each workspace but each VNET is linked to the same, single private DNS Zone for Azure Monitor/App Insights. To enable an AMPLS per workspace, we need to update the private DNS Zones for Azure Monitor so that the existing zones are just used for the core VNET and deploy separate zones for each workspace.
# Azure Monitor requires 5 DNS zones:
# - privatelink.monitor.azure.com
# - privatelink.oms.opinsights.azure.com
# - privatelink.ods.opinsights.azure.com
# - privatelink.agentsvc.azure-automation.net
# - privatelink.blob.core.windows.net (used also by Storage module)
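# Illustrative sketch only (not part of this module): the per-workspace AMPLS described above would look
# roughly like the following; the Log Analytics workspace reference and the names are placeholders.
# resource "azurerm_monitor_private_link_scope" "workspace" {
#   name                = "ampls-workspace"
#   resource_group_name = var.ws_resource_group_name
# }
# resource "azurerm_monitor_private_link_scoped_service" "log_analytics" {
#   name                = "ampls-log-analytics"
#   resource_group_name = var.ws_resource_group_name
#   scope_name          = azurerm_monitor_private_link_scope.workspace.name
#   linked_resource_id  = azurerm_log_analytics_workspace.workspace.id
# }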
resource "azurerm_private_dns_zone" "azure_monitor" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.monitor.azure.com"]
resource_group_name = var.ws_resource_group_name
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor" {
name = "azure-monitor-link"
resource_group_name = var.ws_resource_group_name
virtual_network_id = azurerm_virtual_network.ws.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor.name
registration_enabled = false
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_oms_opinsights" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.oms.opinsights.azure.com"]
resource_group_name = var.ws_resource_group_name
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_oms_opinsights" {
name = "azure-monitor-link"
resource_group_name = var.ws_resource_group_name
virtual_network_id = azurerm_virtual_network.ws.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_oms_opinsights.name
registration_enabled = false
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_ods_opinsights" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.ods.opinsights.azure.com"]
resource_group_name = var.ws_resource_group_name
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_ods_opinsights" {
name = "azure-monitor-link"
resource_group_name = var.ws_resource_group_name
virtual_network_id = azurerm_virtual_network.ws.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_ods_opinsights.name
registration_enabled = false
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone" "azure_monitor_agentsvc" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.agentsvc.azure-automation.net"]
resource_group_name = var.ws_resource_group_name
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azure_monitor_agentsvc" {
name = "azure-monitor-link"
resource_group_name = var.ws_resource_group_name
virtual_network_id = azurerm_virtual_network.ws.id
private_dns_zone_name = azurerm_private_dns_zone.azure_monitor_agentsvc.name
registration_enabled = false
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspaces/base/terraform/network/dns_zones.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/network/dns_zones.tf",
"repo_id": "AzureTRE",
"token_count": 1721
}
| 132 |
# syntax=docker/dockerfile-upstream:1.4.0
FROM --platform=linux/amd64 debian:bullseye-slim
# PORTER_INIT
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
# Git is required for terraform_azurerm_environment_configuration
RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \
apt-get update && apt-get install -y git jq curl ca-certificates patch --no-install-recommends
ARG AZURE_TRE_VERSION="0.14.0"
WORKDIR $BUNDLE_DIR
# Copy all files from base workspace (note: some of them will be overwritten with the following COPY command)
RUN curl -o azuretre.tar.gz -L "https://github.com/microsoft/AzureTRE/archive/refs/tags/v${AZURE_TRE_VERSION}.tar.gz" \
&& tar -xzf azuretre.tar.gz "AzureTRE-${AZURE_TRE_VERSION}/templates/workspaces/base" --strip-components=4 --skip-old-files \
&& rm -rf azuretre.tar.gz
# PORTER_MIXINS
# Use the BUNDLE_DIR build argument to copy files into the bundle
COPY --link . ${BUNDLE_DIR}/
|
AzureTRE/templates/workspaces/unrestricted/Dockerfile.tmpl/0
|
{
"file_path": "AzureTRE/templates/workspaces/unrestricted/Dockerfile.tmpl",
"repo_id": "AzureTRE",
"token_count": 403
}
| 133 |
import React, { useEffect, useState } from 'react';
import { DefaultPalette, IStackStyles, MessageBar, MessageBarType, Stack } from '@fluentui/react';
import './App.scss';
import { TopNav } from './components/shared/TopNav';
import { Routes, Route } from 'react-router-dom';
import { RootLayout } from './components/root/RootLayout';
import { WorkspaceProvider } from './components/workspaces/WorkspaceProvider';
import { MsalAuthenticationTemplate } from '@azure/msal-react';
import { InteractionType } from '@azure/msal-browser';
import { Workspace } from './models/workspace';
import { AppRolesContext } from './contexts/AppRolesContext';
import { WorkspaceContext } from './contexts/WorkspaceContext';
import { GenericErrorBoundary } from './components/shared/GenericErrorBoundary';
import { HttpMethod, ResultType, useAuthApiCall } from './hooks/useAuthApiCall';
import { ApiEndpoint } from './models/apiEndpoints';
import { CreateUpdateResource } from './components/shared/create-update-resource/CreateUpdateResource';
import { CreateUpdateResourceContext } from './contexts/CreateUpdateResourceContext';
import { CreateFormResource, ResourceType } from './models/resourceType';
import { Footer } from './components/shared/Footer';
import { initializeFileTypeIcons } from '@fluentui/react-file-type-icons';
import { CostResource } from './models/costs';
import { CostsContext } from './contexts/CostsContext';
import { LoadingState } from './models/loadingState';
export const App: React.FunctionComponent = () => {
const [appRoles, setAppRoles] = useState([] as Array<string>);
const [selectedWorkspace, setSelectedWorkspace] = useState({} as Workspace);
const [workspaceRoles, setWorkspaceRoles] = useState([] as Array<string>);
const [workspaceCosts, setWorkspaceCosts] = useState([] as Array<CostResource>);
const [costs, setCosts] = useState([] as Array<CostResource>);
const [costsLoadingState, setCostsLoadingState] = useState(LoadingState.Loading);
const [createFormOpen, setCreateFormOpen] = useState(false);
const [createFormResource, setCreateFormResource] = useState({ resourceType: ResourceType.Workspace } as CreateFormResource);
const apiCall = useAuthApiCall();
// set the app roles
useEffect(() => {
const setAppRolesOnLoad = async () => {
await apiCall(ApiEndpoint.Workspaces, HttpMethod.Get, undefined, undefined, ResultType.JSON, (roles: Array<string>) => {
setAppRoles(roles);
}, true);
};
setAppRolesOnLoad();
}, [apiCall]);
useEffect(() => initializeFileTypeIcons(), []);
return (
<>
<Routes>
<Route path="*" element={
<MsalAuthenticationTemplate interactionType={InteractionType.Redirect}>
<AppRolesContext.Provider value={{
roles: appRoles,
setAppRoles: (roles: Array<string>) => { setAppRoles(roles) }
}}>
<CreateUpdateResourceContext.Provider value={{
openCreateForm: (createFormResource: CreateFormResource) => {
setCreateFormResource(createFormResource);
setCreateFormOpen(true);
}
}} >
<CreateUpdateResource
isOpen={createFormOpen}
onClose={() => setCreateFormOpen(false)}
resourceType={createFormResource.resourceType}
parentResource={createFormResource.resourceParent}
onAddResource={createFormResource.onAdd}
workspaceApplicationIdURI={createFormResource.workspaceApplicationIdURI}
updateResource={createFormResource.updateResource}
/>
<Stack styles={stackStyles} className='tre-root'>
<Stack.Item grow className='tre-top-nav'>
<TopNav />
</Stack.Item>
<Stack.Item grow={100} className='tre-body'>
<GenericErrorBoundary>
<CostsContext.Provider value={{
loadingState: costsLoadingState,
costs: costs,
setCosts: (costs: Array<CostResource>) => {setCosts(costs)},
setLoadingState: (loadingState: LoadingState) => {setCostsLoadingState(loadingState)}
}}>
<Routes>
<Route path="*" element={<RootLayout />} />
<Route path="/workspaces/:workspaceId//*" element={
<WorkspaceContext.Provider value={{
roles: workspaceRoles,
setRoles: (roles: Array<string>) => {setWorkspaceRoles(roles)},
costs: workspaceCosts,
setCosts: (costs: Array<CostResource>) => {setWorkspaceCosts(costs)},
workspace: selectedWorkspace,
setWorkspace: (w: Workspace) => {setSelectedWorkspace(w)},
workspaceApplicationIdURI: selectedWorkspace.properties?.scope_id
}}>
<WorkspaceProvider />
</WorkspaceContext.Provider>
} />
</Routes>
</CostsContext.Provider>
</GenericErrorBoundary>
</Stack.Item>
<Stack.Item grow>
<Footer />
</Stack.Item>
</Stack>
</CreateUpdateResourceContext.Provider>
</AppRolesContext.Provider>
</MsalAuthenticationTemplate>
} />
<Route path='/logout' element={
<div className='tre-logout-message'>
<MessageBar
messageBarType={MessageBarType.success}
isMultiline={true}
>
<h2>You are logged out.</h2>
<p>It's a good idea to close your browser windows.</p>
</MessageBar>
</div>} />
</Routes>
</>
);
};
const stackStyles: IStackStyles = {
root: {
background: DefaultPalette.white,
height: '100vh',
},
};
export const Admin: React.FunctionComponent = () => {
return (
<h1>Admin (wip)</h1>
)
}
|
AzureTRE/ui/app/src/App.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/App.tsx",
"repo_id": "AzureTRE",
"token_count": 2860
}
| 134 |
import React from 'react';
import { VMPowerStates } from '../../models/resource';
interface PowerStateBadgeProps {
state: VMPowerStates
}
export const PowerStateBadge: React.FunctionComponent<PowerStateBadgeProps> = (props: PowerStateBadgeProps) => {
let stateClass = "tre-power-off";
if (props.state === VMPowerStates.Running) stateClass = "tre-power-on";
return (
<>
{
props.state && <span className="tre-power-badge">
<span className={stateClass}></span>
<small>{props.state.replace('VM ', '')}</small>
</span>
}
</>
);
};
|
AzureTRE/ui/app/src/components/shared/PowerStateBadge.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/PowerStateBadge.tsx",
"repo_id": "AzureTRE",
"token_count": 236
}
| 135 |
import { Stack, FontWeights, Text, Spinner, FontIcon, mergeStyles, getTheme, SpinnerSize, TooltipHost, ITooltipProps } from '@fluentui/react';
import React from 'react';
import { awaitingStates, failedStates, inProgressStates } from '../../models/operation';
import { Resource } from '../../models/resource';
interface StatusBadgeProps {
status: string
resource?: Resource
}
export const StatusBadge: React.FunctionComponent<StatusBadgeProps> = (props: StatusBadgeProps) => {
let badgeType;
if (props.status && inProgressStates.indexOf(props.status) !== -1) {
badgeType = "inProgress";
} else if (props.status && failedStates.indexOf(props.status) !== -1) {
badgeType = "failed";
} else if (props.resource && !props.resource.isEnabled) {
badgeType = "disabled";
}
const failedTooltipProps: ITooltipProps = {
onRenderContent: () => (
<div style={{padding: '20px 24px'}}>
<Text block variant="xLarge" style={{marginBottom: 12, fontWeight: FontWeights.semilight}}>
{props.status.replace("_", " ")}
</Text>
<Text block variant="small">
<Stack>
<Stack.Item>
<Stack horizontal tokens={{childrenGap: 5}}>
<Stack.Item>
There was an issue with the latest deployment or update for this resource.
Please see the Operations panel within the resource for details.
</Stack.Item>
</Stack>
</Stack.Item>
</Stack>
</Text>
</div>
),
};
switch (badgeType) {
case "inProgress":
let label = awaitingStates.includes(props.status) ? 'pending' : props.status.replace("_", " ");
return <Spinner label={label} style={{padding: 8}} ariaLive="assertive" labelPosition="right" size={SpinnerSize.xSmall} />
case "failed":
return (
<TooltipHost id={`item-${props.resource?.id}-failed`} tooltipProps={failedTooltipProps}>
<FontIcon
aria-describedby={`item-${props.resource?.id}-failed`}
aria-label="Error"
iconName="AlertSolid"
className={errorIcon}
/>
</TooltipHost>
);
case "disabled":
return (
<>
<TooltipHost
content="This resource is disabled"
id={`item-${props.resource?.id}-disabled`}
>
<FontIcon
aria-label="Disabled"
aria-describedby={`item-${props.resource?.id}-disabled`}
iconName="Blocked2Solid"
className={disabledIcon}
/>
</TooltipHost>
</>
)
default:
return <></>
}
};
const { palette } = getTheme();
const errorIcon = mergeStyles({
color: palette.red,
fontSize: 18,
margin: 8
});
const disabledIcon = mergeStyles({
color: palette.blackTranslucent40,
fontSize: 18,
margin: 8
});
|
AzureTRE/ui/app/src/components/shared/StatusBadge.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/StatusBadge.tsx",
"repo_id": "AzureTRE",
"token_count": 1267
}
| 136 |
import { createSlice, PayloadAction } from '@reduxjs/toolkit';
import { completedStates, Operation } from '../../../models/operation';
interface OperationsState {
items: Array<Operation>
}
const initialState: OperationsState = {
items: []
};
// note - we can write what looks like state mutations here because the redux toolkit uses
// Immer under the hood to make everything immutable
const operationsSlice = createSlice({
name: 'operations',
initialState,
reducers: {
setInitialOperations(state, action: PayloadAction<Array<Operation>>) {
state.items = action.payload;
},
addUpdateOperation(state, action: PayloadAction<Operation>) {
let i = state.items.findIndex((f: Operation) => f.id === action.payload.id);
if (i !== -1) {
state.items.splice(i, 1, action.payload);
} else {
state.items.push(action.payload);
}
},
dismissCompleted(state) {
state.items.forEach((o: Operation) => {
if (completedStates.includes(o.status)) {
o.dismiss = true;
}
});
}
}
});
export const { setInitialOperations, addUpdateOperation, dismissCompleted } = operationsSlice.actions;
export default operationsSlice.reducer;
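// Minimal usage sketch (assumes a standard @reduxjs/toolkit store setup; names below are illustrative):
//   import { configureStore } from '@reduxjs/toolkit';
//   import operationsReducer, { addUpdateOperation } from './operationsSlice';
//   const store = configureStore({ reducer: { operations: operationsReducer } });
//   store.dispatch(addUpdateOperation(someOperation));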
|
AzureTRE/ui/app/src/components/shared/notifications/operationsSlice.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/notifications/operationsSlice.ts",
"repo_id": "AzureTRE",
"token_count": 433
}
| 137 |
import { useContext, useEffect, useState } from "react";
import { WorkspaceContext } from "../contexts/WorkspaceContext";
import { completedStates, inProgressStates, Operation } from "../models/operation";
import { ResourceUpdate, ComponentAction, getResourceFromResult, Resource } from "../models/resource";
import { ResourceType } from "../models/resourceType";
import { HttpMethod, useAuthApiCall } from "./useAuthApiCall";
import { useAppSelector } from './customReduxHooks';
export const useComponentManager = (
resource: Resource | undefined,
onUpdate: (r: Resource) => void,
onRemove: (r: Resource) => void,
workspaceScopeId = ""
) => {
const [latestUpdate, setLatestUpdate] = useState({
componentAction: ComponentAction.None,
operation: {} as Operation
} as ResourceUpdate);
const workspaceCtx = useContext(WorkspaceContext);
const apiCall = useAuthApiCall();
const operations = useAppSelector((state) => state.operations);
useEffect(() => {
const checkOps = async () => {
if (resource) {
let resourceOps = operations.items.filter((o: Operation) => o.resourceId === resource.id);
if (resourceOps && resourceOps.length > 0) {
let latestOp = resourceOps[resourceOps.length - 1];
// only act when a status has changed
if (latestOp.status === latestUpdate.operation.status) return;
if (inProgressStates.includes(latestOp.status)) {
setLatestUpdate({componentAction:ComponentAction.Lock, operation: latestOp});
} else if (completedStates.includes(latestOp.status)) {
if (latestOp.status === "deleted") {
onRemove(resource);
} else {
setLatestUpdate({componentAction:ComponentAction.Reload, operation: latestOp});
// if it's transitioned from an in-progress to a completed state, we need to reload it
if (inProgressStates.includes(latestUpdate.operation.status)) {
let scopeId;
if (resource.resourceType !== ResourceType.Workspace) {
// If a workspaceScopeId has been passed, use that, otherwise fall back to workspace context
scopeId = workspaceScopeId ? workspaceScopeId : workspaceCtx.workspaceApplicationIdURI;
}
let result = await apiCall(resource.resourcePath, HttpMethod.Get, scopeId);
onUpdate(getResourceFromResult(result));
}
}
} else {
setLatestUpdate({componentAction:ComponentAction.None, operation: latestOp});
}
}
}
}
checkOps();
}, [
operations.items,
apiCall,
latestUpdate.operation.status,
onRemove,
onUpdate,
resource,
workspaceCtx.workspaceApplicationIdURI,
workspaceScopeId
]);
return latestUpdate;
}
|
AzureTRE/ui/app/src/hooks/useComponentManager.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/hooks/useComponentManager.ts",
"repo_id": "AzureTRE",
"token_count": 1053
}
| 138 |
import { Resource } from "./resource";
export interface Workspace extends Resource {
workspaceURL: string
}
|
AzureTRE/ui/app/src/models/workspace.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/workspace.ts",
"repo_id": "AzureTRE",
"token_count": 28
}
| 139 |
# Relation Extraction on BC5CDR
## Data
You can preprocess the data by running:
``` bash
bash preprocess.sh
```
## Training
You can fine-tune the pre-trained BioGPT on the task by:
``` bash
bash train.sh
```
## Model Checkpoint
We provide our fine-tuned model on the task. See [here](../../README.md#pre-trained-models)
## Inference and Evaluation
You can run inference and evaluate the model on the test set by:
``` bash
bash infer.sh
```
|
BioGPT/examples/RE-BC5CDR/README.md/0
|
{
"file_path": "BioGPT/examples/RE-BC5CDR/README.md",
"repo_id": "BioGPT",
"token_count": 142
}
| 140 |
recursive-include 3rdparty/tvm *
recursive-exclude 3rdparty/tvm/build *
recursive-exclude 3rdparty/clang* *
recursive-exclude 3rdparty/llvm* *
|
BitBLAS/MANIFEST.in/0
|
{
"file_path": "BitBLAS/MANIFEST.in",
"repo_id": "BitBLAS",
"token_count": 56
}
| 141 |
### Using BitBLAS from DSL
```python
from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy
from bitblas.base.roller.arch import CUDA
from bitblas.base.utils import apply_and_build
import tvm
from tvm.script import tir as T
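# Note: M, N, K and the dtypes are assumed to be defined earlier in your script, e.g.
# M = N = K = 16384; in_dtype = "float16"; out_dtype = "float16".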
@tvm.script.ir_module
class MatmulNT:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, [M, K], dtype=in_dtype)
B = T.match_buffer(b, [N, K], dtype=in_dtype)
C = T.match_buffer(c, [M, N], dtype=out_dtype)
for i, j, k in T.grid(M, N, K):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = tvm.tir.const(0, out_dtype)
C[vi, vj] = C[vi, vj] + A[vi, vk].astype(out_dtype) * B[
vj, vk
].astype(out_dtype)
ir_module = MatmulNT
func = ir_module["main"]
target = tvm.target.Target("nvidia/nvidia-a100")
arch = CUDA(target)
```
Get tuning policy and candidates:
```python
# Tune with SIMT Cuda Core
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
# Tune with Tensor Core if possible
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(topk=20)
'''
[BitBLAS] Evaluation with config {'block': [64, 64], 'warp': [32, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.032 ms
[BitBLAS] Evaluation with config {'block': [32, 128], 'warp': [16, 64], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.021 ms
[BitBLAS] Evaluation with config {'block': [128, 32], 'warp': [64, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.023 ms
[BitBLAS] Evaluation with config {'block': [32, 32], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.023 ms
[BitBLAS] Evaluation with config {'block': [32, 64], 'warp': [16, 32], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.027 ms
[BitBLAS] Evaluation with config {'block': [64, 32], 'warp': [32, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.025 ms
[BitBLAS] Evaluation with config {'block': [64, 128], 'warp': [32, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.023 ms
[BitBLAS] Evaluation with config {'block': [128, 64], 'warp': [64, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.025 ms
[BitBLAS] Evaluation with config {'block': [16, 64], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.037 ms
[BitBLAS] Evaluation with config {'block': [64, 16], 'warp': [16, 16], 'rstep': [128], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.037 ms
[BitBLAS] Evaluation with config {'block': [128, 128], 'warp': [64, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.026 ms
[BitBLAS] Evaluation with config {'block': [16, 128], 'warp': [16, 32], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.043 ms
[BitBLAS] Evaluation with config {'block': [128, 16], 'warp': [32, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.042 ms
[BitBLAS] Evaluation with config {'block': [32, 256], 'warp': [16, 128], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.025 ms
[BitBLAS] Evaluation with config {'block': [256, 32], 'warp': [128, 16], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.029 ms
[BitBLAS] Evaluation with config {'block': [64, 256], 'warp': [32, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.028 ms
[BitBLAS] Evaluation with config {'block': [256, 64], 'warp': [128, 32], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.027 ms
[BitBLAS] Evaluation with config {'block': [128, 256], 'warp': [64, 128], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.044 ms
[BitBLAS] Evaluation with config {'block': [256, 128], 'warp': [128, 64], 'rstep': [32], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.040 ms
[BitBLAS] Evaluation with config {'block': [16, 256], 'warp': [16, 64], 'rstep': [64], 'use_tc': True, 'vectorize': {'A_reindex': 8, 'B_reindex': 8}}
[BitBLAS] Time cost of this config: 0.047 ms
'''
```
Apply, build, and get the best code generation result:
```python
cpresults, best = apply_and_build(func, configs, arch, parallel_build=True)
# get the best code generation result.
print(best.code)
'''
extern "C" __global__ void __launch_bounds__(128) default_function_kernel(half* __restrict__ A, half* __restrict__ B, half* __restrict__ C) {
...
}
'''
```
We also provide some additional capabilities through the DSL.
#### Auto Tensorization
Say we currently have two policies: one for the SIMT CUDA cores and one for Tensor Cores. The decision to utilize the TensorCore policy over the SIMT CUDA core policy can be enhanced by an auto-tensorization strategy, which lets BitBLAS automatically detect whether a DSL expression can utilize Tensor Cores.

```python
# Assume func is a conv2d; after this call, tensorized_func is the tensorized version of the conv2d, otherwise tags is None.
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
```
#### Tune with dynamic symbolic
In LLM serving, the input shape is dynamic, so we can use a dynamic symbolic dimension to generate high-performance kernels for dynamic shapes.
```python
@tvm.script.ir_module
class MatmulNT:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.int32()
A = T.match_buffer(a, [m, K], dtype=in_dtype)
B = T.match_buffer(b, [N, K], dtype=in_dtype)
C = T.match_buffer(c, [m, N], dtype=out_dtype)
for i, j, k in T.grid(m, N, K):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = tvm.tir.const(0, out_dtype)
C[vi, vj] = C[vi, vj] + A[vi, vk].astype(out_dtype) * B[
vj, vk
].astype(out_dtype)
from bitblas import fast_tune_with_dynamic_range
# Tune with dynamic symbolic
optimized_mod = fast_tune_with_dynamic_range(
func, target, topk=topk, parallel_build=True,
dynamic_range={
"M": [1, 1024]
}
)
# finally, we generate a dispatch function that dispatches to the appropriate kernel based on the dynamic symbolic value.
'''
@IRModule
class MatmulNT:
def matmul_nt_opt_m_1(A: Tensor, T_reshape: Tensor, m: int):
...
def matmul_nt_opt_m_256(A: Tensor, T_reshape: Tensor, m: int):
...
def dispatcher(args):
if m <= 1:
matmul_nt_opt_m_1(A.data, T_reshape.data, m)
if m > 1 and m <= 256:
matmul_nt_opt_m_256(A.data, T_reshape.data, m)
if m > 256:
matmul_nt_m_256(A.data, T_reshape.data, m)
'''
```
You can find some example DSL implementations in `python/bitblas/ops/impl` and `benchmark/dsl`; see more examples and tutorials in [apache/tvm](https://github.com/apache/tvm)
|
BitBLAS/docs/ExtendOperatorsWithDSL.md/0
|
{
"file_path": "BitBLAS/docs/ExtendOperatorsWithDSL.md",
"repo_id": "BitBLAS",
"token_count": 3492
}
| 142 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" LLaMA model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class BitnetConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BitnetModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BitnetModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Bitnet 1 supports up to 2048 tokens,
Bitnet 2 up to 4096, CodeBitnet up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is
necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import BitnetModel, BitnetConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = BitnetConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = BitnetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llama"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=None,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
pretraining_tp=1,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
weight_bits=1,
input_bits=8,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.weight_bits = weight_bits
self.input_bits = input_bits
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
f"got {self.rope_scaling}"
)
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
)
if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
|
BitBLAS/integration/BitNet/configuration_bitnet.py/0
|
{
"file_path": "BitBLAS/integration/BitNet/configuration_bitnet.py",
"repo_id": "BitBLAS",
"token_count": 3674
}
| 143 |
Please check out https://github.com/kaleid-liner/fastertransformer_backend
|
BitBLAS/integration/fastertransformer/README.md/0
|
{
"file_path": "BitBLAS/integration/fastertransformer/README.md",
"repo_id": "BitBLAS",
"token_count": 21
}
| 144 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .default import DefaultPolicy
from .tensorcore import TensorCorePolicy
|
BitBLAS/python/bitblas/base/roller/policy/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/policy/__init__.py",
"repo_id": "BitBLAS",
"token_count": 36
}
| 145 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring
"""A fallback schedule rule for GPU operators."""
from typing import List
from tvm import tir
from ..base import ScheduleRule, normalize_prim_func, try_inline
class ElementWise(ScheduleRule):
"""
An elementwise schedule rule for GPU operators.
"""
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> tir.Schedule:
block_factors = config.block
thread_factors = config.thread
step_factors = config.step
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
if block_infos is None:
return None
block_infos = try_inline(sch, block_infos)
for block in block_infos:
s_loops: List[tir.schedule.LoopRV] = []
r_loops: List[tir.schedule.LoopRV] = []
o_loops: List[tir.schedule.LoopRV] = []
dom_kind = block.dom_kind()
block = block.block_rv
if (
any(
[
sch.get(loop_rv).thread_binding is not None
for loop_rv in sch.get_loops(block)
]
)
or len(sch.get_loops(block)) == 0
):
continue
for loop, iter_type in zip(sch.get_loops(block), dom_kind):
{"S": s_loops, "R": r_loops, "O": o_loops}[iter_type].append(loop)
if not s_loops:
s_loops.append(sch.add_unit_loop(block))
sch.reorder(*s_loops, *r_loops, *o_loops)
block_loops = []
vthread_loops = []
thread_loops = []
inner_loops = []
for s_loop, block_factor, step_factor, thread_factor in zip(
s_loops, block_factors, step_factors, thread_factors
):
block_loop, inner_loop = sch.split(s_loop, factors=[None, block_factor])
vthread_loop, inner_loop = sch.split(
inner_loop, factors=[None, thread_factor * step_factor]
)
thread_loop, inner_loop = sch.split(
inner_loop, factors=[None, step_factor]
)
block_loops.append(block_loop)
vthread_loops.append(vthread_loop)
thread_loops.append(thread_loop)
inner_loops.append(inner_loop)
# inner virtual thread first
vthread_loops = list(reversed(vthread_loops))
sch.reorder(
*block_loops,
*vthread_loops,
*thread_loops,
*inner_loops,
*r_loops,
*o_loops
)
sch.bind(sch.fuse(*block_loops), "blockIdx.x")
sch.bind(sch.fuse(*thread_loops), "threadIdx.x")
if len(vthread_loops) > 3:
vthread_loops = vthread_loops[0:2] + [sch.fuse(*vthread_loops[2:])]
for i, ax in enumerate(vthread_loops):
sch.bind(ax, "vthread" + [".x", ".y", ".z"][i])
return sch
|
BitBLAS/python/bitblas/gpu/element_wise.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/element_wise.py",
"repo_id": "BitBLAS",
"token_count": 1757
}
| 146 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import ctypes
import operator
from functools import reduce
from logging import getLogger
import torch
import torch.nn as nn
logger = getLogger(__name__)
from typing import List, Union
from bitblas.cache import global_operator_cache, get_database_path
from bitblas import Matmul, MatmulConfig
from bitblas.quantization.utils import general_compress
from bitblas import auto_detect_nvidia_target
BITBLAS_TARGET = auto_detect_nvidia_target()
BITBLAS_DATABASE_PATH = get_database_path()
def unpack_qzeros(qzeros, bits):
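    # Unpack zero points that are stored 32 // bits per int32 (e.g. AutoGPTQ-style packing);
    # note the 0xF mask below assumes 4-bit values.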
qzeros = qzeros.view(torch.int32)
elems_per_int32 = 32 // bits
unpacked_zeros = torch.zeros(
(qzeros.shape[0], qzeros.shape[1] * elems_per_int32),
dtype=torch.int8,
device=qzeros.device,
requires_grad=False,
)
for col in range(unpacked_zeros.shape[1]):
i = col % elems_per_int32
unpacked_zeros[:, col] = (qzeros[:, col // elems_per_int32] >> (bits * i)) & 0xF
return unpacked_zeros + 1
class Linear(nn.Module):
opt_M = [1, 16, 32, 64, 128, 256, 512]
STORAGE_DTYPE = "int8" # assume int8 storage
TORCH_STORAGE_DTYPE = getattr(torch, STORAGE_DTYPE)
BITBLAS_DTYPES = {
torch.float32: "float32",
torch.float16: "float16",
torch.half: "float16",
torch.int8: "int8",
}
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
A_dtype: str = "float16",
W_dtype: str = "float16",
accum_dtype: str = "float16",
out_dtype: str = "float16",
# configs for weight only quantization
group_size: int = -1,
with_scaling: bool = None,
with_zeros: bool = False,
zeros_mode: str = None,
opt_M: Union[int, List[int]] = opt_M,
# performance related configs
enable_tuning: bool = True,
fast_decoding: bool = True,
propagate_b: bool = False,
):
"""
@opt_M: optimize range of the input shape for dynamic symbolic
if the input shape is a range, we will optimize the matmul with dynamic symbolic.
if the input shape is int, we will optimize the matmul with static symbolic.
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.opt_M = opt_M
self.group_size = self._set_group_size(group_size, in_features)
self.torch_dtype = getattr(torch, A_dtype)
self.is_consitent = A_dtype == W_dtype
self.zeros_mode = zeros_mode
self._validate_parameters(self.group_size, in_features, out_features)
self._configure_bitblas_matmul(
A_dtype,
W_dtype,
accum_dtype,
out_dtype,
with_scaling,
with_zeros,
zeros_mode,
enable_tuning,
fast_decoding,
bias,
propagate_b,
)
self._initialize_buffers(in_features, out_features, bias)
def init_params(self):
# eliminate runtime overhead like exllama state
        if self.is_consistent:
param_list = [self.weight]
if self.bitblas_matmul.config.with_bias:
param_list.append(self.bias)
self.q_params = [ctypes.c_void_p(arr.data_ptr()) for arr in param_list]
else:
param_list = [self.qweight]
if self.bitblas_matmul.config.with_scaling:
param_list.append(self.scales)
if self.bitblas_matmul.config.with_zeros:
param_list.append(self.zeros)
if self.bitblas_matmul.config.with_bias:
param_list.append(self.bias)
self.q_params = [ctypes.c_void_p(arr.data_ptr()) for arr in param_list]
def _validate_parameters(self, group_size, in_features, out_features):
if in_features % 16 != 0 or out_features % 16 != 0:
raise ValueError("`in_features` and `out_features` must be divisible by 16.")
if in_features % group_size != 0:
raise ValueError("`in_features` must be divisible by `group_size`.")
def _set_group_size(self, group_size, in_features):
return in_features if (group_size == -1 or group_size is None) else group_size
def _initialize_buffers(self, in_features, out_features, bias):
if self.consistent:
self.register_buffer(
"weight",
                torch.zeros((out_features, in_features), dtype=self.torch_dtype),
)
else:
self.register_buffer(
"qweight",
torch.zeros(
self.bitblas_matmul.retrieve_weight_shape(),
dtype=self.TORCH_STORAGE_DTYPE,
),
)
self.register_buffer(
"scales",
torch.zeros((out_features, in_features // self.group_size), dtype=self.torch_dtype),
)
if self.zeros_mode == "quantized":
storage_nbit = int("".join(c for c in self.STORAGE_DTYPE if c.isdigit()))
self.register_buffer(
"zeros",
torch.zeros(
(
in_features // self.group_size,
out_features // storage_nbit * self.bits,
),
dtype=self.TORCH_STORAGE_DTYPE,
),
)
else:
self.register_buffer(
"zeros",
torch.zeros(
(out_features, in_features // self.group_size),
dtype=self.torch_dtype,
),
)
if bias:
self.register_buffer("bias", torch.zeros((out_features), dtype=self.torch_dtype))
else:
self.bias = None
def _configure_bitblas_matmul(
self,
A_dtype,
W_dtype,
accum_dtype,
out_dtype,
with_scaling,
with_zeros,
zeros_mode,
enable_tuning,
fast_decoding,
bias,
propagate_b,
):
matmul_config = MatmulConfig(
M=self.opt_M,
N=self.out_features,
K=self.in_features,
A_dtype=A_dtype,
W_dtype=W_dtype,
accum_dtype=accum_dtype,
out_dtype=out_dtype,
storage_dtype=self.STORAGE_DTYPE,
with_scaling=with_scaling,
with_zeros=with_zeros,
group_size=self.group_size,
fast_decoding=fast_decoding,
with_bias=bias,
propagate_b=propagate_b,
zeros_mode=zeros_mode,
)
self.bitblas_matmul = self._get_or_create_bitblas_operator(matmul_config, enable_tuning)
self.bits = self.bitblas_matmul.bit
self.source_format = self.bitblas_matmul.source_format
def _get_or_create_bitblas_operator(self, config, enable_tuning):
if global_operator_cache.size() == 0:
global_operator_cache.load_from_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
logger.info(f"Loaded {global_operator_cache.size()} operators from database.")
bitblas_matmul = global_operator_cache.get(config)
if bitblas_matmul is None:
# should disable tuning for the first time because we may require loading bitblas operator from database.
bitblas_matmul = Matmul(config, target=BITBLAS_TARGET, enable_tuning=False)
if enable_tuning:
bitblas_matmul.hardware_aware_finetune(topk=20)
global_operator_cache.add(config, bitblas_matmul)
global_operator_cache.save_into_database(BITBLAS_DATABASE_PATH, BITBLAS_TARGET)
print("BitBLAS Tuning done, appended operator to global_operator_cache.")
else:
print("BitBLAS Operator created.")
else:
print("BitBLAS Operator found in global_operator_cache.")
return bitblas_matmul
def warmup(self, topk=20):
self.bitblas_matmul.hardware_aware_finetune(topk=topk)
def forward(self, A, output=None):
if A.dtype != torch.float16:
A = A.half()
# can be lifted to post init.
self.init_params()
if output is None:
output = torch.empty(
A.shape[:-1] + (self.out_features,), dtype=A.dtype, device=A.device)
m = ctypes.c_int32(reduce(operator.mul, A.shape[:-1], 1))
A = self.bitblas_matmul.transform_input(A)
A_void = ctypes.c_void_p(A.data_ptr())
        # m is the product of all dimensions of A except the last (the effective batch size)
self.bitblas_matmul.lib.call(A_void, *self.q_params, ctypes.c_void_p(output.data_ptr()), m)
return output
def load_and_transform_weight(
self,
weight: torch.Tensor,
scales: torch.Tensor = None,
zeros: torch.Tensor = None,
bias: torch.Tensor = None,
):
if self.consistent:
assert scales is None, "scales should be None for consistent mode."
assert zeros is None, "zeros should be None for consistent mode."
weight = self.bitblas_matmul.transform_weight(weight)
self.weight = nn.Parameter(weight)
if bias is not None:
self.bias = bias
else:
weight = self.bitblas_matmul.transform_weight(weight)
self.qweight = weight
if scales is not None:
self.scales = scales
if zeros is not None:
self.zeros = zeros
if bias is not None:
self.bias = bias
def repack_from_gptq(self, gptq_module):
# qweight in gptq old quant linear stored with (out_features, in_features), should be transposed.
qweight = gptq_module.qweight.T.contiguous().view(self.TORCH_STORAGE_DTYPE)
if self.bitblas_matmul.weight_transform is not None:
qweight = self.bitblas_matmul.weight_transform(qweight.cpu()).cuda()
self.qweight = qweight
# scales in gptq old quant linear stored with (in_features // group_size, out_features), should be transposed.
scales = gptq_module.scales.T.contiguous().view(self.torch_dtype)
self.scales = scales
# qzeros should be dequantized to int zeros.
intzeros = unpack_qzeros(gptq_module.qzeros, self.bits).T.contiguous()
if self.bitblas_matmul.config.zeros_mode == "original":
self.zeros = intzeros.to(torch.float16).contiguous()
elif self.bitblas_matmul.config.zeros_mode == "rescale":
self.zeros[:, :] = intzeros.to(torch.float16)[:, :] * self.scales[:, :]
elif self.bitblas_matmul.config.zeros_mode == "quantized":
self.zeros = (
torch.Tensor(general_compress(intzeros.T.contiguous().cpu().numpy(), self.bits)).to(
self.qweight.device).to(self.zeros.dtype).contiguous())
else:
raise ValueError(f"Unsupported zeros type: {self.bitblas_matmul.config.zeros_mode}")
if self.bias is not None:
self.bias = gptq_module.bias.data.to(torch.float16).contiguous()
@property
def consistent(self):
        return self.is_consistent
__all__ = ["Linear"]
|
BitBLAS/python/bitblas/module/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/module/__init__.py",
"repo_id": "BitBLAS",
"token_count": 5723
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .quantization import (
_tir_packed_int_to_int_convert, # noqa: F401
_tir_packed_to_signed_convert, # noqa: F401
_tir_packed_to_unsigned_convert, # noqa: F401
_tir_u32_to_f4_to_f16, # noqa: F401
_tir_packed_to_unsigned_convert_with_zeros, # noqa: F401
)
from .utils import gen_quant4, general_compress # noqa: F401
|
BitBLAS/python/bitblas/quantization/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/quantization/__init__.py",
"repo_id": "BitBLAS",
"token_count": 179
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pytest
import bitblas
from bitblas.ops.matmul import Matmul, MatmulConfig
from bitblas.utils import auto_detect_nvidia_target
target = auto_detect_nvidia_target()
def get_codegen_result(ops, target):
code = ops.get_source(target=target)
return code
# fmt: off
@pytest.mark.parametrize(
"M,N,K,in_dtype,out_dtype,accum_dtype,with_bias,propagate_a,propagate_b,layout,enable_tuning",
[
(16384, 16384, 16384, "float16", "float16", "float16", False, False, False, "nt", False),
# dynamic shape
([1], 16384, 16384, "float16", "float16", "float16", False, False, False, "nt", False),
([1, 32], 16384, 16384, "float16", "float16", "float16", False, False, False, "nt", True),
],
)
def test_matmul_codegen_default(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
with_bias,
propagate_a,
propagate_b,
layout,
enable_tuning,
):
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
in_dtype=in_dtype,
out_dtype=out_dtype,
accum_dtype=accum_dtype,
with_bias=with_bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
matmul = Matmul(
config=matmul_config,
target=target,
)
if enable_tuning:
matmul.hardware_aware_finetune(topk=20)
assert get_codegen_result(matmul, target)
@pytest.mark.parametrize(
"M,N,K,in_dtype,out_dtype,accum_dtype,with_bias,propagate_a,propagate_b,layout",
[
(16384, 16384, 16384, "float16", "float16", "float16", False, False, False, "nt"),
# dynamic shape
([1], 16384, 16384, "float16", "float16", "float16", False, False, False, "nt"),
],
)
def test_matmul_codegen_finetune(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
with_bias,
propagate_a,
propagate_b,
layout,
):
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
in_dtype=in_dtype,
out_dtype=out_dtype,
accum_dtype=accum_dtype,
with_bias=with_bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
matmul = Matmul(
config=matmul_config,
target=target,
)
matmul.hardware_aware_finetune(topk=20)
assert get_codegen_result(matmul, target)
@pytest.mark.parametrize(
"M,N,K,in_dtype,out_dtype,accum_dtype,with_bias,propagate_a,propagate_b,layout",
[
(1024, 1024, 1024, "float16", "float16", "float16", False, False, False, "nt"),
],
)
def test_matmul_profile_latency(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
with_bias,
propagate_a,
propagate_b,
layout,
):
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
in_dtype=in_dtype,
out_dtype=out_dtype,
accum_dtype=accum_dtype,
with_bias=with_bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
matmul = Matmul(
config=matmul_config,
target=target,
)
latency = matmul.profile_latency()
assert latency
@pytest.mark.parametrize(
"M,N,K,in_dtype,out_dtype,accum_dtype,with_bias,propagate_a,propagate_b,layout",
[
(256, 256, 256, "float16", "float16", "float16", False, False, False, "nt"),
(256, 256, 256, "float16", "float16", "float16", False, False, True, "nt"),
(256, 256, 256, "float16", "float16", "float16", False, False, 0, "nt"),
(256, 256, 256, "float16", "float16", "float16", False, False, 1, "nt"),
(256, 256, 256, "float16", "float16", "float16", False, False, 2, "nt"),
],
)
def test_matmul_torch_forward(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
with_bias,
propagate_a,
propagate_b,
layout,
):
import torch
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
in_dtype=in_dtype,
out_dtype=out_dtype,
accum_dtype=accum_dtype,
with_bias=with_bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
matmul = Matmul(
config=matmul_config,
target=target,
)
# convert tensors to torch
input_shape = (M, K)
weight_shape = (N, K) if layout == "nt" else (K, N)
output_shape = (M, N)
inputs = []
inputs.append(torch.rand(input_shape, dtype=torch.float16).cuda())
inputs.append(torch.rand(weight_shape, dtype=torch.float16).cuda())
inputs.append(torch.rand(output_shape, dtype=torch.float16).cuda())
ref_result = torch.matmul(inputs[0], inputs[1].t() if layout == "nt" else inputs[1])
permuted_inputs = []
if matmul.input_transform is not None:
        permuted_inputs.append(matmul.input_transform(inputs[0].cpu()).cuda())
else:
permuted_inputs.append(inputs[0])
if matmul.weight_transform is not None:
permuted_inputs.append(matmul.weight_transform(inputs[1].cpu()).cuda())
else:
permuted_inputs.append(inputs[1])
permuted_inputs.append(inputs[2])
matmul(*permuted_inputs)
torch.testing.assert_close(permuted_inputs[-1], ref_result, rtol=1e-2, atol=1e-2)
@pytest.mark.parametrize(
"M,N,K,in_dtype,out_dtype,accum_dtype,with_bias,propagate_a,propagate_b,layout",
[
(256, 256, 256, "int8", "int8", "int32", False, False, False, "nt"),
],
)
def test_matmul_torch_forward_int(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
with_bias,
propagate_a,
propagate_b,
layout,
):
import torch
torch.random.manual_seed(0)
matmul_config = MatmulConfig(
M=M,
N=N,
K=K,
in_dtype=in_dtype,
out_dtype=out_dtype,
accum_dtype=accum_dtype,
with_bias=with_bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
matmul = Matmul(
config=matmul_config,
target=target,
)
# convert tensors to torch
input_shape = (M, K)
weight_shape = (N, K) if layout == "nt" else (K, N)
output_shape = (M, N)
inputs = []
inputs.append(torch.randint(-16, 16, input_shape, dtype=torch.int8).cuda())
inputs.append(torch.randint(-1, 2, weight_shape, dtype=torch.int8).cuda())
ref_result = torch.matmul(
inputs[0].to(torch.float32),
inputs[1].t().to(torch.float32) if layout == "nt" else inputs[1].to(torch.float32))
permuted_inputs = []
if matmul.input_transform is not None:
        permuted_inputs.append(matmul.input_transform(inputs[0].cpu()).cuda())
else:
permuted_inputs.append(inputs[0])
if matmul.weight_transform is not None:
permuted_inputs.append(matmul.weight_transform(inputs[1].cpu()).cuda())
else:
permuted_inputs.append(inputs[1])
permuted_inputs.append(torch.randint(-7, 7, output_shape, dtype=torch.int32).cuda())
matmul(*permuted_inputs)
print(permuted_inputs[-1])
print(ref_result)
torch.testing.assert_close(
permuted_inputs[-1].to(torch.float32), ref_result, rtol=1e-2, atol=1e-2)
# fmt: on
if __name__ == "__main__":
bitblas.testing.main()
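# Usage note: besides `python test_matmul_ops.py`, individual cases can be selected
# with pytest, e.g. `pytest -v test_matmul_ops.py -k torch_forward`; a CUDA-capable
# GPU and a matching BitBLAS build are assumed.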
|
BitBLAS/testing/python/operators/test_matmul_ops.py/0
|
{
"file_path": "BitBLAS/testing/python/operators/test_matmul_ops.py",
"repo_id": "BitBLAS",
"token_count": 3626
}
| 149 |
import os
import copy
import pytorch_lightning as pl
import wandb
import torch
import time
from pytorch_lightning.loggers import WandbLogger
os.environ["NCCL_DEBUG"] = "INFO"
from src.config import ex
from src.modules import METERTransformerSS
from src.modules import BTTransformer
from src.datamodules.multitask_datamodule import MTDataModule
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
@ex.automain
def main(_config):
_config = copy.deepcopy(_config)
pl.seed_everything(_config["seed"])
dm = MTDataModule(_config)
if _config["model_type"] == "METER":
model = METERTransformerSS(_config)
elif _config["model_type"] == "BT":
model = BTTransformer(_config)
else:
raise NotImplementedError("model_type {} not implemented".format(_config["model_type"]))
exp_name = _config["exp_name"]
group_name = _config["group_name"]
run_name = _config["run_name"]
output_dir = f'{_config["output_dir"]}/{exp_name}_{group_name}_{run_name}'
os.makedirs(_config["log_dir"], exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
save_top_k=-1,
verbose=True,
monitor="val/the_metric",
mode="max",
save_last=True if 'irtr' in group_name else False,
filename=f'{exp_name}' + '_{epoch:02d}_{val/the_metric:.4f}',
auto_insert_metric_name=False,
dirpath=output_dir,
)
logger = WandbLogger(save_dir=_config["log_dir"], project=exp_name, name=f'{exp_name}_{group_name}_{run_name}', group=group_name)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
callbacks = [checkpoint_callback, lr_callback]
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
grad_steps = max(_config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
), 1)
max_steps = _config["max_steps"]
trainer = pl.Trainer(
gpus=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
strategy="ddp",
benchmark=True,
deterministic=True,
max_epochs=_config["max_epoch"] if max_steps == -1 else 1000,
max_steps=max_steps,
callbacks=callbacks,
logger=logger,
accumulate_grad_batches=grad_steps,
log_every_n_steps=_config["log_every_n_steps"],
resume_from_checkpoint=_config["resume_from"],
weights_summary="top",
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
prepare_data_per_node=False,
replace_sampler_ddp=False,
)
if not _config["test_only"]:
trainer.fit(model, datamodule=dm)
best_metric_log = model.best_metric_log
best_model_path = checkpoint_callback.best_model_path
print(f'best_model_path: {best_model_path}')
if _config["group_name"] in ["irtr_coco", "irtr_f30k"]: # choose the last checkpoint for test evaluation
best_model_path = checkpoint_callback.last_model_path
print(f'last_model_path: {checkpoint_callback.last_model_path}')
# Directly running test evaluation
if _config["group_name"] not in ["mlm_itm", "nlvr2", "snli", "irtr_itm_itc_f30k", "irtr_itm_itc_coco"]: # these tasks do not need to run the test evaluation after training.
        # Remember: the best model checkpoint must be transferred to every node, e.g. node-0 uploads the best checkpoint and node-1 / node-2 download it before test evaluation.
test_config = copy.deepcopy(_config)
test_config["load_path"] = best_model_path
test_config["test_only"] = True
if test_config["group_name"] in ["irtr_coco", "irtr_f30k"]:
test_config["get_recall_metric"] = True
test_dm = MTDataModule(test_config)
if test_config["model_type"] == "METER":
test_model = METERTransformerSS(test_config)
elif test_config["model_type"] == "BT":
test_model = BTTransformer(test_config)
trainer.test(test_model, datamodule=test_dm)
if _config["group_name"] not in ["vqa"]:
best_metric_log.update(test_model.best_metric_log)
logger.log_text(key="best_metrics", columns=list(best_metric_log.keys()), data=[list(best_metric_log.values())])
else:
trainer.test(model, datamodule=dm)
|
BridgeTower/run.py/0
|
{
"file_path": "BridgeTower/run.py",
"repo_id": "BridgeTower",
"token_count": 2024
}
| 150 |
#!/bin/bash
date ; hostname ; pwd
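# Usage (inferred from the variables below): run on every node as
#   bash pre_train.sh <node_rank>
# where $1 becomes NODE_RANK for the 8-node DDP pre-training job.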
EXP_IS=288
EXP_PGB=32
EXP_PGEB=128
EXP_LR=1e-5
export MASTER_ADDR=node-0
export MASTER_PORT=19800
export NODE_RANK=$1
PREFIX_NAME="pt"
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_IS, $EXP_PGB, $EXP_PGEB, $EXP_LR
TIME=$(date "+%Y%m%d%H%M")
RUN_NAME=""$PREFIX_NAME"_"$EXP_IS"_"$EXP_PGB"_"$EXP_PGEB"_"$EXP_LR"_"$TIME""
echo $RUN_NAME
python run.py with run_name=$RUN_NAME task_mlm_itm_clip_bert bt clip16 text_roberta data_root='~/BT/dataset/pre-train' num_gpus=8 num_nodes=8 image_size=$EXP_IS per_gpu_batchsize=$EXP_PGB per_gpu_eval_batchsize=$EXP_PGEB learning_rate=$EXP_LR
date
|
BridgeTower/scripts/pre_train.sh/0
|
{
"file_path": "BridgeTower/scripts/pre_train.sh",
"repo_id": "BridgeTower",
"token_count": 299
}
| 151 |
from .vg_caption_dataset import VisualGenomeCaptionDataset
from .coco_caption_karpathy_dataset import CocoCaptionKarpathyDataset
from .f30k_caption_karpathy_dataset import F30KCaptionKarpathyDataset
from .conceptual_caption_dataset import ConceptualCaptionDataset
from .sbu_caption_dataset import SBUCaptionDataset
from .vqav2_dataset import VQAv2Dataset
from .nlvr2_dataset import NLVR2Dataset
from .snli_dataset import SNLIDataset
|
BridgeTower/src/datasets/__init__.py/0
|
{
"file_path": "BridgeTower/src/datasets/__init__.py",
"repo_id": "BridgeTower",
"token_count": 168
}
| 152 |
# Copyright (c) Facebook, Inc. and its affiliates.
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/comm.py
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import numpy as np
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert (
_LOCAL_PROCESS_GROUP is not None
), "Local process group is not created! Please use launch() to spawn processes!"
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
if dist.get_backend() == dist.Backend.NCCL:
# This argument is needed to avoid warnings.
# It's valid only for NCCL backend.
dist.barrier(device_ids=[torch.cuda.current_device()])
else:
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage.
world_size = dist.get_world_size(group)
if world_size == 1:
return [data]
output = [None for _ in range(world_size)]
dist.all_gather_object(output, data, group=group)
return output
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
world_size = dist.get_world_size(group=group)
if world_size == 1:
return [data]
rank = dist.get_rank(group=group)
if rank == dst:
output = [None for _ in range(world_size)]
dist.gather_object(data, output, dst=dst, group=group)
return output
else:
dist.gather_object(data, None, dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
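if __name__ == "__main__":
    # Illustrative single-process smoke test, not part of the original file. Without
    # torch.distributed initialized, get_world_size() == 1 and every helper
    # short-circuits, which also documents the expected single-GPU behaviour.
    losses = {"loss_itm": torch.tensor(0.7), "loss_mlm": torch.tensor(0.3)}
    print("rank:", get_rank(), "of", get_world_size())
    print("reduced:", {k: float(v) for k, v in reduce_dict(losses).items()})
    print("gathered:", all_gather({"rank": get_rank()}))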
|
BridgeTower/src/modules/dist_utils.py/0
|
{
"file_path": "BridgeTower/src/modules/dist_utils.py",
"repo_id": "BridgeTower",
"token_count": 2222
}
| 153 |
import json
import os
import pandas as pd
import pyarrow as pa
import random
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
name = path.split("/")[-1]
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[name]
split = iid2split[name]
return [binary, captions, name, split]
def make_arrow(root, dataset_root):
with open(f"{root}/karpathy/dataset_coco.json", "r") as fp:
captions = json.load(fp)
captions = captions["images"]
iid2captions = defaultdict(list)
iid2split = dict()
for cap in tqdm(captions):
filename = cap["filename"]
iid2split[filename] = cap["split"]
for c in cap["sentences"]:
iid2captions[filename].append(c["raw"])
paths = list(glob(f"{root}/train2014/*.jpg")) + list(glob(f"{root}/val2014/*.jpg"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
for split in ["train", "val", "restval", "test"]:
batches = [b for b in bs if b[-1] == split]
dataframe = pd.DataFrame(
batches, columns=["image", "caption", "image_id", "split"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(
f"{dataset_root}/coco_caption_karpathy_{split}.arrow", "wb"
) as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
make_arrow('~/BT/dataset/mscoco_flickr30k_vqav2_snli_ve', '~/BT/dataset/pre-train')
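# NOTE: glob() and open() do not expand "~", so the call above needs absolute paths
# or os.path.expanduser() applied to both arguments.
# Illustrative read-back of one generated split (hypothetical path), for verification:
#
#   import pyarrow as pa
#   with pa.memory_map("pre-train/coco_caption_karpathy_val.arrow", "r") as source:
#       table = pa.ipc.open_file(source).read_all()
#   print(table.to_pandas()[["image_id", "split"]].head())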
|
BridgeTower/src/utils/write_coco_karpathy.py/0
|
{
"file_path": "BridgeTower/src/utils/write_coco_karpathy.py",
"repo_id": "BridgeTower",
"token_count": 887
}
| 154 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import ntpath
import time
from . import util
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
import torch
import numpy as np
class Visualizer:
def __init__(self, opt):
self.opt = opt
self.tf_log = opt.isTrain and opt.tf_log
self.tensorboard_log = opt.tensorboard_log
self.win_size = opt.display_winsize
self.name = opt.name
if self.tensorboard_log:
if self.opt.isTrain:
self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, "logs")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.writer = SummaryWriter(log_dir=self.log_dir)
else:
print("hi :)")
self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, opt.results_dir)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
if opt.isTrain:
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, "loss_log.txt")
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write("================ Training Loss (%s) ================\n" % now)
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, step):
all_tensor = []
if self.tensorboard_log:
for key, tensor in visuals.items():
all_tensor.append((tensor.data.cpu() + 1) / 2)
output = torch.cat(all_tensor, 0)
img_grid = vutils.make_grid(output, nrow=self.opt.batchSize, padding=0, normalize=False)
if self.opt.isTrain:
self.writer.add_image("Face_SPADE/training_samples", img_grid, step)
else:
vutils.save_image(
output,
os.path.join(self.log_dir, str(step) + ".png"),
nrow=self.opt.batchSize,
padding=0,
normalize=False,
)
# errors: dictionary of error labels and values
def plot_current_errors(self, errors, step):
if self.tf_log:
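            # NOTE: this branch assumes a TensorFlow module handle (self.tf) and a
            # compatible summary writer were attached elsewhere when opt.tf_log is set;
            # neither is initialized in this class as shown.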
for tag, value in errors.items():
value = value.mean().float()
summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
if self.tensorboard_log:
self.writer.add_scalar("Loss/GAN_Feat", errors["GAN_Feat"].mean().float(), step)
self.writer.add_scalar("Loss/VGG", errors["VGG"].mean().float(), step)
self.writer.add_scalars(
"Loss/GAN",
{
"G": errors["GAN"].mean().float(),
"D": (errors["D_Fake"].mean().float() + errors["D_real"].mean().float()) / 2,
},
step,
)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = "(epoch: %d, iters: %d, time: %.3f) " % (epoch, i, t)
for k, v in errors.items():
v = v.mean().float()
message += "%s: %.3f " % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write("%s\n" % message)
def convert_visuals_to_numpy(self, visuals):
for key, t in visuals.items():
tile = self.opt.batchSize > 8
if "input_label" == key:
t = util.tensor2label(t, self.opt.label_nc + 2, tile=tile) ## B*H*W*C 0-255 numpy
else:
t = util.tensor2im(t, tile=tile)
visuals[key] = t
return visuals
# save image to the disk
def save_images(self, webpage, visuals, image_path):
visuals = self.convert_visuals_to_numpy(visuals)
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
image_name = os.path.join(label, "%s.png" % (name))
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path, create_dir=True)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/util/visualizer.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/util/visualizer.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 2392
}
| 155 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import functools
from torch.autograd import Variable
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import math
class Mapping_Model_with_mask(nn.Module):
def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None):
super(Mapping_Model_with_mask, self).__init__()
norm_layer = networks.get_norm_layer(norm_type=norm)
activation = nn.ReLU(True)
model = []
tmp_nc = 64
n_up = 4
for i in range(n_up):
ic = min(tmp_nc * (2 ** i), mc)
oc = min(tmp_nc * (2 ** (i + 1)), mc)
model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation]
self.before_NL = nn.Sequential(*model)
if opt.NL_res:
self.NL = networks.NonLocalBlock2D_with_mask_Res(
mc,
mc,
opt.NL_fusion_method,
opt.correlation_renormalize,
opt.softmax_temperature,
opt.use_self,
opt.cosin_similarity,
)
print("You are using NL + Res")
model = []
for i in range(n_blocks):
model += [
networks.ResnetBlock(
mc,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
dilation=opt.mapping_net_dilation,
)
]
for i in range(n_up - 1):
ic = min(64 * (2 ** (4 - i)), mc)
oc = min(64 * (2 ** (3 - i)), mc)
model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation]
model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)]
if opt.feat_dim > 0 and opt.feat_dim < 64:
model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)]
# model += [nn.Conv2d(64, 1, 1, 1, 0)]
self.after_NL = nn.Sequential(*model)
def forward(self, input, mask):
x1 = self.before_NL(input)
del input
x2 = self.NL(x1, mask)
del x1, mask
x3 = self.after_NL(x2)
del x2
return x3
class Mapping_Model_with_mask_2(nn.Module): ## Multi-Scale Patch Attention
def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None):
super(Mapping_Model_with_mask_2, self).__init__()
norm_layer = networks.get_norm_layer(norm_type=norm)
activation = nn.ReLU(True)
model = []
tmp_nc = 64
n_up = 4
for i in range(n_up):
ic = min(tmp_nc * (2 ** i), mc)
oc = min(tmp_nc * (2 ** (i + 1)), mc)
model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation]
for i in range(2):
model += [
networks.ResnetBlock(
mc,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
dilation=opt.mapping_net_dilation,
)
]
print("Mapping: You are using multi-scale patch attention, conv combine + mask input")
self.before_NL = nn.Sequential(*model)
if opt.mapping_exp==1:
self.NL_scale_1=networks.Patch_Attention_4(mc,mc,8)
model = []
for i in range(2):
model += [
networks.ResnetBlock(
mc,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
dilation=opt.mapping_net_dilation,
)
]
self.res_block_1 = nn.Sequential(*model)
if opt.mapping_exp==1:
self.NL_scale_2=networks.Patch_Attention_4(mc,mc,4)
model = []
for i in range(2):
model += [
networks.ResnetBlock(
mc,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
dilation=opt.mapping_net_dilation,
)
]
self.res_block_2 = nn.Sequential(*model)
if opt.mapping_exp==1:
self.NL_scale_3=networks.Patch_Attention_4(mc,mc,2)
# self.NL_scale_3=networks.Patch_Attention_2(mc,mc,2)
model = []
for i in range(2):
model += [
networks.ResnetBlock(
mc,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
opt=opt,
dilation=opt.mapping_net_dilation,
)
]
for i in range(n_up - 1):
ic = min(64 * (2 ** (4 - i)), mc)
oc = min(64 * (2 ** (3 - i)), mc)
model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation]
model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)]
if opt.feat_dim > 0 and opt.feat_dim < 64:
model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)]
# model += [nn.Conv2d(64, 1, 1, 1, 0)]
self.after_NL = nn.Sequential(*model)
def forward(self, input, mask):
x1 = self.before_NL(input)
x2 = self.NL_scale_1(x1,mask)
x3 = self.res_block_1(x2)
x4 = self.NL_scale_2(x3,mask)
x5 = self.res_block_2(x4)
x6 = self.NL_scale_3(x5,mask)
x7 = self.after_NL(x6)
return x7
def inference_forward(self, input, mask):
x1 = self.before_NL(input)
del input
x2 = self.NL_scale_1.inference_forward(x1,mask)
del x1
x3 = self.res_block_1(x2)
del x2
x4 = self.NL_scale_2.inference_forward(x3,mask)
del x3
x5 = self.res_block_2(x4)
del x4
x6 = self.NL_scale_3.inference_forward(x5,mask)
del x5
x7 = self.after_NL(x6)
del x6
return x7
|
Bringing-Old-Photos-Back-to-Life/Global/models/NonLocal_feature_mapping_model.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/models/NonLocal_feature_mapping_model.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 3615
}
| 156 |
from . import clap
from . import audio
from . import htsat
from . import config
from . import pytorch_utils
|
CLAP/msclap/models/__init__.py/0
|
{
"file_path": "CLAP/msclap/models/__init__.py",
"repo_id": "CLAP",
"token_count": 39
}
| 157 |
.. role:: hidden
:class: hidden-section
.. module:: fairseq.data
Data Loading and Utilities
==========================
.. _datasets:
Datasets
--------
**Datasets** define the data format and provide helpers for creating
mini-batches.
.. autoclass:: fairseq.data.FairseqDataset
:members:
.. autoclass:: fairseq.data.LanguagePairDataset
:members:
.. autoclass:: fairseq.data.MonolingualDataset
:members:
**Helper Datasets**
These datasets wrap other :class:`fairseq.data.FairseqDataset` instances and
provide additional functionality:
.. autoclass:: fairseq.data.BacktranslationDataset
:members:
.. autoclass:: fairseq.data.ConcatDataset
:members:
.. autoclass:: fairseq.data.ResamplingDataset
:members:
.. autoclass:: fairseq.data.RoundRobinZipDatasets
:members:
.. autoclass:: fairseq.data.TransformEosDataset
:members:
Dictionary
----------
.. autoclass:: fairseq.data.Dictionary
:members:
Iterators
---------
.. autoclass:: fairseq.data.CountingIterator
:members:
.. autoclass:: fairseq.data.EpochBatchIterator
:members:
.. autoclass:: fairseq.data.GroupedIterator
:members:
.. autoclass:: fairseq.data.ShardedIterator
:members:
|
COCO-LM/fairseq/docs/data.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/data.rst",
"repo_id": "COCO-LM",
"token_count": 419
}
| 158 |
Tutorial: Simple LSTM
=====================
In this tutorial we will extend fairseq by adding a new
:class:`~fairseq.models.FairseqEncoderDecoderModel` that encodes a source
sentence with an LSTM and then passes the final hidden state to a second LSTM
that decodes the target sentence (without attention).
This tutorial covers:
1. **Writing an Encoder and Decoder** to encode/decode the source/target
sentence, respectively.
2. **Registering a new Model** so that it can be used with the existing
:ref:`Command-line tools`.
3. **Training the Model** using the existing command-line tools.
4. **Making generation faster** by modifying the Decoder to use
:ref:`Incremental decoding`.
1. Building an Encoder and Decoder
----------------------------------
In this section we'll define a simple LSTM Encoder and Decoder. All Encoders
should implement the :class:`~fairseq.models.FairseqEncoder` interface and
Decoders should implement the :class:`~fairseq.models.FairseqDecoder` interface.
These interfaces themselves extend :class:`torch.nn.Module`, so FairseqEncoders
and FairseqDecoders can be written and used in the same ways as ordinary PyTorch
Modules.
Encoder
~~~~~~~
Our Encoder will embed the tokens in the source sentence, feed them to a
:class:`torch.nn.LSTM` and return the final hidden state. To create our encoder
save the following in a new file named :file:`fairseq/models/simple_lstm.py`::
import torch.nn as nn
from fairseq import utils
from fairseq.models import FairseqEncoder
class SimpleLSTMEncoder(FairseqEncoder):
def __init__(
self, args, dictionary, embed_dim=128, hidden_dim=128, dropout=0.1,
):
super().__init__(dictionary)
self.args = args
# Our encoder will embed the inputs before feeding them to the LSTM.
self.embed_tokens = nn.Embedding(
num_embeddings=len(dictionary),
embedding_dim=embed_dim,
padding_idx=dictionary.pad(),
)
self.dropout = nn.Dropout(p=dropout)
# We'll use a single-layer, unidirectional LSTM for simplicity.
self.lstm = nn.LSTM(
input_size=embed_dim,
hidden_size=hidden_dim,
num_layers=1,
bidirectional=False,
batch_first=True,
)
def forward(self, src_tokens, src_lengths):
# The inputs to the ``forward()`` function are determined by the
# Task, and in particular the ``'net_input'`` key in each
# mini-batch. We discuss Tasks in the next tutorial, but for now just
# know that *src_tokens* has shape `(batch, src_len)` and *src_lengths*
# has shape `(batch)`.
# Note that the source is typically padded on the left. This can be
# configured by adding the `--left-pad-source "False"` command-line
# argument, but here we'll make the Encoder handle either kind of
# padding by converting everything to be right-padded.
if self.args.left_pad_source:
# Convert left-padding to right-padding.
src_tokens = utils.convert_padding_direction(
src_tokens,
padding_idx=self.dictionary.pad(),
left_to_right=True
)
# Embed the source.
x = self.embed_tokens(src_tokens)
# Apply dropout.
x = self.dropout(x)
# Pack the sequence into a PackedSequence object to feed to the LSTM.
x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True)
# Get the output from the LSTM.
_outputs, (final_hidden, _final_cell) = self.lstm(x)
# Return the Encoder's output. This can be any object and will be
# passed directly to the Decoder.
return {
# this will have shape `(bsz, hidden_dim)`
'final_hidden': final_hidden.squeeze(0),
}
# Encoders are required to implement this method so that we can rearrange
# the order of the batch elements during inference (e.g., beam search).
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
final_hidden = encoder_out['final_hidden']
return {
'final_hidden': final_hidden.index_select(0, new_order),
}
Decoder
~~~~~~~
Our Decoder will predict the next word, conditioned on the Encoder's final
hidden state and an embedded representation of the previous target word -- which
is sometimes called *teacher forcing*. More specifically, we'll use a
:class:`torch.nn.LSTM` to produce a sequence of hidden states that we'll project
to the size of the output vocabulary to predict each target word.
::
import torch
from fairseq.models import FairseqDecoder
class SimpleLSTMDecoder(FairseqDecoder):
def __init__(
self, dictionary, encoder_hidden_dim=128, embed_dim=128, hidden_dim=128,
dropout=0.1,
):
super().__init__(dictionary)
# Our decoder will embed the inputs before feeding them to the LSTM.
self.embed_tokens = nn.Embedding(
num_embeddings=len(dictionary),
embedding_dim=embed_dim,
padding_idx=dictionary.pad(),
)
self.dropout = nn.Dropout(p=dropout)
# We'll use a single-layer, unidirectional LSTM for simplicity.
self.lstm = nn.LSTM(
# For the first layer we'll concatenate the Encoder's final hidden
# state with the embedded target tokens.
input_size=encoder_hidden_dim + embed_dim,
hidden_size=hidden_dim,
num_layers=1,
bidirectional=False,
)
# Define the output projection.
self.output_projection = nn.Linear(hidden_dim, len(dictionary))
# During training Decoders are expected to take the entire target sequence
# (shifted right by one position) and produce logits over the vocabulary.
# The *prev_output_tokens* tensor begins with the end-of-sentence symbol,
# ``dictionary.eos()``, followed by the target sequence.
def forward(self, prev_output_tokens, encoder_out):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the last decoder layer's output of shape
`(batch, tgt_len, vocab)`
- the last decoder layer's attention weights of shape
`(batch, tgt_len, src_len)`
"""
bsz, tgt_len = prev_output_tokens.size()
# Extract the final hidden state from the Encoder.
final_encoder_hidden = encoder_out['final_hidden']
# Embed the target sequence, which has been shifted right by one
# position and now starts with the end-of-sentence symbol.
x = self.embed_tokens(prev_output_tokens)
# Apply dropout.
x = self.dropout(x)
# Concatenate the Encoder's final hidden state to *every* embedded
# target token.
x = torch.cat(
[x, final_encoder_hidden.unsqueeze(1).expand(bsz, tgt_len, -1)],
dim=2,
)
# Using PackedSequence objects in the Decoder is harder than in the
# Encoder, since the targets are not sorted in descending length order,
# which is a requirement of ``pack_padded_sequence()``. Instead we'll
# feed nn.LSTM directly.
initial_state = (
final_encoder_hidden.unsqueeze(0), # hidden
torch.zeros_like(final_encoder_hidden).unsqueeze(0), # cell
)
output, _ = self.lstm(
x.transpose(0, 1), # convert to shape `(tgt_len, bsz, dim)`
initial_state,
)
x = output.transpose(0, 1) # convert to shape `(bsz, tgt_len, hidden)`
# Project the outputs to the size of the vocabulary.
x = self.output_projection(x)
# Return the logits and ``None`` for the attention weights
return x, None
2. Registering the Model
------------------------
Now that we've defined our Encoder and Decoder we must *register* our model with
fairseq using the :func:`~fairseq.models.register_model` function decorator.
Once the model is registered we'll be able to use it with the existing
:ref:`Command-line Tools`.
All registered models must implement the
:class:`~fairseq.models.BaseFairseqModel` interface. For sequence-to-sequence
models (i.e., any model with a single Encoder and Decoder), we can instead
implement the :class:`~fairseq.models.FairseqEncoderDecoderModel` interface.
Create a small wrapper class in the same file and register it in fairseq with
the name ``'simple_lstm'``::
from fairseq.models import FairseqEncoderDecoderModel, register_model
# Note: the register_model "decorator" should immediately precede the
# definition of the Model class.
@register_model('simple_lstm')
class SimpleLSTMModel(FairseqEncoderDecoderModel):
@staticmethod
def add_args(parser):
# Models can override this method to add new command-line arguments.
# Here we'll add some new command-line arguments to configure dropout
# and the dimensionality of the embeddings and hidden states.
parser.add_argument(
'--encoder-embed-dim', type=int, metavar='N',
help='dimensionality of the encoder embeddings',
)
parser.add_argument(
'--encoder-hidden-dim', type=int, metavar='N',
help='dimensionality of the encoder hidden state',
)
parser.add_argument(
'--encoder-dropout', type=float, default=0.1,
help='encoder dropout probability',
)
parser.add_argument(
'--decoder-embed-dim', type=int, metavar='N',
help='dimensionality of the decoder embeddings',
)
parser.add_argument(
'--decoder-hidden-dim', type=int, metavar='N',
help='dimensionality of the decoder hidden state',
)
parser.add_argument(
'--decoder-dropout', type=float, default=0.1,
help='decoder dropout probability',
)
@classmethod
def build_model(cls, args, task):
# Fairseq initializes models by calling the ``build_model()``
# function. This provides more flexibility, since the returned model
# instance can be of a different type than the one that was called.
# In this case we'll just return a SimpleLSTMModel instance.
# Initialize our Encoder and Decoder.
encoder = SimpleLSTMEncoder(
args=args,
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_dim=args.encoder_hidden_dim,
dropout=args.encoder_dropout,
)
decoder = SimpleLSTMDecoder(
dictionary=task.target_dictionary,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
hidden_dim=args.decoder_hidden_dim,
dropout=args.decoder_dropout,
)
model = SimpleLSTMModel(encoder, decoder)
# Print the model architecture.
print(model)
return model
# We could override the ``forward()`` if we wanted more control over how
# the encoder and decoder interact, but it's not necessary for this
# tutorial since we can inherit the default implementation provided by
# the FairseqEncoderDecoderModel base class, which looks like:
#
# def forward(self, src_tokens, src_lengths, prev_output_tokens):
# encoder_out = self.encoder(src_tokens, src_lengths)
# decoder_out = self.decoder(prev_output_tokens, encoder_out)
# return decoder_out
Finally let's define a *named architecture* with the configuration for our
model. This is done with the :func:`~fairseq.models.register_model_architecture`
function decorator. Thereafter this named architecture can be used with the
``--arch`` command-line argument, e.g., ``--arch tutorial_simple_lstm``::
from fairseq.models import register_model_architecture
# The first argument to ``register_model_architecture()`` should be the name
# of the model we registered above (i.e., 'simple_lstm'). The function we
# register here should take a single argument *args* and modify it in-place
# to match the desired architecture.
@register_model_architecture('simple_lstm', 'tutorial_simple_lstm')
def tutorial_simple_lstm(args):
# We use ``getattr()`` to prioritize arguments that are explicitly given
# on the command-line, so that the defaults defined below are only used
# when no other value has been specified.
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_hidden_dim = getattr(args, 'encoder_hidden_dim', 256)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_hidden_dim = getattr(args, 'decoder_hidden_dim', 256)
3. Training the Model
---------------------
Now we're ready to train the model. We can use the existing :ref:`fairseq-train`
command-line tool for this, making sure to specify our new Model architecture
(``--arch tutorial_simple_lstm``).
.. note::
Make sure you've already preprocessed the data from the IWSLT example in the
:file:`examples/translation/` directory.
.. code-block:: console
> fairseq-train data-bin/iwslt14.tokenized.de-en \
--arch tutorial_simple_lstm \
--encoder-dropout 0.2 --decoder-dropout 0.2 \
--optimizer adam --lr 0.005 --lr-shrink 0.5 \
--max-tokens 12000
(...)
| epoch 052 | loss 4.027 | ppl 16.30 | wps 420805 | ups 39.7 | wpb 9841 | bsz 400 | num_updates 20852 | lr 1.95313e-05 | gnorm 0.218 | clip 0% | oom 0 | wall 529 | train_wall 396
| epoch 052 | valid on 'valid' subset | valid_loss 4.74989 | valid_ppl 26.91 | num_updates 20852 | best 4.74954
The model files should appear in the :file:`checkpoints/` directory. While this
model architecture is not very good, we can use the :ref:`fairseq-generate` script to
generate translations and compute our BLEU score over the test set:
.. code-block:: console
> fairseq-generate data-bin/iwslt14.tokenized.de-en \
--path checkpoints/checkpoint_best.pt \
--beam 5 \
--remove-bpe
(...)
| Translated 6750 sentences (153132 tokens) in 17.3s (389.12 sentences/s, 8827.68 tokens/s)
| Generate test with beam=5: BLEU4 = 8.18, 38.8/12.1/4.7/2.0 (BP=1.000, ratio=1.066, syslen=139865, reflen=131146)
4. Making generation faster
---------------------------
While autoregressive generation from sequence-to-sequence models is inherently
slow, our implementation above is especially slow because it recomputes the
entire sequence of Decoder hidden states for every output token (i.e., it is
``O(n^2)``). We can make this significantly faster by instead caching the
previous hidden states.
In fairseq this is called :ref:`Incremental decoding`. Incremental decoding is a
special mode at inference time where the Model only receives a single timestep
of input corresponding to the immediately previous output token (for teacher
forcing) and must produce the next output incrementally. Thus the model must
cache any long-term state that is needed about the sequence, e.g., hidden
states, convolutional states, etc.
To implement incremental decoding we will modify our model to implement the
:class:`~fairseq.models.FairseqIncrementalDecoder` interface. Compared to the
standard :class:`~fairseq.models.FairseqDecoder` interface, the incremental
decoder interface allows ``forward()`` methods to take an extra keyword argument
(*incremental_state*) that can be used to cache state across time-steps.
Let's replace our ``SimpleLSTMDecoder`` with an incremental one::
import torch
from fairseq.models import FairseqIncrementalDecoder
class SimpleLSTMDecoder(FairseqIncrementalDecoder):
def __init__(
self, dictionary, encoder_hidden_dim=128, embed_dim=128, hidden_dim=128,
dropout=0.1,
):
# This remains the same as before.
super().__init__(dictionary)
self.embed_tokens = nn.Embedding(
num_embeddings=len(dictionary),
embedding_dim=embed_dim,
padding_idx=dictionary.pad(),
)
self.dropout = nn.Dropout(p=dropout)
self.lstm = nn.LSTM(
input_size=encoder_hidden_dim + embed_dim,
hidden_size=hidden_dim,
num_layers=1,
bidirectional=False,
)
self.output_projection = nn.Linear(hidden_dim, len(dictionary))
# We now take an additional kwarg (*incremental_state*) for caching the
# previous hidden and cell states.
def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
if incremental_state is not None:
# If the *incremental_state* argument is not ``None`` then we are
# in incremental inference mode. While *prev_output_tokens* will
# still contain the entire decoded prefix, we will only use the
# last step and assume that the rest of the state is cached.
prev_output_tokens = prev_output_tokens[:, -1:]
# This remains the same as before.
bsz, tgt_len = prev_output_tokens.size()
final_encoder_hidden = encoder_out['final_hidden']
x = self.embed_tokens(prev_output_tokens)
x = self.dropout(x)
x = torch.cat(
[x, final_encoder_hidden.unsqueeze(1).expand(bsz, tgt_len, -1)],
dim=2,
)
# We will now check the cache and load the cached previous hidden and
# cell states, if they exist, otherwise we will initialize them to
# zeros (as before). We will use the ``utils.get_incremental_state()``
# and ``utils.set_incremental_state()`` helpers.
initial_state = utils.get_incremental_state(
self, incremental_state, 'prev_state',
)
if initial_state is None:
# first time initialization, same as the original version
initial_state = (
final_encoder_hidden.unsqueeze(0), # hidden
torch.zeros_like(final_encoder_hidden).unsqueeze(0), # cell
)
# Run one step of our LSTM.
output, latest_state = self.lstm(x.transpose(0, 1), initial_state)
# Update the cache with the latest hidden and cell states.
utils.set_incremental_state(
self, incremental_state, 'prev_state', latest_state,
)
# This remains the same as before
x = output.transpose(0, 1)
x = self.output_projection(x)
return x, None
# The ``FairseqIncrementalDecoder`` interface also requires implementing a
# ``reorder_incremental_state()`` method, which is used during beam search
# to select and reorder the incremental state.
def reorder_incremental_state(self, incremental_state, new_order):
# Load the cached state.
prev_state = utils.get_incremental_state(
self, incremental_state, 'prev_state',
)
# Reorder batches according to *new_order*.
reordered_state = (
prev_state[0].index_select(1, new_order), # hidden
prev_state[1].index_select(1, new_order), # cell
)
# Update the cached state.
utils.set_incremental_state(
self, incremental_state, 'prev_state', reordered_state,
)
Finally, we can rerun generation and observe the speedup:
.. code-block:: console
# Before
> fairseq-generate data-bin/iwslt14.tokenized.de-en \
--path checkpoints/checkpoint_best.pt \
--beam 5 \
--remove-bpe
(...)
| Translated 6750 sentences (153132 tokens) in 17.3s (389.12 sentences/s, 8827.68 tokens/s)
| Generate test with beam=5: BLEU4 = 8.18, 38.8/12.1/4.7/2.0 (BP=1.000, ratio=1.066, syslen=139865, reflen=131146)
# After
> fairseq-generate data-bin/iwslt14.tokenized.de-en \
--path checkpoints/checkpoint_best.pt \
--beam 5 \
--remove-bpe
(...)
| Translated 6750 sentences (153132 tokens) in 5.5s (1225.54 sentences/s, 27802.94 tokens/s)
| Generate test with beam=5: BLEU4 = 8.18, 38.8/12.1/4.7/2.0 (BP=1.000, ratio=1.066, syslen=139865, reflen=131146)
|
COCO-LM/fairseq/docs/tutorial_simple_lstm.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/tutorial_simple_lstm.rst",
"repo_id": "COCO-LM",
"token_count": 8622
}
| 159 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
SPM_ENCODE=flores/scripts/spm_encode.py
DATA=data_tmp
SPM_MODEL=criss_checkpoints/sentence.bpe.model
DICT=criss_checkpoints/dict.txt
if [[ -d flores ]]; then
echo "flores already cloned"
else
git clone https://github.com/facebookresearch/flores
fi
if [[ -d LASER ]]; then
echo "LASER already cloned"
else
git clone https://github.com/facebookresearch/LASER
fi
mkdir -p data_tmp
declare -A lang_tatoeba_map=( ["ar_AR"]="ara" ["de_DE"]="deu" ["es_XX"]="spa" ["et_EE"]="est" ["fi_FI"]="fin" ["fr_XX"]="fra" ["hi_IN"]="hin" ["it_IT"]="ita" ["ja_XX"]="jpn" ["ko_KR"]="kor" ["kk_KZ"]="kaz" ["nl_XX"]="nld" ["ru_RU"]="rus" ["tr_TR"]="tur" ["vi_VN"]="vie" ["zh_CN"]="cmn")
for lang in ar_AR de_DE es_XX et_EE fi_FI fr_XX hi_IN it_IT ja_XX kk_KZ ko_KR nl_XX ru_RU tr_TR vi_VN zh_CN; do
lang_tatoeba=${lang_tatoeba_map[$lang]}
echo $lang_tatoeba
datadir=$DATA/${lang}-en_XX-tatoeba
rm -rf $datadir
mkdir -p $datadir
TEST_PREFIX=LASER/data/tatoeba/v1/tatoeba
python $SPM_ENCODE \
--model ${SPM_MODEL} \
--output_format=piece \
--inputs ${TEST_PREFIX}.${lang_tatoeba}-eng.${lang_tatoeba} ${TEST_PREFIX}.${lang_tatoeba}-eng.eng \
--outputs $datadir/test.bpe.${lang}-en_XX.${lang} $datadir/test.bpe.${lang}-en_XX.en_XX
# binarize data
fairseq-preprocess \
--source-lang ${lang} --target-lang en_XX \
--testpref $datadir/test.bpe.${lang}-en_XX \
--destdir $datadir \
--srcdict ${DICT} \
--joined-dictionary \
--workers 4
done
|
COCO-LM/fairseq/examples/criss/download_and_preprocess_tatoeba.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/criss/download_and_preprocess_tatoeba.sh",
"repo_id": "COCO-LM",
"token_count": 758
}
| 160 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
URLS=(
"http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
"http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
"http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz"
"http://data.statmt.org/wmt18/translation-task/rapid2016.tgz"
"http://data.statmt.org/wmt17/translation-task/dev.tgz"
"http://statmt.org/wmt14/test-full.tgz"
)
CORPORA=(
"training/europarl-v7.de-en"
"commoncrawl.de-en"
"training-parallel-nc-v13/news-commentary-v13.de-en"
"rapid2016.de-en"
)
if [ ! -d "$SCRIPTS" ]; then
echo "Please set SCRIPTS variable correctly to point to Moses scripts."
exit
fi
src=en
tgt=de
lang=en-de
prep=wmt18_en_de
tmp=$prep/tmp
orig=orig
dev=dev/newstest2012
codes=32000
bpe=bpe.32k
mkdir -p $orig $tmp $prep $bpe
cd $orig
for ((i=0;i<${#URLS[@]};++i)); do
url=${URLS[i]}
file=$(basename $url)
if [ -f $file ]; then
echo "$file already exists, skipping download"
else
wget "$url"
if [ -f $file ]; then
echo "$url successfully downloaded."
else
echo "$url not successfully downloaded."
exit 1
fi
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
fi
fi
done
cd ..
echo "pre-processing train data..."
for l in $src $tgt; do
rm -rf $tmp/train.tags.$lang.tok.$l
for f in "${CORPORA[@]}"; do
cat $orig/$f.$l | \
perl $REM_NON_PRINT_CHAR | \
perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l
done
done
echo "pre-processing test data..."
for l in $src $tgt; do
if [ "$l" == "$src" ]; then
t="src"
else
t="ref"
fi
grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
sed -e 's/<seg id="[0-9]*">\s*//g' | \
sed -e 's/\s*<\/seg>\s*//g' | \
sed -e "s/\’/\'/g" | \
perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l
echo ""
done
# apply length filtering before BPE
perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100
# use newstest2012 for valid
echo "pre-processing valid data..."
for l in $src $tgt; do
rm -rf $tmp/valid.$l
cat $orig/$dev.$l | \
perl $REM_NON_PRINT_CHAR | \
perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l
done
mkdir output
mv $tmp/{train,valid,test}.{$src,$tgt} output
#BPE
git clone https://github.com/glample/fastBPE.git
pushd fastBPE
g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast
popd
fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes
for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done
|
COCO-LM/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh",
"repo_id": "COCO-LM",
"token_count": 1526
}
| 161 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.multilingual_transformer import MultilingualTransformerModel
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
base_architecture,
)
from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder
@register_model("latent_multilingual_transformer")
class LatentMultilingualTransformerModel(MultilingualTransformerModel):
"""A variant of standard multilingual Transformer models which encoder and/or
decoders supports latent depth, as is in "Deep Transformer with Latent Depth"
(https://arxiv.org/abs/2009.13102).
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
MultilingualTransformerModel.add_args(parser)
parser.add_argument(
'--soft-select',
action='store_true',
help='use soft samples in training and inference',
)
parser.add_argument(
'--sampling-tau',
type=float,
default=5.,
help='sampling temperature',
)
@classmethod
def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs):
if is_encoder:
if hasattr(args, "encoder_latent_layer") and args.encoder_latent_layer:
return LatentTransformerEncoder(
args, lang_dict, embed_tokens, num_logits=len(langs)
)
else:
return TransformerEncoder(args, lang_dict, embed_tokens)
else:
if hasattr(args, "decoder_latent_layer") and args.decoder_latent_layer:
return LatentTransformerDecoder(
args, lang_dict, embed_tokens, num_logits=len(langs)
)
else:
return TransformerDecoder(args, lang_dict, embed_tokens)
@register_model_architecture(
"latent_multilingual_transformer", "latent_multilingual_transformer"
)
def latent_multilingual_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 24)
args.share_encoders = getattr(args, "share_encoders", True)
args.share_decoders = getattr(args, "share_decoders", True)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True)
base_architecture(args)
|
COCO-LM/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py",
"repo_id": "COCO-LM",
"token_count": 1294
}
| 162 |
import argparse
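# Filter a parallel corpus using per-language character histograms: a sentence pair is
# kept only if, for both languages, the fraction of its characters that appear in that
# language's accepted-character list (read from --histograms) exceeds --threshold.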
parser = argparse.ArgumentParser()
parser.add_argument('--src', type=str, help='Source language')
parser.add_argument('--tgt', type=str, help='Target language')
parser.add_argument('--src-file', type=str, help='Input source file')
parser.add_argument('--tgt-file', type=str, help='Input target file')
parser.add_argument('--src-output-file', type=str, help='Output source file')
parser.add_argument('--tgt-output-file', type=str, help='Output target file')
parser.add_argument('--threshold', type=float, default=0.5, help='Threshold')
parser.add_argument('--threshold-character', type=str, default=']', help='Threshold character')
parser.add_argument('--histograms', type=str, help='Path to histograms')
args = parser.parse_args()
def read_hist(f):
ch = []
for line in f:
c = line[0]
if c == args.threshold_character:
break
ch.append(c)
return ch
with(open("{}/{}".format(args.histograms, args.src), 'r', encoding='utf8')) as f:
ch1 = read_hist(f)
with(open("{}/{}".format(args.histograms, args.tgt), 'r', encoding='utf8')) as f:
ch2 = read_hist(f)
print("Accepted characters for {}: {}".format(args.src, ch1))
print("Accepted characters for {}: {}".format(args.tgt, ch2))
with open(args.src_file, 'r', encoding='utf8') as fs1, open(args.tgt_file, 'r', encoding='utf8') as fs2, open(args.src_output_file, 'w', encoding='utf8') as fos1, open(args.tgt_output_file, 'w', encoding='utf8') as fos2:
ls1 = fs1.readline()
ls2 = fs2.readline()
while ls1 or ls2:
cnt1 = len([c for c in ls1.strip() if c in ch1])
cnt2 = len([c for c in ls2.strip() if c in ch2])
if cnt1 / len(ls1) > args.threshold and cnt2 / len(ls2) > args.threshold:
fos1.write(ls1)
fos2.write(ls2)
else:
print("{} {} {} \n{} {} {}".format(args.src, cnt1 / len(ls1), ls1.strip(), args.tgt, cnt2 / len(ls2), ls2.strip()))
ls1 = fs1.readline()
ls2 = fs2.readline()
|
COCO-LM/fairseq/examples/m2m_100/process_data/clean_histogram.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/process_data/clean_histogram.py",
"repo_id": "COCO-LM",
"token_count": 846
}
| 163 |
# Multilingual Translation
[[Multilingual Translation with Extensible Multilingual Pretraining and Finetuning, https://arxiv.org/abs/2008.00401]](https://arxiv.org/abs/2008.00401)
## Introduction
This work is for training multilingual translation models with multiple bitext datasets. This multilingual translation framework supports (see the [[training section]](#Training) and [[finetuning section]](#Finetuning) for examples):
* temperature-based sampling over unbalanced datasets of different translation directions (see the sketch after this list)
  - --sampling-method with choices=['uniform', 'temperature', 'concat']
  - --sampling-temperature
* configurable, automatic addition of source and/or target language tokens to source/target sentences, using data prepared in the same way as for bilingual training
  - --encoder-langtok with choices=['src', 'tgt', None] to specify whether to add source or target language tokens to the source sentences
  - --decoder-langtok (binary option) to specify whether to add target language tokens to the target sentences or not
* finetuning mBART pretrained models for multilingual translation
  - --finetune-from-model to specify the path from which to load the pretrained model
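As a rough sketch of what temperature-based sampling does (not the exact fairseq implementation), with temperature T each direction is sampled with probability proportional to its dataset-size ratio raised to the power 1/T:
```python
import numpy as np

def temperature_sampling_probs(dataset_sizes, temperature=1.5):
    """p_i is proportional to (n_i / N) ** (1 / T): higher T flattens the distribution."""
    sizes = np.array(dataset_sizes, dtype=float)
    ratios = sizes / sizes.sum()
    weights = ratios ** (1.0 / temperature)
    return weights / weights.sum()

# e.g. one 10M-pair direction and one 100k-pair direction
print(temperature_sampling_probs([10_000_000, 100_000], temperature=1.5))
# ~[0.956, 0.044] instead of ~[0.990, 0.010] with size-proportional sampling
```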
## Preprocessing data
Multilingual training requires a joint BPE vocab. Please follow [mBART's preprocessing steps](https://github.com/pytorch/fairseq/tree/master/examples/mbart#bpe-data) to reuse our pretrained sentence-piece model.
You can also train a joint BPE model on your own dataset and then follow the steps in [[link]](https://github.com/pytorch/fairseq/tree/master/examples/translation#multilingual-translation).
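If you train your own joint BPE model, a minimal sentencepiece sketch might look like the following (the file names `all_languages.txt` and `train.en` are placeholders, not files in this repo):
```python
import sentencepiece as spm

# Train a joint BPE model over concatenated text from all languages.
spm.SentencePieceTrainer.train(
    input="all_languages.txt",    # placeholder: one sentence per line, all languages mixed
    model_prefix="sentence.bpe",  # writes sentence.bpe.model / sentence.bpe.vocab
    vocab_size=32000,
    model_type="bpe",
    character_coverage=1.0,
)

# Apply it to one corpus file.
sp = spm.SentencePieceProcessor(model_file="sentence.bpe.model")
with open("train.en") as fin, open("train.bpe.en", "w") as fout:
    for line in fin:
        fout.write(" ".join(sp.encode(line.strip(), out_type=str)) + "\n")
```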
## Training
```bash
lang_pairs=<language pairs to be trained, e.g. "en-cs,cs-en">
path_2_data=<set to data path>
lang_list=<a file which contains a list of languages separated by new lines>
fairseq-train $path_2_data \
--encoder-normalize-before --decoder-normalize-before \
--arch transformer --layernorm-embedding \
--task translation_multi_simple_epoch \
--sampling-method "temperature" \
--sampling-temperature 1.5 \
--encoder-langtok "src" \
--decoder-langtok \
--lang-dict "$lang_list" \
--lang-pairs "$lang_pairs" \
--criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
--optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
--lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
--dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
--max-tokens 1024 --update-freq 2 \
--save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
--seed 222 --log-format simple --log-interval 2
```
## Finetuning
We can also finetune multilingual translation models from models pretrained on monolingual data, e.g. [mBART](https://github.com/pytorch/fairseq/tree/master/examples/mbart).
```bash
lang_pairs=<language pairs to be trained, e.g. "en-cs,cs-en">
path_2_data=<set to data path>
lang_list=<a file which contains a list of languages separated by new lines>
pretrained_model=<path to the pretrained model, e.g. mbart or another trained multilingual model>
fairseq-train $path_2_data \
--finetune-from-model $pretrained_model \
--encoder-normalize-before --decoder-normalize-before \
--arch transformer --layernorm-embedding \
--task translation_multi_simple_epoch \
--sampling-method "temperature" \
--sampling-temperature 1.5 \
--encoder-langtok "src" \
--decoder-langtok \
--lang-dict "$lang_list" \
--lang-pairs "$lang_pairs" \
--criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
--optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
--lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
--dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
--max-tokens 1024 --update-freq 2 \
--save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
--seed 222 --log-format simple --log-interval 2
```
## Generate
The following command uses the multilingual task (translation_multi_simple_epoch) to generate translations from $source_lang to $target_lang on the test dataset. During generation, the source language token is added to the source sentences and the target language token is used as the first token when decoding the target sentences. The options --lang-dict and --lang-pairs tell the generation process the ordered list of languages and the translation directions the trained model is aware of; they need to be consistent with training.
```bash
model=<multilingual model>
source_lang=<source language>
target_lang=<target language>
fairseq-generate $path_2_data \
--path $model \
--task translation_multi_simple_epoch \
--gen-subset test \
--source-lang $source_lang \
--target-lang $target_lang \
--sacrebleu --remove-bpe 'sentencepiece' \
--batch-size 32 \
--encoder-langtok "src" \
--decoder-langtok \
--lang-dict "$lang_list" \
--lang-pairs "$lang_pairs" > ${source_lang}_${target_lang}.txt
```
Fairseq will write the translations to a file `${source_lang}_${target_lang}.txt`, with the sacrebleu score reported at the end.
You can also use a customized tokenizer to compare performance with the literature. For example, you can get a tokenizer [here](https://github.com/rsennrich/wmt16-scripts) and do the following:
```bash
TOKENIZER=<path to a customized tokenizer for decoding evaluation>
TOK_CMD=<"$TOKENIZER $target_lang" or cat for sacrebleu>
cat ${source_lang}_${target_lang}.txt | grep -P "^H" |sort -V |cut -f 3- |$TOK_CMD > ${source_lang}_${target_lang}.hyp
cat ${source_lang}_${target_lang}.txt | grep -P "^T" |sort -V |cut -f 2- |$TOK_CMD > ${source_lang}_${target_lang}.ref
sacrebleu -tok 'none' -s 'none' ${source_lang}_${target_lang}.ref < ${source_lang}_${target_lang}.hyp
```
# mBART50 models
* [mBART 50 pretrained model](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.pretrained.tar.gz).
* [mBART 50 finetuned many-to-one](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.n1.tar.gz).
* [mBART 50 finetuned one-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.1n.tar.gz).
* [mBART 50 finetuned many-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.nn.tar.gz).
Please download and extract from the above tarballs. Each tarball contains
* The fairseq model checkpoint: model.pt
* The list of supported languages: ML50_langs.txt
* Sentence piece model: sentence.bpe.model
* Fairseq dictionary of each language: dict.{lang}.txt (please replace lang with a language specified in ML50_langs.txt)
To use the trained models,
* use the tool [binarize.py](./data_scripts/binarize.py) to binarize your data using sentence.bpe.model and dict.{lang}.txt, and copy the dictionaries to your data path
* then run the generation command:
```bash
path_2_data=<path to your binarized data with fairseq dictionaries>
model=<path_to_extracted_folder>/model.pt
lang_list=<path_to_extracted_folder>/ML50_langs.txt
source_lang=<source language>
target_lang=<target language>
fairseq-generate $path_2_data \
--path $model \
--task translation_multi_simple_epoch \
--gen-subset test \
--source-lang $source_lang \
--target-lang $target_lang \
--sacrebleu --remove-bpe 'sentencepiece' \
--batch-size 32 \
--encoder-langtok "src" \
--decoder-langtok \
--lang-dict "$lang_list"
```
## Citation
```bibtex
@article{tang2020multilingual,
title={Multilingual Translation with Extensible Multilingual Pretraining and Finetuning},
author={Yuqing Tang and Chau Tran and Xian Li and Peng-Jen Chen and Naman Goyal and Vishrav Chaudhary and Jiatao Gu and Angela Fan},
year={2020},
eprint={2008.00401},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
COCO-LM/fairseq/examples/multilingual/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/README.md",
"repo_id": "COCO-LM",
"token_count": 2561
}
| 164 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
if [ -z $WORKDIR_ROOT ] ;
then
echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..."
exit
fi
set -x -e
# TODO update the workdir and dest dir name
# put fasttext model
WORKDIR=$WORKDIR_ROOT
# put intermediate files
TMP_DIR=$WORKDIR_ROOT/tmp/tmp_wmt20_lowres_download
# output {train,valid,test} files to dest
DEST=$WORKDIR_ROOT/ML50/raw
UTILS=$PWD/utils
# per dataset locations
COMMONCRAWL_DIR=$TMP_DIR/commoncrawl
YANDEX_CORPUS=$WORKDIR_ROOT/wmt20/official/ru/yandex/1mcorpus.zip
# unzipped
CZENG_CORPUS=$WORKDIR_ROOT/wmt20/official/cs/czeng/czeng20-train
CCMT_DIR=$WORKDIR_ROOT/wmt20/official/zh/ccmt/parallel
download_and_select() {
SUBFOLDER=$1
URL=$2
UNCOMPRESS_CMD=$3
LANG=$4
INPUT_FILEPATH=$5
if [[ $# -gt 5 ]]; then
LANG_COL=$6
EN_COL=$7
fi
mkdir -p $SUBFOLDER
cd $SUBFOLDER
wget -nc --content-disposition $URL
$UNCOMPRESS_CMD
if [[ $# -gt 5 ]]; then
cut -f$LANG_COL $INPUT_FILEPATH > $INPUT_FILEPATH.$LANG
cut -f$EN_COL $INPUT_FILEPATH > $INPUT_FILEPATH.en
fi
cd ..
ln -sf $SUBFOLDER/$INPUT_FILEPATH.$LANG $SUBFOLDER.$LANG
ln -sf $SUBFOLDER/$INPUT_FILEPATH.en $SUBFOLDER.en
}
prepare_lid() {
pip install fasttext
# TODO specify global workdir
MODEL=$WORKDIR/fasttext/lid.176.bin
LID_MULTI=$UTILS/fasttext_multi_filter.py
if [ ! -f "$MODEL" ]; then
echo "downloading fasttext lid model..."
mkdir -p $WORKDIR/fasttext
wget -nc https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O $MODEL
fi
}
prepare_moses() {
pushd $UTILS
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
popd
}
lid_filter() {
# TODO specify global workdir
MODEL=$WORKDIR/fasttext/lid.176.bin
LID_MULTI=$UTILS/fasttext_multi_filter.py
prepare_lid
SRC=$1
SRC_FILE=$2
SRC_OUTPUT=$3
TGT=$4
TGT_FILE=$5
TGT_OUTPUT=$6
python $LID_MULTI --model $MODEL --inputs $SRC_FILE $TGT_FILE --langs $SRC $TGT --outputs $SRC_OUTPUT $TGT_OUTPUT
}
prepare_ja_ted() {
mkdir -p ted
cd ted
wget -nc https://wit3.fbk.eu/archive/2017-01-trnted//texts/en/ja/en-ja.tgz
tar -zxvf en-ja.tgz
cat en-ja/train.tags.en-ja.en | grep -v -P "^[ ]*\<" | sed 's/^[ \t]*//g' | sed 's/[ \t]*$//g' > en-ja/train.en-ja.en
cat en-ja/train.tags.en-ja.ja | grep -v -P "^[ ]*\<" | sed 's/^[ \t]*//g' | sed 's/[ \t]*$//g' > en-ja/train.en-ja.ja
cd ..
ln -sf ted/en-ja/train.en-ja.ja ted.ja
ln -sf ted/en-ja/train.en-ja.en ted.en
}
prepare_ja() {
OUTPUT_DIR=$TMP_DIR/ja
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select paracrawl "http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/release/2.0/bitext/en-ja.tar.gz" "tar -zxvf en-ja.tar.gz" ja en-ja/en-ja.bicleaner05.txt 4 3 &
download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-ja.tsv.gz" "gunzip -f news-commentary-v15.en-ja.tsv.gz" ja news-commentary-v15.en-ja.tsv 2 1 &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ja-en.tsv.gz" "gunzip -f wikititles-v2.ja-en.tsv.gz" ja wikititles-v2.ja-en.tsv 1 2 &
download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ja.langid.tsv.gz" "gunzip -f WikiMatrix.v1.en-ja.langid.tsv.gz" ja WikiMatrix.v1.en-ja.langid.tsv 3 2 &
download_and_select subtitle "https://nlp.stanford.edu/projects/jesc/data/split.tar.gz" "tar -zxvf split.tar.gz" ja split/train 2 1 &
download_and_select kftt "http://www.phontron.com/kftt/download/kftt-data-1.0.tar.gz" "tar -zxvf kftt-data-1.0.tar.gz" ja kftt-data-1.0/data/orig/kyoto-train &
prepare_ja_ted &
# wait for the TED download to finish along with the other background jobs
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.ja" | sort -V | xargs cat > all.ja
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter ja all.ja $DEST/train.ja_XX-en_XX.ja_XX en all.en $DEST/train.ja_XX-en_XX.en_XX
}
prepare_ta() {
OUTPUT_DIR=$TMP_DIR/ta
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ta-en.tsv.gz" "gunzip -f wikititles-v2.ta-en.tsv.gz" ta wikititles-v2.ta-en.tsv 1 2 &
download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ta.langid.tsv.gz" "gunzip -f WikiMatrix.v1.en-ta.langid.tsv.gz" ta WikiMatrix.v1.en-ta.langid.tsv 3 2 &
download_and_select pmindia "http://data.statmt.org/pmindia/v1/parallel/pmindia.v1.ta-en.tsv" "" ta pmindia.v1.ta-en.tsv 2 1 &
download_and_select tanzil "https://object.pouta.csc.fi/OPUS-Tanzil/v1/moses/en-ta.txt.zip" "unzip en-ta.txt.zip" ta Tanzil.en-ta &
download_and_select pib "http://preon.iiit.ac.in/~jerin/resources/datasets/pib-v0.tar" "tar -xvf pib-v0.tar" ta pib/en-ta/train &
download_and_select mkb "http://preon.iiit.ac.in/~jerin/resources/datasets/mkb-v0.tar" "tar -xvf mkb-v0.tar" ta mkb/en-ta/mkb &
download_and_select ufal "http://ufal.mff.cuni.cz/~ramasamy/parallel/data/v2/en-ta-parallel-v2.tar.gz" "tar -zxvf en-ta-parallel-v2.tar.gz" ta en-ta-parallel-v2/corpus.bcn.train &
wait
# need special handling for nlpc
mkdir -p nlpc
cd nlpc
wget -nc https://raw.githubusercontent.com/nlpc-uom/English-Tamil-Parallel-Corpus/master/En-Ta%20Corpus/En-Ta%20English.txt
wget -nc https://github.com/nlpc-uom/English-Tamil-Parallel-Corpus/raw/master/En-Ta%20Corpus/En-Ta%20Tamil.txt
tail -n +4 "En-Ta English.txt" > en-ta.en
tail -n +4 "En-Ta Tamil.txt" > en-ta.ta
cd ..
ln -sf nlpc/en-ta.en nlpc.en
ln -sf nlpc/en-ta.ta nlpc.ta
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.ta" | sort -V | xargs cat > all.ta
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter ta all.ta $DEST/train.ta_IN-en_XX.ta_IN en all.en $DEST/train.ta_IN-en_XX.en_XX
}
prepare_iu() {
OUTPUT_DIR=$TMP_DIR/iu
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select nh "https://nrc-digital-repository.canada.ca/eng/view/dataset/?id=c7e34fa7-7629-43c2-bd6d-19b32bf64f60" "tar -zxvf Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0.1.tgz" iu Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0/NunavutHansard > /dev/null &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.iu-en.tsv.gz" "gunzip -f wikititles-v2.iu-en.tsv.gz" iu wikititles-v2.iu-en.tsv 1 2 &
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.iu" | sort -V | xargs cat | nh/Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0/scripts/normalize-iu-spelling.pl > all.iu
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
paste all.iu all.en | awk -F $'\t' '$1!=""&&$2!=""' > all.iuen
cut -f1 all.iuen > $DEST/train.iu_CA-en_XX.iu_CA
cut -f2 all.iuen > $DEST/train.iu_CA-en_XX.en_XX
}
prepare_km() {
OUTPUT_DIR=$TMP_DIR/km
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select paracrawl "http://data.statmt.org/wmt20/translation-task/ps-km/wmt20-sent.en-km.xz" "unxz wmt20-sent.en-km.zx" km wmt20-sent.en-km 2 1 &
# km-parallel has multiple sets, concat all of them together
mkdir -p opus
cd opus
wget -nc "http://data.statmt.org/wmt20/translation-task/ps-km/km-parallel.tgz"
tar -zxvf km-parallel.tgz
find ./km-parallel -maxdepth 1 -name "*.km" | sort -V | xargs cat > opus.km
find ./km-parallel -maxdepth 1 -name "*.en" | sort -V | xargs cat > opus.en
cd ..
ln -sf opus/opus.km .
ln -sf opus/opus.en .
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.km" | sort -V | xargs cat > all.km
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter km all.km $DEST/train.km_KH-en_XX.km_KH en all.en $DEST/train.km_KH-en_XX.en_XX
}
prepare_ps() {
OUTPUT_DIR=$TMP_DIR/ps
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select paracrawl "http://data.statmt.org/wmt20/translation-task/ps-km/wmt20-sent.en-ps.xz" "unxz wmt20-sent.en-ps.xz" ps wmt20-sent.en-ps 2 1 &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ps-en.tsv.gz" "gunzip -f wikititles-v2.ps-en.tsv.gz" ps wikititles-v2.ps-en.tsv 1 2 &
# ps-parallel has multiple sets, concat all of them together
mkdir -p opus
cd opus
wget -nc "http://data.statmt.org/wmt20/translation-task/ps-km/ps-parallel.tgz"
tar -zxvf ps-parallel.tgz
find ./ps-parallel -maxdepth 1 -name "*.ps" | sort -V | xargs cat > opus.ps
find ./ps-parallel -maxdepth 1 -name "*.en" | sort -V | xargs cat > opus.en
cd ..
ln -sf opus/opus.ps opus.ps
ln -sf opus/opus.en opus.en
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.ps" | sort -V | xargs cat > all.ps
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter ps all.ps $DEST/train.ps_AF-en_XX.ps_AF en all.en $DEST/train.ps_AF-en_XX.en_XX
}
download_commoncrawl() {
mkdir -p $COMMONCRAWL_DIR
cd $COMMONCRAWL_DIR
wget -nc "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz"
tar -zxvf training-parallel-commoncrawl.tgz
}
link_commoncrawl() {
LANG=$1
ln -sf $COMMONCRAWL_DIR/commoncrawl.$LANG-en.en commoncrawl.en
ln -sf $COMMONCRAWL_DIR/commoncrawl.$LANG-en.$LANG commoncrawl.$LANG
}
strip_xlf() {
INPUT_FILE=$1
SRC=$2
TGT=$3
grep '<source xml:lang=' $INPUT_FILE | sed 's/^<[^<>]*>//g' | sed 's/<[^<>]*>$//g' > $INPUT_FILE.$SRC
grep '<target xml:lang=' $INPUT_FILE | sed 's/^<[^<>]*>//g' | sed 's/<[^<>]*>$//g' > $INPUT_FILE.$TGT
}
download_and_process_tilde() {
URL=$1
UNCOMPRESS_CMD=$2
FILENAME=$3
LANG=$4
PROCESS_CMD=$5
mkdir -p tilde
cd tilde
wget -nc $URL
$UNCOMPRESS_CMD
echo "executing cmd"
echo $PROCESS_CMD
$PROCESS_CMD
cd ..
ln -sf tilde/$FILENAME.$LANG tilde.$LANG
ln -sf tilde/$FILENAME.en tilde.en
}
prepare_cs() {
OUTPUT_DIR=$TMP_DIR/cs
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
#download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.cs-en.tsv.gz" "gunzip europarl-v10.cs-en.tsv.gz" cs europarl-v10.cs-en.tsv 1 2 &
#download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-cs.txt.gz" "gunzip en-cs.txt.gz" cs en-cs.txt 2 1 &
#link_commoncrawl cs
#download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.cs-en.tsv.gz" "gunzip news-commentary-v15.cs-en.tsv.gz" cs news-commentary-v15.cs-en.tsv 1 2 &
#download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.cs-en.tsv.gz" "gunzip wikititles-v2.cs-en.tsv.gz" cs wikititles-v2.cs-en.tsv 1 2 &
#download_and_process_tilde "http://data.statmt.org/wmt20/translation-task/rapid/RAPID_2019.cs-en.xlf.gz" "gunzip RAPID_2019.cs-en.xlf.gz" RAPID_2019.cs-en.xlf cs "strip_xlf RAPID_2019.cs-en.xlf cs en" &
#download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.cs-en.langid.tsv.gz" "gunzip WikiMatrix.v1.cs-en.langid.tsv.gz" cs WikiMatrix.v1.cs-en.langid.tsv 2 3 &
#wait
# remove previous results
#rm -f all.??
#find ./ -maxdepth 1 -name "*.cs" | sort -V | xargs cat > all.cs
#find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
if [ -z $CZENG_CORPUS ] ;
then
echo "Please download CZENG_CORPUS manually and place them at $CZENG_CORPUS. Exitting..."
exit
fi
cat $CZENG_CORPUS | sed '/^$/d' | cut -f5 > all.cs
cat $CZENG_CORPUS | sed '/^$/d' | cut -f6 > all.en
lid_filter cs all.cs $DEST/train.cs_CZ-en_XX.cs_CZ en all.en $DEST/train.cs_CZ-en_XX.en_XX
}
prepare_de() {
OUTPUT_DIR=$TMP_DIR/de
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.de-en.tsv.gz" "gunzip europarl-v10.de-en.tsv.gz" de europarl-v10.de-en.tsv 1 2 &
download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-de.txt.gz" "gunzip en-de.txt.gz" de en-de.txt 2 1 &
link_commoncrawl de
download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.de-en.tsv.gz" "gunzip news-commentary-v15.de-en.tsv.gz" de news-commentary-v15.de-en.tsv 1 2 &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.de-en.tsv.gz" "gunzip wikititles-v2.de-en.tsv.gz" de wikititles-v2.de-en.tsv 1 2 &
download_and_process_tilde "http://data.statmt.org/wmt20/translation-task/rapid/RAPID_2019.de-en.xlf.gz" "gunzip RAPID_2019.de-en.xlf.gz" RAPID_2019.de-en.xlf de "strip_xlf RAPID_2019.de-en.xlf de en" &
download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.de-en.langid.tsv.gz" "gunzip WikiMatrix.v1.de-en.langid.tsv.gz" de WikiMatrix.v1.de-en.langid.tsv 2 3 &
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.de" | sort -V | xargs cat > all.de
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter de all.de $DEST/train.de_DE-en_XX.de_DE en all.en $DEST/train.de_DE-en_XX.en_XX
}
prepare_tmx() {
TMX_FILE=$1
git clone https://github.com/amake/TMX2Corpus $UTILS/tmx2corpus
pip install tinysegmenter
python $UTILS/tmx2corpus/tmx2corpus.py $TMX_FILE
}
prepare_pl() {
OUTPUT_DIR=$TMP_DIR/pl
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
# download_and_select europarl "http://www.statmt.org/europarl/v10/training/europarl-v10.pl-en.tsv.gz" "gunzip europarl-v10.pl-en.tsv.gz" pl europarl-v10.pl-en.tsv 1 2 &
# download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release5.1/en-pl.txt.gz" "gunzip en-pl.txt.gz" pl en-pl.txt 2 1 &
# download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.pl-en.tsv.gz" "gunzip wikititles-v2.pl-en.tsv.gz" pl wikititles-v2.pl-en.tsv 1 2 &
download_and_select tilde "https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2019.en-pl.tmx.zip" "gunzip rapid2019.en-pl.tmx.zip" bitext pl "prepare_tmx RAPID_2019.UNIQUE.en-pl.tmx" &
# download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-pl.langid.tsv.gz" "gunzip WikiMatrix.v1.en-pl.langid.tsv.gz" pl WikiMatrix.v1.en-pl.langid.tsv 3 2 &
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.pl" | sort -V | xargs cat > all.pl
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter pl all.pl $DEST/train.pl_PL-en_XX.pl_PL en all.en $DEST/train.pl_PL-en_XX.en_XX
}
prepare_uncorpus() {
URLS=$1
FILES=$2
mkdir -p uncorpus
cd uncorpus
for URL in $URLS; do
wget -nc $URL
done
cat $FILES > uncorpus.tar.gz
tar -zxvf uncorpus.tar.gz
cd ..
ln -sf uncorpus/en-$LANG/UNv1.0.en-$LANG.$LANG uncorpus.$LANG
ln -sf uncorpus/en-$LANG/UNv1.0.en-$LANG.en uncorpus.en
}
prepare_yandex() {
mkdir -p yandex
cd yandex
unzip $YANDEX_CORPUS -d ./
cd ..
ln -s yandex/corpus.en_ru.1m.en yandex.en
ln -s yandex/corpus.en_ru.1m.ru yandex.ru
}
prepare_ru() {
OUTPUT_DIR=$TMP_DIR/ru
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select paracrawl "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz" "tar -zxvf paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz" ru paracrawl-release1.en-ru.zipporah0-dedup-clean &
link_commoncrawl ru
download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-ru.tsv.gz" "gunzip news-commentary-v15.en-ru.tsv.gz" ru news-commentary-v15.en-ru.tsv 2 1 &
prepare_yandex &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.ru-en.tsv.gz" "gunzip wikititles-v2.ru-en.tsv.gz" ru wikititles-v2.ru-en.tsv 1 2 &
prepare_uncorpus "https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02" "UNv1.0.en-ru.tar.gz.00 UNv1.0.en-ru.tar.gz.01 UNv1.0.en-ru.tar.gz.02" &
download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-ru.langid.tsv.gz" "gunzip WikiMatrix.v1.en-ru.langid.tsv.gz" ru WikiMatrix.v1.en-ru.langid.tsv 3 2 &
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.ru" | sort -V | xargs cat > all.ru
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter ru all.ru $DEST/train.ru_RU-en_XX.ru_RU en all.en $DEST/train.ru_RU-en_XX.en_XX
}
prepare_ccmt() {
mkdir -p ccmt
cd ccmt
# assume ccmt data is already unzipped under CCMT_DIR folder
cat $CCMT_DIR/datum2017/Book*_cn.txt | sed 's/ //g' > datum2017.detok.zh
cat $CCMT_DIR/datum2017/Book*_en.txt > datum2017.detok.en
cat $CCMT_DIR/casict2011/casict-A_ch.txt $CCMT_DIR/casict2011/casict-B_ch.txt $CCMT_DIR/casict2015/casict2015_ch.txt $CCMT_DIR/datum2015/datum_ch.txt $CCMT_DIR/neu2017/NEU_cn.txt datum2017.detok.zh > ccmt.zh
cat $CCMT_DIR/casict2011/casict-A_en.txt $CCMT_DIR/casict2011/casict-B_en.txt $CCMT_DIR/casict2015/casict2015_en.txt $CCMT_DIR/datum2015/datum_en.txt $CCMT_DIR/neu2017/NEU_en.txt datum2017.detok.en > ccmt.en
cd ..
ln -sf ccmt/ccmt.zh ccmt.zh
ln -sf ccmt/ccmt.en ccmt.en
}
prepare_zh() {
OUTPUT_DIR=$TMP_DIR/zh
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
download_and_select newscommentary "http://data.statmt.org/news-commentary/v15/training/news-commentary-v15.en-zh.tsv.gz" "gunzip news-commentary-v15.en-zh.tsv.gz" zh news-commentary-v15.en-zh.tsv 2 1 &
download_and_select wikititles "http://data.statmt.org/wikititles/v2/wikititles-v2.zh-en.tsv.gz" "gunzip wikititles-v2.zh-en.tsv.gz" zh wikititles-v2.zh-en.tsv 1 2 &
prepare_uncorpus "https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00 https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01" "UNv1.0.en-zh.tar.gz.00 UNv1.0.en-zh.tar.gz.01" &
prepare_ccmt &
download_and_select wikimatrix "http://data.statmt.org/wmt20/translation-task/WikiMatrix/WikiMatrix.v1.en-zh.langid.tsv.gz" "gunzip WikiMatrix.v1.en-zh.langid.tsv.gz" zh WikiMatrix.v1.en-zh.langid.tsv 3 2 &
wait
# remove previous results
rm -f all.??
find ./ -maxdepth 1 -name "*.zh" | sort -V | xargs cat > all.zh
find ./ -maxdepth 1 -name "*.en" | sort -V | xargs cat > all.en
lid_filter zh all.zh $DEST/train.zh_CN-en_XX.zh_CN en all.en $DEST/train.zh_CN-en_XX.en_XX
}
prepare_tests() {
OUTPUT_DIR=$TMP_DIR
mkdir -p $OUTPUT_DIR
cd $OUTPUT_DIR
wget -nc http://data.statmt.org/wmt20/translation-task/dev.tgz
tar -zxvf dev.tgz
cd dev
cat newsdev2020-jaen-src.ja.sgm | $UTILS/strip_sgm.sh > newsdev2020-jaen.ja
cat newsdev2020-jaen-ref.en.sgm | $UTILS/strip_sgm.sh > newsdev2020-jaen.en
split newsdev2020-jaen.ja -a 0 -n r/1/2 > $DEST/valid.ja_XX-en_XX.ja_XX
split newsdev2020-jaen.en -a 0 -n r/1/2 > $DEST/valid.ja_XX-en_XX.en_XX
split newsdev2020-jaen.ja -a 0 -n r/2/2 > $DEST/test.ja_XX-en_XX.ja_XX
split newsdev2020-jaen.en -a 0 -n r/2/2 > $DEST/test.ja_XX-en_XX.en_XX
cat newsdev2020-iuen-src.iu.sgm | strip_sgm.sh > newsdev2020-iuen.iu
cat newsdev2020-iuen-ref.en.sgm | strip_sgm.sh > newsdev2020-iuen.en
split newsdev2020-iuen.iu -a 0 -n r/1/2 > $DEST/valid.iu_CA-en_XX.iu_CA
split newsdev2020-iuen.en -a 0 -n r/1/2 > $DEST/valid.iu_CA-en_XX.en_XX
split newsdev2020-iuen.iu -a 0 -n r/2/2 > $DEST/test.iu_CA-en_XX.iu_CA
split newsdev2020-iuen.en -a 0 -n r/2/2 > $DEST/test.iu_CA-en_XX.en_XX
cat newsdev2020-taen-src.ta.sgm | strip_sgm.sh > newsdev2020-taen.ta
cat newsdev2020-taen-ref.en.sgm | strip_sgm.sh > newsdev2020-taen.en
split newsdev2020-taen.ta -a 0 -n r/1/2 > $DEST/valid.ta_IN-en_XX.ta_IN
split newsdev2020-taen.en -a 0 -n r/1/2 > $DEST/valid.ta_IN-en_XX.en_XX
split newsdev2020-taen.ta -a 0 -n r/2/2 > $DEST/test.ta_IN-en_XX.ta_IN
split newsdev2020-taen.en -a 0 -n r/2/2 > $DEST/test.ta_IN-en_XX.en_XX
cp wikipedia.dev.km-en.km $DEST/valid.km_KH-en_XX.km_KH
cp wikipedia.dev.km-en.en $DEST/valid.km_KH-en_XX.en_XX
cp wikipedia.devtest.km-en.km $DEST/test.km_KH-en_XX.km_KH
cp wikipedia.devtest.km-en.en $DEST/test.km_KH-en_XX.en_XX
cp wikipedia.dev.ps-en.ps $DEST/valid.ps_AF-en_XX.ps_AF
cp wikipedia.dev.ps-en.en $DEST/valid.ps_AF-en_XX.en_XX
cp wikipedia.devtest.ps-en.ps $DEST/test.ps_AF-en_XX.ps_AF
cp wikipedia.devtest.ps-en.en $DEST/test.ps_AF-en_XX.en_XX
cat newsdev2020-plen-src.pl.sgm | strip_sgm.sh > newsdev2020-plen.pl
cat newsdev2020-plen-ref.en.sgm | strip_sgm.sh > newsdev2020-plen.en
split newsdev2020-plen.pl -a 0 -n r/1/2 > $DEST/valid.pl_PL-en_XX.pl_PL
split newsdev2020-plen.en -a 0 -n r/1/2 > $DEST/valid.pl_PL-en_XX.en_XX
split newsdev2020-plen.pl -a 0 -n r/2/2 > $DEST/test.pl_PL-en_XX.pl_PL
split newsdev2020-plen.en -a 0 -n r/2/2 > $DEST/test.pl_PL-en_XX.en_XX
cat newstest2018-encs-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-cs_CZ.en_XX
cat newstest2018-encs-ref.cs.sgm | strip_sgm.sh > $DEST/valid.en_XX-cs_CZ.cs_CZ
cat newstest2019-encs-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-cs_CZ.en_XX
cat newstest2019-encs-ref.cs.sgm | strip_sgm.sh > $DEST/test.en_XX-cs_CZ.cs_CZ
cat newstest2018-deen-src.de.sgm | strip_sgm.sh > $DEST/valid.de_DE-en_XX.de_DE
cat newstest2018-deen-ref.en.sgm | strip_sgm.sh > $DEST/valid.de_DE-en_XX.en_XX
cat newstest2018-ende-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-de_DE.en_XX
cat newstest2018-ende-ref.de.sgm | strip_sgm.sh > $DEST/valid.en_XX-de_DE.de_DE
cat newstest2019-deen-src.de.sgm | strip_sgm.sh > $DEST/test.de_DE-en_XX.de_DE
cat newstest2019-deen-ref.en.sgm | strip_sgm.sh > $DEST/test.de_DE-en_XX.en_XX
cat newstest2019-ende-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-de_DE.en_XX
cat newstest2019-ende-ref.de.sgm | strip_sgm.sh > $DEST/test.en_XX-de_DE.de_DE
cat newstest2018-ruen-src.ru.sgm | strip_sgm.sh > $DEST/valid.ru_RU-en_XX.ru_RU
cat newstest2018-ruen-ref.en.sgm | strip_sgm.sh > $DEST/valid.ru_RU-en_XX.en_XX
cat newstest2018-enru-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-ru_RU.en_XX
cat newstest2018-enru-ref.ru.sgm | strip_sgm.sh > $DEST/valid.en_XX-ru_RU.ru_RU
cat newstest2019-ruen-src.ru.sgm | strip_sgm.sh > $DEST/test.ru_RU-en_XX.ru_RU
cat newstest2019-ruen-ref.en.sgm | strip_sgm.sh > $DEST/test.ru_RU-en_XX.en_XX
cat newstest2019-enru-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-ru_RU.en_XX
cat newstest2019-enru-ref.ru.sgm | strip_sgm.sh > $DEST/test.en_XX-ru_RU.ru_RU
cat newstest2018-zhen-src.zh.sgm | strip_sgm.sh > $DEST/valid.zh_CN-en_XX.zh_CN
cat newstest2018-zhen-ref.en.sgm | strip_sgm.sh > $DEST/valid.zh_CN-en_XX.en_XX
cat newstest2018-enzh-src.en.sgm | strip_sgm.sh > $DEST/valid.en_XX-zh_CN.en_XX
cat newstest2018-enzh-ref.zh.sgm | strip_sgm.sh > $DEST/valid.en_XX-zh_CN.zh_CN
cat newstest2019-zhen-src.zh.sgm | strip_sgm.sh > $DEST/test.zh_CN-en_XX.zh_CN
cat newstest2019-zhen-ref.en.sgm | strip_sgm.sh > $DEST/test.zh_CN-en_XX.en_XX
cat newstest2019-enzh-src.en.sgm | strip_sgm.sh > $DEST/test.en_XX-zh_CN.en_XX
cat newstest2019-enzh-ref.zh.sgm | strip_sgm.sh > $DEST/test.en_XX-zh_CN.zh_CN
}
mkdir -p $DEST
prepare_lid
prepare_moses
download_commoncrawl
prepare_ja &
prepare_ta &
prepare_km &
prepare_ps &
prepare_iu &
prepare_cs &
prepare_de &
prepare_pl &
prepare_ru &
prepare_zh &
# prepare valid/test set
prepare_tests &
# wait
# TODO remove intermediate files
# rm -rf $TMP_DIR
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_wmt20.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_wmt20.sh",
"repo_id": "COCO-LM",
"token_count": 11121
}
| 165 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq import options
from examples.noisychannel import rerank_options, rerank_utils
def score_lm(args):
using_nbest = args.nbest_list is not None
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
args.shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file, bpe_symbol=args.post_process, nbest=using_nbest
)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(
pre_gen, args.prefix_len, args.lm_name, lm_file=True
)
if args.language_model is not None and not os.path.isfile(lm_score_file):
print("STEP 4.5: language modeling for P(T)")
if args.lm_bpe_code is None:
bpe_status = "no bpe"
elif args.lm_bpe_code == "shared":
bpe_status = "shared"
else:
bpe_status = "different"
rerank_utils.lm_scoring(
lm_preprocessed_dir,
bpe_status,
gen_output,
pre_gen,
args.lm_dict,
args.lm_name,
args.language_model,
args.lm_bpe_code,
128,
lm_score_file,
args.target_lang,
args.source_lang,
prefix_len=args.prefix_len,
)
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_lm(args)
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/examples/noisychannel/rerank_score_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/noisychannel/rerank_score_lm.py",
"repo_id": "COCO-LM",
"token_count": 1096
}
| 166 |
# Finetuning RoBERTa on a custom classification task
This example shows how to finetune RoBERTa on the IMDB dataset, but should illustrate the process for most classification tasks.
### 1) Get the data
```bash
wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
tar zxvf aclImdb_v1.tar.gz
```
### 2) Format data
`IMDB` data has one data-sample in each file; the Python snippet below converts it into a single file each for train and dev, for ease of processing.
```python
import argparse
import os
import random
from glob import glob
random.seed(0)
def main(args):
for split in ['train', 'test']:
samples = []
for class_label in ['pos', 'neg']:
fnames = glob(os.path.join(args.datadir, split, class_label) + '/*.txt')
for fname in fnames:
with open(fname) as fin:
line = fin.readline()
samples.append((line, 1 if class_label == 'pos' else 0))
random.shuffle(samples)
out_fname = 'train' if split == 'train' else 'dev'
f1 = open(os.path.join(args.datadir, out_fname + '.input0'), 'w')
f2 = open(os.path.join(args.datadir, out_fname + '.label'), 'w')
for sample in samples:
f1.write(sample[0] + '\n')
f2.write(str(sample[1]) + '\n')
f1.close()
f2.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--datadir', default='aclImdb')
args = parser.parse_args()
main(args)
```
### 3) BPE encode
Run `multiprocessing_bpe_encoder`; you could also do this in the previous step for each sample, but that might be slower.
```bash
# Download encoder.json and vocab.bpe
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'
for SPLIT in train dev; do
python -m examples.roberta.multiprocessing_bpe_encoder \
--encoder-json encoder.json \
--vocab-bpe vocab.bpe \
--inputs "aclImdb/$SPLIT.input0" \
--outputs "aclImdb/$SPLIT.input0.bpe" \
--workers 60 \
--keep-empty
done
```
### 4) Preprocess data
```bash
# Download fairseq dictionary.
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt'
fairseq-preprocess \
--only-source \
--trainpref "aclImdb/train.input0.bpe" \
--validpref "aclImdb/dev.input0.bpe" \
--destdir "IMDB-bin/input0" \
--workers 60 \
--srcdict dict.txt
fairseq-preprocess \
--only-source \
--trainpref "aclImdb/train.label" \
--validpref "aclImdb/dev.label" \
--destdir "IMDB-bin/label" \
--workers 60
```
### 5) Run training
```bash
TOTAL_NUM_UPDATES=7812 # 10 epochs through IMDB for bsz 32
WARMUP_UPDATES=469 # 6 percent of the number of updates
LR=1e-05 # Peak LR for polynomial LR scheduler.
HEAD_NAME=imdb_head # Custom name for the classification head.
NUM_CLASSES=2 # Number of classes for the classification task.
MAX_SENTENCES=8 # Batch size.
ROBERTA_PATH=/path/to/roberta.large/model.pt
CUDA_VISIBLE_DEVICES=0 fairseq-train IMDB-bin/ \
--restore-file $ROBERTA_PATH \
--max-positions 512 \
--batch-size $MAX_SENTENCES \
--max-tokens 4400 \
--task sentence_prediction \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 \
--arch roberta_large \
--criterion sentence_prediction \
--classification-head-name $HEAD_NAME \
--num-classes $NUM_CLASSES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
--clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--max-epoch 10 \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--shorten-method "truncate" \
--find-unused-parameters \
--update-freq 4
```
The above command will finetune RoBERTa-large with an effective batch-size of 32
sentences (`--batch-size=8 --update-freq=4`). The expected
`best-validation-accuracy` after 10 epochs is ~96.5%.
If you run out of GPU memory, try decreasing `--batch-size` and increasing
`--update-freq` to compensate.
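As a quick sanity check on the hyperparameters above (assuming the standard 25,000-example IMDB training split):
```python
train_examples = 25_000                 # IMDB training set size
effective_batch = 8 * 4                 # --batch-size * --update-freq
epochs = 10

total_updates = int(train_examples / effective_batch * epochs)   # 7812 -> TOTAL_NUM_UPDATES
warmup_updates = round(0.06 * total_updates)                     # 469  -> WARMUP_UPDATES
print(total_updates, warmup_updates)
```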
### 6) Load model using hub interface
Now we can load the trained model checkpoint using the RoBERTa hub interface.
Assuming your checkpoints are stored in `checkpoints/`:
```python
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(
'checkpoints',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='IMDB-bin'
)
roberta.eval() # disable dropout
```
Finally you can make predictions using the `imdb_head` (or whatever you set
`--classification-head-name` to during training):
```python
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
tokens = roberta.encode('Best movie this year')
pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item())
assert pred == '1' # positive
tokens = roberta.encode('Worst movie ever')
pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item())
assert pred == '0' # negative
```
|
COCO-LM/fairseq/examples/roberta/README.custom_classification.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/README.custom_classification.md",
"repo_id": "COCO-LM",
"token_count": 2196
}
| 167 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from . import wsc_utils
@register_task("wsc")
class WSCTask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == "gpt2":
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "wsc", "Must set --criterion=wsc"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=append_eos,
add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[: pronoun_span.start].text
suffix = sentence[pronoun_span.end :].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = (
" " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else ""
)
trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else ""
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text,
prefix,
suffix,
leading_space,
trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1] * len(labels))
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"labels": labels,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + "\n").encode("utf-8"))
dataset = self.load_dataset(
"disambiguate_pronoun",
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample["candidate_tokens"][0],
sample["candidate_masks"][0],
)
if sample["query_tokens"][0] is not None:
query_lprobs = get_lprobs(
sample["query_tokens"][0].unsqueeze(0),
sample["query_masks"][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample["candidate_tokens"][0][best_idx]
mask = sample["candidate_masks"][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task("winogrande")
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "winogrande", "Must set --criterion=winogrande"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test"))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[: pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1] :]
leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else ""
trailing_space = ""
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query,
prefix,
suffix,
leading_space,
trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text,
prefix,
suffix,
leading_space,
trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(
candidate_tokens, candidate_lengths, self.vocab.pad()
)
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
|
COCO-LM/fairseq/examples/roberta/wsc/wsc_task.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/wsc/wsc_task.py",
"repo_id": "COCO-LM",
"token_count": 6705
}
| 168 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import requests
from scorers import build_scorer
class SimulSTEvaluationService(object):
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 12321
def __init__(self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT):
self.hostname = hostname
self.port = port
self.base_url = f"http://{self.hostname}:{self.port}"
def __enter__(self):
self.new_session()
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def new_session(self):
# start eval session
url = f"{self.base_url}"
try:
_ = requests.post(url)
except Exception as e:
print(f"Failed to start an evaluation session: {e}")
print("Evaluation session started.")
return self
def get_scores(self):
# end eval session
url = f"{self.base_url}/result"
try:
r = requests.get(url)
print("Scores: {}".format(r.json()))
print("Evaluation session finished.")
except Exception as e:
print(f"Failed to end an evaluation session: {e}")
def get_src(self, sent_id: int, extra_params: Optional[dict] = None) -> str:
url = f"{self.base_url}/src"
params = {"sent_id": sent_id}
if extra_params is not None:
for key in extra_params.keys():
params[key] = extra_params[key]
try:
r = requests.get(url, params=params)
except Exception as e:
print(f"Failed to request a source segment: {e}")
return r.json()
def send_hypo(self, sent_id: int, hypo: str) -> None:
url = f"{self.base_url}/hypo"
params = {"sent_id": sent_id}
try:
requests.put(url, params=params, data=hypo.encode("utf-8"))
except Exception as e:
print(f"Failed to send a translated segment: {e}")
def corpus_info(self):
url = f"{self.base_url}"
try:
r = requests.get(url)
except Exception as e:
print(f"Failed to request corpus information: {e}")
return r.json()
class SimulSTLocalEvaluationService(object):
def __init__(self, args):
self.scorer = build_scorer(args)
def get_scores(self):
return self.scorer.score()
def get_src(self, sent_id: int, extra_params: Optional[dict] = None) -> str:
if extra_params is not None:
segment_size = extra_params.get("segment_size", None)
else:
segment_size = None
return self.scorer.send_src(int(sent_id), segment_size)
def send_hypo(self, sent_id: int, hypo: str) -> None:
list_of_tokens = hypo.strip().split()
self.scorer.recv_hyp(sent_id, list_of_tokens)
def corpus_info(self):
return self.scorer.get_info()
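# Illustrative usage of the remote service above (added comment; assumes an
# evaluation server is reachable on the default host/port):
#
#     with SimulSTEvaluationService() as client:
#         info = client.corpus_info()
#         segment = client.get_src(0, {"segment_size": 10})
#         client.send_hypo(0, "a translated segment")
#         client.get_scores()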
|
COCO-LM/fairseq/examples/simultaneous_translation/eval/client.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/client.py",
"repo_id": "COCO-LM",
"token_count": 1349
}
| 169 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
Implementing exclusive cumprod.
There is cumprod in pytorch, however there is no exclusive mode.
    cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i]
exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
"""
tensor_size = list(tensor.size())
tensor_size[dim] = 1
return_tensor = safe_cumprod(
torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
dim=dim,
eps=eps,
)
if dim == 0:
return return_tensor[:-1]
elif dim == 1:
return return_tensor[:, :-1]
elif dim == 2:
return return_tensor[:, :, :-1]
else:
raise RuntimeError("Cumprod on dimension 3 and more is not implemented")
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
An implementation of cumprod to prevent precision issue.
cumprod(x)
= [x1, x1x2, x1x2x3, ....]
= [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
= exp(cumsum(log(x)))
"""
if (tensor + eps < 0).any().item():
raise RuntimeError(
"Safe cumprod can only take non-negative tensors as input."
"Consider use torch.cumprod if you want to calculate negative values."
)
log_tensor = torch.log(tensor + eps)
cumsum_log_tensor = torch.cumsum(log_tensor, dim)
exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
return exp_cumsum_log_tensor
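# Illustrative note (added comment): with eps=1e-10 the results are exact up
# to a tiny numerical error, e.g. for x = torch.tensor([2.0, 3.0, 4.0]):
#   safe_cumprod(x, dim=0)      -> approximately [2., 6., 24.]
#   exclusive_cumprod(x, dim=0) -> approximately [1., 2., 6.]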
def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):
"""
Convert a tensor of lengths to mask
For example, lengths = [[2, 3, 4]], max_len = 5
mask =
[[1, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]
"""
assert len(lengths.size()) <= 2
    if len(lengths.size()) == 2:
        if dim == 1:
            lengths = lengths.t()
    else:
        lengths = lengths.unsqueeze(1)
# lengths : batch_size, 1
lengths = lengths.view(-1, 1)
batch_size = lengths.size(0)
# batch_size, max_len
mask = torch.arange(max_len).expand(batch_size, max_len).type_as(lengths) < lengths
if negative_mask:
mask = ~mask
if dim == 0:
# max_len, batch_size
mask = mask.t()
return mask
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
x : src_len, batch_size
start_idx : start idx
end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
assert start_idx > 0 and end_idx > 0
assert len(x.size()) == 2
src_len, batch_size = x.size()
# batch_size, 1, src_len
x = x.t().unsqueeze(1)
# batch_size, 1, src_len
moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])
moving_sum = (
torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
)
.squeeze(1)
.t()
)
moving_sum = moving_sum[end_idx:-start_idx]
assert src_len == moving_sum.size(0)
assert batch_size == moving_sum.size(1)
return moving_sum
|
COCO-LM/fairseq/examples/simultaneous_translation/utils/functions.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/utils/functions.py",
"repo_id": "COCO-LM",
"token_count": 1936
}
| 170 |
# @package _group_
defaults:
- task: null
- model: null
hydra:
run:
dir: ${common_eval.results_path}/${dataset.gen_subset}
sweep:
dir: ${common_eval.results_path}
subdir: ${dataset.gen_subset}
common_eval:
results_path: ${decoding.exp_dir}/decode/${decoding.decoder.name}
path: ${decoding.exp_dir}/checkpoint_best.pt
post_process: letter
generation:
nbest: 1
beam: 500
dataset:
max_tokens: 1000000
gen_subset: test
|
COCO-LM/fairseq/examples/speech_recognition/hydra/conf/infer.yaml/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/hydra/conf/infer.yaml",
"repo_id": "COCO-LM",
"token_count": 194
}
| 171 |
[[Back]](..)
# S2T Example: Speech Translation (ST) on MuST-C
[MuST-C](https://www.aclweb.org/anthology/N19-1202) is a multilingual speech-to-text translation corpus with
8-language translations on English TED talks. We match the state-of-the-art performance in
[ESPNet-ST](https://arxiv.org/pdf/2004.10234.pdf) with a simpler model training pipeline.
## Data Preparation
[Download](https://ict.fbk.eu/must-c) and unpack MuST-C data to a path
`${MUSTC_ROOT}/en-${TARGET_LANG_ID}`, then preprocess it with
```bash
# additional Python packages for S2T data processing/model training
pip install pandas torchaudio soundfile sentencepiece
# Generate TSV manifests, features, vocabulary
# and configuration for each language
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task asr \
--vocab-type unigram --vocab-size 5000
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task st \
--vocab-type unigram --vocab-size 8000
# Add vocabulary and configuration for joint data
# (based on the manifests and features generated above)
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task asr --joint \
--vocab-type unigram --vocab-size 10000
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task st --joint \
--vocab-type unigram --vocab-size 10000
```
The generated files (manifest, features, vocabulary and data configuration) will be added to
`${MUSTC_ROOT}/en-${TARGET_LANG_ID}` (per-language data) and `MUSTC_ROOT` (joint data).
Download our vocabulary files if you want to use our pre-trained models:
- ASR: [En-De](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_de_asr_vocab_unigram5000.zip), [En-Nl](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_nl_asr_vocab_unigram5000.zip), [En-Es](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_es_asr_vocab_unigram5000.zip), [En-Fr](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_fr_asr_vocab_unigram5000.zip), [En-It](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_it_asr_vocab_unigram5000.zip), [En-Pt](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_pt_asr_vocab_unigram5000.zip), [En-Ro](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ro_asr_vocab_unigram5000.zip), [En-Ru](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ru_asr_vocab_unigram5000.zip), [Joint](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_joint_asr_vocab_unigram10000.zip)
- ST: [En-De](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_de_st_vocab_unigram8000.zip), [En-Nl](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_nl_st_vocab_unigram8000.zip), [En-Es](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_es_st_vocab_unigram8000.zip), [En-Fr](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_fr_st_vocab_unigram8000.zip), [En-It](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_it_st_vocab_unigram8000.zip), [En-Pt](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_pt_st_vocab_unigram8000.zip), [En-Ro](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ro_st_vocab_unigram8000.zip), [En-Ru](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ru_st_vocab_unigram8000.zip), [Multilingual](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_multilingual_st_vocab_unigram10000.zip)
## ASR
#### Training
En-De as example:
```bash
fairseq-train ${MUSTC_ROOT}/en-de \
--config-yaml config_asr.yaml --train-subset train_asr --valid-subset dev_asr \
--save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --optimizer adam --lr 1e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
```
For joint model (using ASR data from all 8 directions):
```bash
fairseq-train ${MUSTC_ROOT} \
--config-yaml config_asr.yaml \
--train-subset train_de_asr,train_nl_asr,train_es_asr,train_fr_asr,train_it_asr,train_pt_asr,train_ro_asr,train_ru_asr \
--valid-subset dev_de_asr,dev_nl_asr,dev_es_asr,dev_fr_asr,dev_it_asr,dev_pt_asr,dev_ro_asr,dev_ru_asr \
--save-dir ${JOINT_ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --optimizer adam --lr 1e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
```
where `ASR_SAVE_DIR` (`JOINT_ASR_SAVE_DIR`) is the checkpoint root path. We set `--update-freq 8` to simulate 8 GPUs
with 1 GPU. You may want to update it accordingly when using more than 1 GPU.
#### Inference & Evaluation
```bash
CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
python scripts/average_checkpoints.py \
--inputs ${ASR_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}"
fairseq-generate ${MUSTC_ROOT}/en-de \
--config-yaml config_asr.yaml --gen-subset tst-COMMON_asr --task speech_to_text \
--path ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 \
--scoring wer --wer-tokenizer 13a --wer-lowercase --wer-remove-punct
# For models trained on joint data
python scripts/average_checkpoints.py \
--inputs ${JOINT_ASR_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${JOINT_ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}"
for LANG in de nl es fr it pt ro ru; do
fairseq-generate ${MUSTC_ROOT} \
--config-yaml config_asr.yaml --gen-subset tst-COMMON_${LANG}_asr --task speech_to_text \
--path ${JOINT_ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 \
--scoring wer --wer-tokenizer 13a --wer-lowercase --wer-remove-punct
done
```
#### Results
| Data | --arch | Params | En-De | En-Nl | En-Es | En-Fr | En-It | En-Pt | En-Ro | En-Ru | Model |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Single | s2t_transformer_s | 31M | [18.2](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_de_asr_transformer_s.pt) | [17.6](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_nl_asr_transformer_s.pt) | [17.7](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_es_asr_transformer_s.pt) | [17.2](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_fr_asr_transformer_s.pt) | [17.9](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_it_asr_transformer_s.pt) | [19.1](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_pt_asr_transformer_s.pt) | [18.1](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ro_asr_transformer_s.pt) | [17.7](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ru_asr_transformer_s.pt) | (<-Download) |
| Joint | s2t_transformer_m | 76M | 16.8 | 16.7 | 16.9 | 16.9 | 17.0 | 17.4 | 17.0 | 16.9 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_joint_asr_transformer_m.pt) |
## ST
#### Training
En-De as example:
```bash
fairseq-train ${MUSTC_ROOT}/en-de \
--config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \
--save-dir ${ST_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 \
--load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}
```
For multilingual model (all 8 directions):
```bash
fairseq-train ${MUSTC_ROOT} \
--config-yaml config_st.yaml \
--train-subset train_de_st,train_nl_st,train_es_st,train_fr_st,train_it_st,train_pt_st,train_ro_st,train_ru_st \
--valid-subset dev_de_st,dev_nl_st,dev_es_st,dev_fr_st,dev_it_st,dev_pt_st,dev_ro_st,dev_ru_st \
--save-dir ${MULTILINGUAL_ST_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --ignore-prefix-size 1 --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 \
--load-pretrained-encoder-from ${JOINT_ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}
```
where `ST_SAVE_DIR` (`MULTILINGUAL_ST_SAVE_DIR`) is the checkpoint root path. The ST encoder is pre-trained by ASR
for faster training and better performance: `--load-pretrained-encoder-from <(JOINT_)ASR checkpoint path>`. We set
`--update-freq 8` to simulate 8 GPUs with 1 GPU. You may want to update it accordingly when using more than 1 GPU.
For multilingual models, we prepend target language ID token as target BOS, which should be excluded from
the training loss via `--ignore-prefix-size 1`.
#### Inference & Evaluation
Average the last 10 checkpoints and evaluate on the `tst-COMMON` split:
```bash
CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
python scripts/average_checkpoints.py \
--inputs ${ST_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${ST_SAVE_DIR}/${CHECKPOINT_FILENAME}"
fairseq-generate ${MUSTC_ROOT}/en-de \
--config-yaml config_st.yaml --gen-subset tst-COMMON_st --task speech_to_text \
--path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
--max-tokens 50000 --beam 5 --scoring sacrebleu
# For multilingual models
python scripts/average_checkpoints.py \
--inputs ${MULTILINGUAL_ST_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${MULTILINGUAL_ST_SAVE_DIR}/${CHECKPOINT_FILENAME}"
for LANG in de nl es fr it pt ro ru; do
fairseq-generate ${MUSTC_ROOT} \
--config-yaml config_st.yaml --gen-subset tst-COMMON_${LANG}_st --task speech_to_text \
--prefix-size 1 --path ${MULTILINGUAL_ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
--max-tokens 50000 --beam 5 --scoring sacrebleu
done
```
For multilingual models, we force decoding from the target language ID token (as BOS) via `--prefix-size 1`.
#### Results
| Data | --arch | Params | En-De | En-Nl | En-Es | En-Fr | En-It | En-Pt | En-Ro | En-Ru | Model |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Bilingual | s2t_transformer_s | 31M | [22.7](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_de_st_transformer_s.pt) | [27.3](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_nl_st_transformer_s.pt) | [27.2](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_es_st_transformer_s.pt) | [32.9](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_fr_st_transformer_s.pt) | [22.7](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_it_st_transformer_s.pt) | [28.1](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_pt_st_transformer_s.pt) | [21.9](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ro_st_transformer_s.pt) | [15.3](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_ru_st_transformer_s.pt) | (<-Download) |
| Multilingual | s2t_transformer_m | 76M | 24.5 | 28.6 | 28.2 | 34.9 | 24.6 | 31.1 | 23.8 | 16.0 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/mustc_multilingual_st_transformer_m.pt) |
[[Back]](..)
|
COCO-LM/fairseq/examples/speech_to_text/docs/mustc_example.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/docs/mustc_example.md",
"repo_id": "COCO-LM",
"token_count": 4426
}
| 172 |
# WMT 20
This page provides pointers to the models of Facebook-FAIR's WMT'20 news translation task submission [(Chen et al., 2020)](https://arxiv.org/abs/2011.08298).
## Single best MT models (after finetuning on part of WMT20 news dev set)
Model | Description | Download
---|---|---
`transformer.wmt20.ta-en` | Ta->En | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz)
`transformer.wmt20.en-ta` | En->Ta | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz)
`transformer.wmt20.iu-en.news` | Iu->En (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz)
`transformer.wmt20.en-iu.news` | En->Iu (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz)
`transformer.wmt20.iu-en.nh` | Iu->En (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz)
`transformer.wmt20.en-iu.nh` | En->Iu (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz)
## Language models
Model | Description | Download
---|---|---
`transformer_lm.wmt20.en` | En Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en.tar.gz)
`transformer_lm.wmt20.ta` | Ta Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta.tar.gz)
`transformer_lm.wmt20.iu.news` | Iu Language Model (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.news.tar.gz)
`transformer_lm.wmt20.iu.nh` | Iu Language Model (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.nh.tar.gz)
## Example usage (torch.hub)
#### Translation
```python
import torch
# English to Tamil translation
en2ta = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-ta')
en2ta.translate("Machine learning is great!") # 'இயந்திரக் கற்றல் அருமை!'
# Tamil to English translation
ta2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.ta-en')
ta2en.translate("இயந்திரக் கற்றல் அருமை!") # 'Machine learning is great!'
# English to Inuktitut translation
en2iu = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-iu.news')
en2iu.translate("machine learning is great!") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!'
# Inuktitut to English translation
iu2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.iu-en.news')
iu2en.translate("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!") # 'Machine learning excellence!'
```
#### Language Modeling
```python
# Sample from the English LM
en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.en')
en_lm.sample("Machine learning is") # 'Machine learning is a type of artificial intelligence that uses machine learning to learn from data and make predictions.'
# Sample from the Tamil LM
ta_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.ta')
ta_lm.sample("இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின்") # 'இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின் ஒரு பகுதியாகும்.'
# Sample from the Inuktitut LM
iu_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.iu.news')
iu_lm.sample("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ, ᐊᒻᒪᓗ ᓯᓚᐅᑉ ᐊᓯᙳᖅᐸᓪᓕᐊᓂᖓᓄᑦ ᖃᓄᐃᓕᐅᕈᑎᒃᓴᑦ, ᐃᓚᖃᖅᖢᑎᒃ ᐅᑯᓂᖓ:'
```
## Citation
```bibtex
@inproceedings{chen2020facebook,
title={Facebook AI's WMT20 News Translation Task Submission},
author={Peng-Jen Chen and Ann Lee and Changhan Wang and Naman Goyal and Angela Fan and Mary Williamson and Jiatao Gu},
booktitle={Proc. of WMT},
year={2020},
}
```
|
COCO-LM/fairseq/examples/wmt20/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/wmt20/README.md",
"repo_id": "COCO-LM",
"token_count": 2042
}
| 173 |
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "edit_dist.h"
#include <THC/THC.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <utility> // std::pair
template <typename scalar_t>
__global__ void generate_deletion_label_kernel(
const scalar_t* __restrict__ source,
const size_t source_size,
const size_t operation_size,
int* __restrict__ operations,
int* __restrict__ labels) {
const int index = blockIdx.x;
const int offset = index * operation_size;
const int offset_label = index * source_size;
for (int i = 0; i < source_size; i++) {
labels[offset_label + i] = 0;
}
int k = 0;
for (int i = 0; i < operation_size; i++){
if (operations[offset + i] == 0){
break;
} else if (operations[offset + i] == 1){
continue;
} else {
labels[offset_label + k] = 3 - operations[offset + i];
k++;
}
}
}
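// Note on the operation encoding shared by the kernels in this file (added
// comment, derived from the back-tracing logic below): operations[k] takes
// 0 = padding, 1 = insertion, 2 = deletion and 3 = keep/match. The deletion
// kernel above turns these into {0, 1} delete labels over source tokens,
// while the insertion kernel below counts how many insertions precede each
// kept target token.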
template <typename scalar_t>
__global__ void generate_insertion_label_kernel(
const scalar_t* __restrict__ target,
const size_t target_size,
const size_t operation_size,
int* __restrict__ operations,
int* __restrict__ labels,
int* __restrict__ masks) {
const int index = blockIdx.x;
const int offset = index * operation_size;
const int offset_label = index * target_size;
int k = 0;
int u = 0;
int m = 0;
for (int i = 0; i < target_size; i++) {
labels[offset_label + i] = 0;
masks[offset_label + i] = 0;
}
for (int i = 0; i < operation_size-1; i++){
if (operations[offset + i] == 0){
break;
} else if (operations[offset + i] == 2){
continue;
} else if (operations[offset + i] == 1){
masks[offset_label + m] = 1;
u++; m++;
} else {
labels[offset_label + k] = u;
masks[offset_label + m] = 0;
k++; m++;
u = 0;
}
}
}
template <typename scalar_t>
__global__ void levenshtein_distance_kernel(
const scalar_t* __restrict__ source,
const scalar_t* __restrict__ target,
const int* __restrict__ source_length,
const int* __restrict__ target_length,
const size_t source_size,
const size_t target_size,
int* __restrict__ operations,
int* __restrict__ errors_curr) {
const int index = blockIdx.x;
const int offset = index * (source_size + target_size);
const int d = index * (source_size + 1) * (target_size + 1);
const int t = target_size + 1;
auto err_idx = [d, t](int i, int j) { return d + i * t + j; };
auto opt_idx = [offset](int k) { return offset + k; };
const int hyp_len = source_length[index];
const int ref_len = target_length[index];
const scalar_t* hyp_begin = source + index * source_size;
const scalar_t* ref_begin = target + index * target_size;
// dynamic programming
for (int i = 0; i <= hyp_len; i++){
errors_curr[err_idx(i, 0)] = i;
}
for (int j = 0; j <= ref_len; j++){
errors_curr[err_idx(0, j)] = j;
}
for (int i = 1; i <= hyp_len; i++){
for (int j = 1; j <= ref_len; j++){
errors_curr[err_idx(i, j)] = min(
min(
errors_curr[err_idx(i-1, j)],
errors_curr[err_idx(i, j-1)]
) + 1,
errors_curr[err_idx(i-1, j-1)] + 2 * (
*(hyp_begin+i-1) == *(ref_begin+j-1) ? 0 : 1
)
);
}
}
// back-tracing
int i = hyp_len;
int j = ref_len;
int o = hyp_len + ref_len;
for (int k = 0; k < source_size + target_size; k++) {
operations[opt_idx(k)] = 0;
}
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) {
o--; operations[opt_idx(o)] = 1; j--; // insertion
} else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) {
o--; operations[opt_idx(o)] = 2; i--; // deletion
} else {
o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing
}
}
// moving to the left
for (int k = 0; k < hyp_len + ref_len; k++) {
if (k + o < hyp_len + ref_len){
operations[opt_idx(k)] = operations[opt_idx(k+o)];
} else{
operations[opt_idx(k)] = 0; // padding
}
}
}
template <typename scalar_t>
__global__ void faster_levenshtein_distance_kernel(
const scalar_t* __restrict__ source,
const scalar_t* __restrict__ target,
const int* __restrict__ source_length,
const int* __restrict__ target_length,
const size_t source_size,
const size_t target_size,
int* __restrict__ operations) {
extern __shared__ short errors[];
auto errors_curr = errors;
const int index = blockIdx.x;
const int offset = index * (source_size + target_size);
const int t = target_size + 1;
auto err_idx = [t](int i, int j) { return i * t + j; };
auto opt_idx = [offset](int k) { return offset + k; };
const int hyp_len = source_length[index];
const int ref_len = target_length[index];
const scalar_t* hyp_begin = source + index * source_size;
const scalar_t* ref_begin = target + index * target_size;
// dynamic programming
for (int i = 0; i <= hyp_len; i++){
errors_curr[err_idx(i, 0)] = i;
}
for (int j = 0; j <= ref_len; j++){
errors_curr[err_idx(0, j)] = j;
}
for (int i = 1; i <= hyp_len; i++){
for (int j = 1; j <= ref_len; j++){
errors_curr[err_idx(i, j)] = min(
min(
errors_curr[err_idx(i-1, j)],
errors_curr[err_idx(i, j-1)]
) + 1,
errors_curr[err_idx(i-1, j-1)] + 2 * (
*(hyp_begin+i-1) == *(ref_begin+j-1) ? 0 : 1
)
);
}
}
// back-tracing
int i = hyp_len;
int j = ref_len;
int o = hyp_len + ref_len;
for (int k = 0; k < source_size + target_size; k++) {
operations[opt_idx(k)] = 0;
}
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) {
o--; operations[opt_idx(o)] = 1; j--; // insertion
} else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) {
o--; operations[opt_idx(o)] = 2; i--; // deletion
} else {
o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing
}
}
// moving to the left
for (int k = 0; k < hyp_len + ref_len; k++) {
if (k + o < hyp_len + ref_len){
operations[opt_idx(k)] = operations[opt_idx(k+o)];
} else{
operations[opt_idx(k)] = 0; // padding
}
}
}
torch::Tensor GenerateDeletionLabelCuda(
torch::Tensor source,
torch::Tensor operations) {
const auto batch_size = source.size(0);
at::TensorOptions options(source.device());
options = options.dtype(at::ScalarType::Int);
auto labels = torch::empty({batch_size, source.size(1)}, options);
auto stream = at::cuda::getCurrentCUDAStream(source.device().index());
AT_DISPATCH_ALL_TYPES(source.scalar_type(), "generate_deletion_labels", ([&] {
generate_deletion_label_kernel<scalar_t><<<batch_size, 1, 0, stream>>>(
source.data_ptr<scalar_t>(),
source.size(1),
operations.size(1),
operations.data_ptr<int>(),
labels.data_ptr<int>());
}));
return labels;
}
std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
torch::Tensor target,
torch::Tensor operations) {
const auto batch_size = target.size(0);
at::TensorOptions options(target.device());
options = options.dtype(at::ScalarType::Int);
auto labels = torch::empty({batch_size, target.size(1)}, options);
auto masks = torch::empty({batch_size, target.size(1)}, options);
auto stream = at::cuda::getCurrentCUDAStream(target.device().index());
AT_DISPATCH_ALL_TYPES(target.scalar_type(), "generate_insertion_labels", ([&] {
generate_insertion_label_kernel<scalar_t><<<batch_size, 1, 0, stream>>>(
target.data_ptr<scalar_t>(),
target.size(1),
operations.size(1),
operations.data_ptr<int>(),
labels.data_ptr<int>(),
masks.data_ptr<int>());
}));
return std::make_pair(labels, masks);
}
torch::Tensor LevenshteinDistanceCuda(
torch::Tensor source,
torch::Tensor target,
torch::Tensor source_length,
torch::Tensor target_length) {
const auto batch_size = source.size(0);
const auto shared_size = (source.size(1) + 1) * (target.size(1) + 1) * sizeof(short);
at::TensorOptions options(source.device());
options = options.dtype(at::ScalarType::Int);
auto operations = torch::empty({batch_size, source.size(1) + target.size(1)}, options);
auto stream = at::cuda::getCurrentCUDAStream(source.device().index());
if (shared_size > 40000) {
auto distances = torch::empty({batch_size, (source.size(1) + 1) * (target.size(1) + 1)}, options);
AT_DISPATCH_ALL_TYPES(source.scalar_type(), "levenshtein_distance", ([&] {
levenshtein_distance_kernel<scalar_t><<<batch_size, 1, 0, stream>>>(
source.data_ptr<scalar_t>(),
target.data_ptr<scalar_t>(),
source_length.data_ptr<int>(),
target_length.data_ptr<int>(),
source.size(1),
target.size(1),
operations.data_ptr<int>(),
distances.data_ptr<int>());
}));
} else {
AT_DISPATCH_ALL_TYPES(source.scalar_type(), "faster_levenshtein_distance", ([&] {
faster_levenshtein_distance_kernel<scalar_t><<<batch_size, 1, shared_size, stream>>>(
source.data_ptr<scalar_t>(),
target.data_ptr<scalar_t>(),
source_length.data_ptr<int>(),
target_length.data_ptr<int>(),
source.size(1),
target.size(1),
operations.data_ptr<int>());
}));
}
return operations;
}
|
COCO-LM/fairseq/fairseq/clib/libnat_cuda/edit_dist.cu/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/clib/libnat_cuda/edit_dist.cu",
"repo_id": "COCO-LM",
"token_count": 5172
}
| 174 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.criterions.fairseq_criterion import ( # noqa
FairseqCriterion,
LegacyFairseqCriterion,
)
from omegaconf import DictConfig
(
build_criterion_,
register_criterion,
CRITERION_REGISTRY,
CRITERION_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--criterion", base_class=FairseqCriterion, default="cross_entropy"
)
def build_criterion(cfg: DictConfig, task):
return build_criterion_(cfg, task)
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.criterions." + file_name)
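# Illustrative sketch (added comment, not part of the original file): a new
# criterion would typically be registered through the decorator created above,
# e.g.
#
#     from fairseq.criterions import FairseqCriterion, register_criterion
#
#     @register_criterion("my_criterion")  # hypothetical name
#     class MyCriterion(FairseqCriterion):
#         def forward(self, model, sample, reduce=True):
#             ...
#
# Because of the auto-import loop above, placing such a module inside
# fairseq/criterions/ is enough to make it selectable via
# ``--criterion my_criterion``.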
|
COCO-LM/fairseq/fairseq/criterions/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/__init__.py",
"repo_id": "COCO-LM",
"token_count": 351
}
| 175 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
@dataclass
class Wav2VecCriterionConfig(FairseqDataclass):
infonce: bool = field(
default=False,
metadata={
"help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
from fairseq.utils import index_put, is_xla_tensor
@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
super().__init__(task)
self.infonce = infonce
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
self.xla = is_xla_tensor(logits)
# XXX: handle weights on xla.
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
reduction = "none" if ((not reduce) or self.xla) else "sum"
if self.infonce:
loss = F.cross_entropy(logits, target, reduction=reduction)
else:
loss = F.binary_cross_entropy_with_logits(
logits, target.float(), weights, reduction=reduction
)
if self.xla:
# tpu-comment: since dynamic shapes lead to recompilations on xla,
# we don't shrink tensors using mask_indices.
# Instead, we use mask indices to adjust loss.
mi = (
sample['net_input']['mask_indices']
.transpose(0, 1) # logits are transposed in `model.get_logits`
.reshape(logits.size(0))
)
loss = (loss * mi).sum() if reduce else (loss * mi)
if 'sample_size' in sample and self.infonce:
sample_size = sample['sample_size']
elif 'mask_indices' in sample['net_input']:
sample_size = sample['net_input']['mask_indices'].sum()
else:
sample_size = target.numel() if self.infonce else target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
self.loss_weights
), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
logging_output = {
"loss": loss.item() if (reduce and not self.xla) else loss.detach(),
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
logging_output["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
logging_output["target"] = target.cpu().numpy()
elif lk in net_output:
value = net_output[lk]
if not is_xla_tensor(value):
value = float(value)
logging_output[lk] = value
if len(losses) > 1:
for i, l in enumerate(losses):
logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
if is_xla_tensor(logits):
max, min = max * mi, min * mi
both = max & min
corr = max.long().sum() - both.long().sum()
count = mi.sum()
else:
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = float(max.numel())
logging_output["correct"] = corr
logging_output["count"] = count
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
correct = sum(log.get("correct", 0) for log in logging_outputs)
metrics.log_scalar("_correct", correct)
total = sum(log.get("count", 0) for log in logging_outputs)
metrics.log_scalar("_total", total)
if total > 0:
metrics.log_derived(
"accuracy",
lambda meters: safe_round(
meters["_correct"].sum / meters["_total"].sum, 5
)
if meters["_total"].sum > 0
else float("nan"),
)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"correct",
"count",
}
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss"):
metrics.log_scalar(
k, val / (sample_size or 1) / math.log(2), sample_size, round=3
)
else:
metrics.log_scalar(k, val / len(logging_outputs), round=3)
# FIXME: revert when gather based xla reduction is implemented
#@staticmethod
#def logging_outputs_can_be_summed() -> bool:
def logging_outputs_can_be_summed(self) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
# XXX: Gather based reduction not implemented for xla yet.
# So we fall to sum based reduction for xla.
return self.xla
|
COCO-LM/fairseq/fairseq/criterions/wav2vec_criterion.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/wav2vec_criterion.py",
"repo_id": "COCO-LM",
"token_count": 4184
}
| 176 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class ConcatDataset(FairseqDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
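    # Example (added comment): for datasets of lengths [10, 5] with
    # sample_ratios [1, 2], cumsum returns [10, 20], i.e. the second dataset
    # is virtually upsampled 2x before the cumulative boundaries are taken.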
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
        # For now this only supports datasets with the same underlying collater implementation
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
        Returns indices sorted by length so that less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
|
COCO-LM/fairseq/fairseq/data/concat_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/concat_dataset.py",
"repo_id": "COCO-LM",
"token_count": 2203
}
| 177 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.data.encoders import register_bpe
from fairseq.dataclass import FairseqDataclass
from fairseq import file_utils
@dataclass
class HuggingFaceByteLevelBPEConfig(FairseqDataclass):
bpe_merges: str = field(default="???", metadata={"help": "path to merges.txt"})
bpe_vocab: str = field(default="???", metadata={"help": "path to vocab.json"})
bpe_add_prefix_space: bool = field(
default=False, metadata={"help": "add prefix space before encoding"}
)
@register_bpe("hf_byte_bpe", dataclass=HuggingFaceByteLevelBPEConfig)
class HuggingFaceByteLevelBPE(object):
def __init__(self, cfg):
try:
from tokenizers import ByteLevelBPETokenizer
except ImportError:
raise ImportError(
"Please install huggingface/tokenizers with: " "pip install tokenizers"
)
bpe_vocab = file_utils.cached_path(cfg.bpe_vocab)
bpe_merges = file_utils.cached_path(cfg.bpe_merges)
self.bpe = ByteLevelBPETokenizer(
bpe_vocab,
bpe_merges,
add_prefix_space=cfg.bpe_add_prefix_space,
)
def encode(self, x: str) -> str:
return " ".join(map(str, self.bpe.encode(x).ids))
def decode(self, x: str) -> str:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
)
def is_beginning_of_word(self, x: str) -> bool:
return self.decode(x).startswith(" ")
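# Illustrative usage (added comment; the paths are placeholders for local
# merges/vocab files):
#
#     cfg = HuggingFaceByteLevelBPEConfig(
#         bpe_merges="/path/to/merges.txt", bpe_vocab="/path/to/vocab.json"
#     )
#     bpe = HuggingFaceByteLevelBPE(cfg)
#     ids = bpe.encode("Hello world")  # space-joined token-id string
#     text = bpe.decode(ids)           # round-trips back to "Hello world"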
|
COCO-LM/fairseq/fairseq/data/encoders/hf_byte_bpe.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/hf_byte_bpe.py",
"repo_id": "COCO-LM",
"token_count": 723
}
| 178 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data import Dictionary
class MaskedLMDictionary(Dictionary):
"""
Dictionary for Masked Language Modelling tasks. This extends Dictionary by
adding the mask symbol.
"""
def __init__(
self,
pad="<pad>",
eos="</s>",
unk="<unk>",
mask="<mask>",
):
super().__init__(pad=pad, eos=eos, unk=unk)
self.mask_word = mask
self.mask_index = self.add_symbol(mask)
self.nspecial = len(self.symbols)
def mask(self):
"""Helper to get index of mask symbol"""
return self.mask_index
class BertDictionary(MaskedLMDictionary):
"""
Dictionary for BERT task. This extends MaskedLMDictionary by adding support
for cls and sep symbols.
"""
def __init__(
self,
pad="<pad>",
eos="</s>",
unk="<unk>",
mask="<mask>",
cls="<cls>",
sep="<sep>",
):
super().__init__(pad=pad, eos=eos, unk=unk, mask=mask)
self.cls_word = cls
self.sep_word = sep
self.cls_index = self.add_symbol(cls)
self.sep_index = self.add_symbol(sep)
self.nspecial = len(self.symbols)
def cls(self):
"""Helper to get index of cls symbol"""
return self.cls_index
def sep(self):
"""Helper to get index of sep symbol"""
return self.sep_index
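# Illustrative note (added comment): a default BertDictionary() registers the
# extra symbols on top of the base specials, so d.mask(), d.cls() and d.sep()
# all return valid indices and are counted in d.nspecial.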
|
COCO-LM/fairseq/fairseq/data/legacy/masked_lm_dictionary.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/legacy/masked_lm_dictionary.py",
"repo_id": "COCO-LM",
"token_count": 699
}
| 179 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
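# Illustrative example (added comment):
#
#     BasicTokenizer(do_lower_case=True).tokenize("Hello, World!")
#     # -> ["hello", ",", "world", "!"]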
|
COCO-LM/fairseq/fairseq/data/squad/basic_tokenizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/squad/basic_tokenizer.py",
"repo_id": "COCO-LM",
"token_count": 2029
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import signal
import threading
from torch import nn
logger = logging.getLogger(__name__)
class DistributedTimeoutWrapper(nn.Module):
"""
A wrapper that kills the process if no progress is made within a given
*timeout*. The timer is reset every time :func:`forward` is called.
Usage::
module = DistributedTimeoutWrapper(module, timeout=30)
x = module(input)
time.sleep(20) # safe
x = module(input)
time.sleep(45) # job will be killed before this returns
Args:
module (nn.Module): module to wrap
timeout (int): number of seconds before killing the process
(set to a value <= 0 to disable the timeout)
signal (Optional): signal to send once timeout is triggered
"""
def __init__(self, module: nn.Module, timeout: int, signal=signal.SIGINT):
super().__init__()
self.module = module
self.timeout = timeout
self.signal = signal
if timeout > 0:
self._heartbeat = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._check_heartbeat,
args=(os.getpid(),),
daemon=True,
)
self._heartbeat_thread.start()
self._terminated = False
else:
self._heartbeat = None
self._heartbeat_thread = None
def __del__(self):
self.stop_timeout()
def __getattr__(self, name):
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name)
def stop_timeout(self):
if self._heartbeat_thread is not None:
self._terminated = True
self._heartbeat_thread.join()
def state_dict(self, *args, **kwargs):
return self.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return self.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
if self._heartbeat is not None:
self._heartbeat.set()
return self.module(*args, **kwargs)
def _check_heartbeat(self, parent_pid):
self._heartbeat.wait() # wait for the first forward pass
while True:
self._heartbeat.clear()
success = self._heartbeat.wait(timeout=self.timeout)
if self._terminated:
break
elif not success:
logger.error((
"Killing job for not making progress in {} seconds. "
"Set --heartbeat-timeout=-1 to disable this timeout."
).format(int(self.timeout)))
os.kill(parent_pid, self.signal)
return
|
COCO-LM/fairseq/fairseq/distributed/distributed_timeout_wrapper.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/distributed/distributed_timeout_wrapper.py",
"repo_id": "COCO-LM",
"token_count": 1304
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.hub_utils import GeneratorHubInterface
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class BARTHubInterface(GeneratorHubInterface):
"""A simple PyTorch Hub interface to BART.
Usage: https://github.com/pytorch/fairseq/tree/master/examples/bart
"""
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
self.model = self.models[0]
def encode(
self, sentence: str, *addl_sentences, no_separator=True
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`).
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> bart.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> bart.encode(' world').tolist()
[0, 232, 2]
>>> bart.encode('world').tolist()
[0, 8331, 2]
"""
tokens = self.bpe.encode(sentence)
if len(tokens.split(" ")) > min(self.max_positions) - 2:
tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2])
bpe_sentence = "<s> " + tokens + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.cpu().numpy()
if tokens[0] == self.task.source_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.source_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def _build_sample(self, src_tokens: List[torch.LongTensor]):
# assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference(
src_tokens,
[x.numel() for x in src_tokens],
)
sample = dataset.collater(dataset)
sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
return sample
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
*args,
inference_step_args=None,
skip_invalid_size_inputs=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
inference_step_args = inference_step_args or {}
if "prefix_tokens" in inference_step_args:
raise NotImplementedError("prefix generation not implemented for BART")
res = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
src_tokens = batch['net_input']['src_tokens']
inference_step_args["prefix_tokens"] =src_tokens.new_full(
(src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos()
).to(device=self.device)
results = super().generate(
src_tokens,
*args,
inference_step_args=inference_step_args,
skip_invalid_size_inputs=skip_invalid_size_inputs,
**kwargs
)
res.extend(results)
return res
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > min(self.model.max_positions()):
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
        tokens = tokens.to(device=self.device)
prev_output_tokens = tokens.clone()
prev_output_tokens[:, 0] = tokens.gather(
1,
(tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
).squeeze()
prev_output_tokens[:, 1:] = tokens[:, :-1]
features, extra = self.model(
src_tokens=tokens,
src_lengths=None,
prev_output_tokens=prev_output_tokens,
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
self.model.register_classification_head(
name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
)
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
features = self.extract_features(tokens.to(device=self.device))
sentence_representation = features[
tokens.eq(self.task.source_dictionary.eos()), :
].view(features.size(0), -1, features.size(-1))[:, -1, :]
logits = self.model.classification_heads[head](sentence_representation)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
def fill_mask(
self,
masked_inputs: List[str],
topk: int = 5,
match_source_len: bool = True,
**generate_kwargs
):
masked_token = '<mask>'
batch_tokens = []
for masked_input in masked_inputs:
assert masked_token in masked_input, \
"please add one {} token for the input".format(masked_token)
text_spans = masked_input.split(masked_token)
text_spans_bpe = (' {0} '.format(masked_token)).join(
[self.bpe.encode(text_span.rstrip()) for text_span in text_spans]
).strip()
tokens = self.task.source_dictionary.encode_line(
'<s> ' + text_spans_bpe + ' </s>',
append_eos=False,
add_if_not_exist=False,
).long()
batch_tokens.append(tokens)
# ensure beam size is at least as big as topk
generate_kwargs['beam'] = max(
topk,
generate_kwargs.get('beam', -1),
)
generate_kwargs['match_source_len'] = match_source_len
batch_hypos = self.generate(batch_tokens, **generate_kwargs)
return [
[(self.decode(hypo['tokens']), hypo['score']) for hypo in hypos[:topk]]
for hypos in batch_hypos
]
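
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). It assumes a
# BART checkpoint has already been downloaded locally; the directory below is
# a placeholder, not a path shipped with this repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from fairseq.models.bart import BARTModel

    bart = BARTModel.from_pretrained(
        "checkpoints/bart.large",  # placeholder checkpoint directory
        checkpoint_file="model.pt",
    )
    bart.eval()

    # fill_mask expects exactly one <mask> token per input and returns the
    # top-k (decoded hypothesis, score) pairs for every input sentence.
    print(bart.fill_mask(["The cat <mask> on the mat."], topk=3))

    # encode/extract_features round trip: features come back as B x T x C.
    tokens = bart.encode("Hello world.")
    print(bart.extract_features(tokens).shape)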
|
COCO-LM/fairseq/fairseq/models/bart/hub_interface.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/bart/hub_interface.py",
"repo_id": "COCO-LM",
"token_count": 3628
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.lightconv import Embedding, LightConvDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
@register_model("lightconv_lm")
class LightConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout",
default=0.1,
type=float,
metavar="D",
help="dropout probability",
)
parser.add_argument(
"--attention-dropout",
default=0.0,
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--relu-dropout",
default=0.0,
type=float,
metavar="D",
help="dropout probability after ReLU in FFN",
)
parser.add_argument(
"--input-dropout",
type=float,
metavar="D",
help="dropout probability of the inputs",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-output-dim",
type=int,
metavar="N",
help="decoder output dimension",
)
parser.add_argument(
"--decoder-input-dim", type=int, metavar="N", help="decoder input dimension"
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads or LightConv/DynamicConv heads",
)
parser.add_argument(
"--decoder-normalize-before",
default=False,
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--adaptive-softmax-factor",
type=float,
metavar="N",
help="adaptive input factor",
)
parser.add_argument(
"--no-token-positional-embeddings",
default=False,
action="store_true",
help="if set, disables positional embeddings (outside self attention)",
)
parser.add_argument(
"--share-decoder-input-output-embed",
default=False,
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--character-embeddings",
default=False,
action="store_true",
help="if set, uses character embedding convolutions to produce token embeddings",
)
parser.add_argument(
"--character-filters",
type=str,
metavar="LIST",
default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
help="size of character embeddings",
)
parser.add_argument(
"--character-embedding-dim",
type=int,
metavar="N",
default=4,
help="size of character embeddings",
)
parser.add_argument(
"--char-embedder-highway-layers",
type=int,
metavar="N",
default=2,
help="number of highway layers for character token embeddder",
)
parser.add_argument(
"--adaptive-input",
default=False,
action="store_true",
help="if set, uses adaptive input",
)
parser.add_argument(
"--adaptive-input-factor",
type=float,
metavar="N",
help="adaptive input factor",
)
parser.add_argument(
"--adaptive-input-cutoff",
metavar="EXPR",
help="comma separated list of adaptive input cutoff points.",
)
parser.add_argument(
"--tie-adaptive-weights",
action="store_true",
help="if set, ties the weights of adaptive softmax and adaptive input",
)
parser.add_argument(
"--tie-adaptive-proj",
action="store_true",
help="if set, ties the projection weights of adaptive softmax and adaptive input",
)
parser.add_argument(
"--decoder-learned-pos",
action="store_true",
help="use learned positional embeddings in the decoder",
)
"""LightConv and DynamicConv arguments"""
parser.add_argument(
"--decoder-kernel-size-list",
type=lambda x: utils.eval_str_list(x, int),
help='list of kernel size (default: "[3,7,15,31,31,31]")',
)
parser.add_argument(
"--decoder-glu", type=utils.eval_bool, help="glu after in proj"
)
parser.add_argument(
"--decoder-conv-type",
default="dynamic",
type=str,
choices=["dynamic", "lightweight"],
help="type of convolution",
)
parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool)
parser.add_argument(
"--weight-dropout",
type=float,
metavar="D",
help="dropout probability for conv weights",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = args.tokens_per_sample
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = args.tokens_per_sample
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.dictionary,
eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.dictionary),
task.dictionary.pad(),
args.decoder_input_dim,
args.adaptive_input_factor,
args.decoder_embed_dim,
utils.eval_str_list(args.adaptive_input_cutoff, type=int),
)
else:
embed_tokens = Embedding(
len(task.dictionary), args.decoder_input_dim, task.dictionary.pad()
)
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert (
args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
), "{} != {}".format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = LightConvDecoder(
args,
task.output_dictionary,
embed_tokens,
no_encoder_attn=True,
final_norm=False,
)
return LightConvLanguageModel(decoder)
@register_model_architecture("lightconv_lm", "lightconv_lm")
def base_lm_architecture(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim)
# The model training is not stable without this
args.decoder_normalize_before = True
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.decoder_kernel_size_list = getattr(
args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31]
)
if len(args.decoder_kernel_size_list) == 1:
args.decoder_kernel_size_list = (
args.decoder_kernel_size_list * args.decoder_layers
)
assert (
len(args.decoder_kernel_size_list) == args.decoder_layers
), "decoder_kernel_size_list doesn't match decoder_layers"
args.decoder_glu = getattr(args, "decoder_glu", True)
args.input_dropout = getattr(args, "input_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout)
@register_model_architecture("lightconv_lm", "lightconv_lm_gbw")
def lightconv_lm_gbw(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
|
COCO-LM/fairseq/fairseq/models/lightconv_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/lightconv_lm.py",
"repo_id": "COCO-LM",
"token_count": 5387
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
@register_model("transformer")
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                            help='decoder output dimension (extra linear layer '
                                 'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
                            help='checkpoint activations at each layer, then offload them to CPU. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
'minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'
)
)
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
    # get_normalized_probs lives in the base FairseqModel, which is not
    # scriptable, so we override it here and delegate to the scriptable helper
    # defined in the base class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args):
layer = TransformerEncoderLayer(args)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = TransformerDecoderLayer(args, no_encoder_attn)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("transformer", "transformer_tiny")
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
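
# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration, not in the upstream file): the
# Embedding helper zeroes the padding row, and the registered architecture
# functions only fill in defaults for attributes the user has not set.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    emb = Embedding(num_embeddings=10, embedding_dim=4, padding_idx=1)
    assert emb.weight[1].abs().sum().item() == 0.0  # padding row is all zeros

    args = Namespace(dropout=0.3)      # user override survives
    base_architecture(args)            # everything else gets its default
    print(args.dropout, args.encoder_embed_dim, args.decoder_layers)  # 0.3 512 6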
|
COCO-LM/fairseq/fairseq/models/transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/transformer.py",
"repo_id": "COCO-LM",
"token_count": 23740
}
| 184 |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
return (a + b - 1) / b;
}
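// Example (added for clarity): divUp(10, 4) == 3 -- the number of size-4
// blocks needed to cover 10 elements (ceiling division for positive integers).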
template<int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__
void zeroSharedMem(scalar_t* data) {
/*
Given an array of length FS + SB, zero out the first padding_l and last
(FS - padding_l) values in the array
*/
int tid = threadIdx.x;
if (FS < SB) {
// zero all if we have enough threads in a block to do all of them
if (tid < padding_l || tid > SB - FS + padding_l - 1) {
data[tid] = scalar_t(0.0);
}
} else {
// otherwise zero out one block at a time
const int numIterations = divUp<int, int>(FS, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if (tid + offset < padding_l) {
data[tid + offset] = scalar_t(0.0);
} else if (tid + offset < FS) {
data[SB + tid + offset] = scalar_t(0.0);
}
}
}
}
template<typename scalar_t>
__inline__ __device__
scalar_t warpReduce(scalar_t data) {
/*
  Reduce a value within each warp. After processing, every lane in the warp
  will contain the sum of all original values in that warp.
  data - the per-lane value to reduce (passed by value, not a pointer)
*/
data += __shfl_xor_sync(SHFL_MASK, data, 16);
data += __shfl_xor_sync(SHFL_MASK, data, 8);
data += __shfl_xor_sync(SHFL_MASK, data, 4);
data += __shfl_xor_sync(SHFL_MASK, data, 2);
data += __shfl_xor_sync(SHFL_MASK, data, 1);
return data;
}
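// Note (added for clarity): the xor-shuffle "butterfly" above combines lanes
// whose ids differ by 16, 8, 4, 2 and 1, so a full 32-lane warp is reduced in
// five steps and every lane ends up holding the same warp-wide sum.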
template<typename scalar_t>
__inline__ __device__
scalar_t blockReduce(scalar_t data) {
/*
  Reduce a value across an entire thread block. After processing, threads in
  the first warp hold the block-wide sum, which is returned.
  data - the per-lane value to reduce (passed by value, not a pointer)
*/
static __shared__ scalar_t warpSum[32];
const int tid = threadIdx.x;
int wid = tid / 32;
int lane = tid % 32;
__syncthreads();
// reduce each warp then write to shared memory
scalar_t sum = warpReduce(data);
if (lane == 0) {
warpSum[wid] = sum;
}
__syncthreads();
scalar_t v;
// perform final sum of partial warp sums
if (tid < blockDim.x / 32) {
v = warpSum[lane];
} else {
v = scalar_t(0.0);
}
if (wid == 0) {
v = warpReduce(v);
}
__syncthreads();
return v;
}
void checkCudaStatus(cudaError_t status, int lineNumber = -1) {
if (status != cudaSuccess) {
std::cout << cudaGetErrorString(status)
<< " at line " << lineNumber << std::endl;
std::cout << "Exiting" << std::endl;
exit(1);
}
}
template<int FS, int SB, int padding_l, typename scalar_t>
__device__
void load_input_to_shared(const scalar_t* input, // global memory
int inputOffset, int sequenceLength,
int iteration, int numIterations,
bool no_prev, scalar_t* output /* shared memory */) {
/*
Load a block size of input into shared memory with
right and left overhang of total size FS. If previously
loaded memory, overlap will be shifted over to reduce
global memory access
input - pointer to start of channel sequence
inputOffset - how far in the sequence to start loading
sequenceLength - total length of sequence
iteration - which block of sequence we are loading
numIterations - total number of blocks to load
no_prev - whether to load the whole block if the previous block
wasn't loaded
output - shared memory to write input to
*/
const int tid = threadIdx.x;
// Load the left "overhang" of input
if (iteration > 0) {
if (padding_l < SB) {
// load all at once
if (tid < padding_l) {
output[tid] = (no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < padding_l) {
output[tid + offset] = (no_prev) ? input[inputOffset - padding_l + tid + offset] : output[tid + offset + SB];
}
}
}
}
// Load the right "overhang" of input
if (iteration < (numIterations - 1)) {
const int elementsLeft = sequenceLength - (iteration+1) * SB;
if ((FS - padding_l) < SB) {
// load all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = (tid < elementsLeft) ? input[inputOffset + SB + tid] : scalar_t(0.0);
}
} else {
// load in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = ((tid + offset) < elementsLeft) ? input[inputOffset + SB + tid + offset] : scalar_t(0.0);
}
}
}
}
// We should also clear out the right "overhang"
if (iteration == (numIterations - 1)) {
if ((FS - padding_l) < SB) {
// clear out all at once
if (tid < (FS - padding_l)) {
output[padding_l + SB + tid] = scalar_t(0.0);
}
} else {
// clear in chunks of size SB
int numIterations = divUp<int, int>(FS - padding_l, SB);
for (int i = 0; i < numIterations; i++) {
int offset = i * SB;
if ((tid + offset) < (FS - padding_l)) {
output[padding_l + SB + tid + offset] = scalar_t(0.0);
}
}
}
}
output[tid + padding_l] = ((inputOffset + tid) < sequenceLength) ? input[inputOffset + tid] : scalar_t(0.0);
}
|
COCO-LM/fairseq/fairseq/modules/cuda_utils.cu/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/cuda_utils.cu",
"repo_id": "COCO-LM",
"token_count": 2367
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class GumbelVectorQuantizer(nn.Module):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation=nn.GELU(),
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[
block(self.input_dim if i == 0 else inner_dim, inner_dim)
for i in range(weight_proj_depth - 1)
],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
if isinstance(temp, str):
import ast
temp = ast.literal_eval(temp)
assert len(temp) == 3, f"{temp}, {len(temp)}"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(
inds, dtype=torch.long, device=self.vars.device
).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(
self.num_vars ** self.groups, -1
)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def codebook(self):
indices = self.get_codebook_indices()
return (
self.vars.squeeze(0)
.index_select(0, indices)
.view(self.num_vars ** self.groups, -1)
)
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert (
n < cb_size
), f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
def to_codebook_index(self, indices):
res = indices.new_full(indices.shape[:-1], 0)
for i in range(self.groups):
exponent = self.groups - i - 1
res += indices[..., i] * (self.num_vars ** exponent)
return res
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars * self.groups}
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplexity"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
if produce_targets:
result["targets"] = (
x.view(bsz * tsz * self.groups, -1)
.argmax(dim=-1)
.view(bsz, tsz, self.groups)
.detach()
)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
result["x"] = x
return result
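# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# constructing the quantizer and running a forward pass. All sizes and the
# temperature schedule below are illustrative assumptions, not values taken
# from any fairseq configuration.
if __name__ == "__main__":
    quantizer = GumbelVectorQuantizer(
        dim=32,                       # input channels
        num_vars=8,                   # codebook entries per group
        temp=(2.0, 0.5, 0.999995),    # (start, stop, decay factor)
        groups=2,
        combine_groups=False,
        vq_dim=64,
        time_first=True,
    )
    dummy = torch.randn(4, 10, 32)    # B x T x C because time_first=True
    out = quantizer(dummy, produce_targets=True)
    # quantized output is B x T x vq_dim; targets give the chosen code per group
    print(out["x"].shape, out["targets"].shape)   # (4, 10, 64), (4, 10, 2)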
|
COCO-LM/fairseq/fairseq/modules/gumbel_vector_quantizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/gumbel_vector_quantizer.py",
"repo_id": "COCO-LM",
"token_count": 3467
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert (
module.weight.size(1) % block_size == 0
), "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert (
module.in_channels % block_size == 0
), "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(
in_features // block_size * out_features, device=weight.device
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(
int(in_channels // block_size * out_channels),
device=weight.device,
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(
weight.size(0), weight.size(1), device=weight.device
)
mask.bernoulli_(p)
mask = (
mask.unsqueeze(2)
.unsqueeze(3)
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
)
# scale weights and apply mask
mask = mask.to(
torch.bool
) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
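# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): wraps a Linear layer
# with quantization noise. The layer sizes, noise probability and block size
# are arbitrary assumptions, chosen so that in_features is a multiple of
# block_size as the assertions above require.
if __name__ == "__main__":
    layer = quant_noise(nn.Linear(64, 32), p=0.1, block_size=8)
    layer.train()                     # the pre-hook only drops blocks in training
    x = torch.randn(4, 64)
    y = layer(x)
    print(y.shape)                    # torch.Size([4, 32])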
|
COCO-LM/fairseq/fairseq/modules/quant_noise.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quant_noise.py",
"repo_id": "COCO-LM",
"token_count": 1844
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntLinear(nn.Module):
"""
Quantized counterpart of the nn.Linear module that applies QuantNoise during training.
Args:
- in_features: input features
- out_features: output features
- bias: bias or not
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick.
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_features,
out_features,
bias=True,
p=0,
update_step=3000,
bits=8,
method="histogram",
):
super(IntLinear, self).__init__()
self.in_features = int(in_features)
self.out_features = int(out_features)
self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.chosen_bias = bias
if self.chosen_bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.chosen_bias:
nn.init.constant_(self.bias, 0.0)
return
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # refresh scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = F.linear(input, weight, self.bias)
return output
def extra_repr(self):
return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
self.in_features,
self.out_features,
self.bias is not None,
self.p,
self.bits,
self.method,
)
|
COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qlinear.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qlinear.py",
"repo_id": "COCO-LM",
"token_count": 1588
}
| 188 |
# Originally from Microsoft Corporation.
# Licensed under the MIT License.
""" Wrapper for ngram_repeat_block cuda extension """
import torch
from torch import nn
import math
from typing import Dict, List, Optional
import warnings
try:
from fairseq import ngram_repeat_block_cuda
EXTENSION_BUILT = True
except ImportError:
EXTENSION_BUILT = False
def is_cuda_extension_usable() -> bool:
"""Check whether ngram_repeat_block_cuda is built properly"""
if not EXTENSION_BUILT or not torch.cuda.is_available():
return False
bsz = 2
tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda")
lprobs = torch.rand((8, 12), device="cuda")
try:
outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3)
outputs = outputs + 4 # This line breaks if the extension is built incorrectly.
return True
except RuntimeError:
warnings.warn(
"NGramRepeatBlock extension must be rebuilt."
'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace'
)
return False
class NGramRepeatBlock(nn.Module):
""" Wrapper class for calling ngram_repeat_block cuda extension """
def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True):
super().__init__()
self.use_extension = is_cuda_extension_usable() if use_extension else False
self.no_repeat_ngram_size = no_repeat_ngram_size
def reset_parameters(self):
pass
@torch.jit.unused
def call_cuda_extension(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
return ngram_repeat_block_cuda.forward(
tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size
)
def forward(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
"""
Args:
tokens(Tensor): Input tokens(Bsz*beam, seq_len)
            lprobs(Tensor): log probabilities over the vocabulary,
                expected to be updated in place (Bsz*beam, vocab_size)
bsz(int): batch size
step(int): current step
beam_size(int): beam size
            no_repeat_ngram_size(int): ngram size (taken from the module attribute)
"""
msg = f"expected {bsz *beam_size} got"
assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}"
assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}"
if self.use_extension:
return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step)
else:
return self._no_repeat_ngram(
tokens,
lprobs,
bsz,
beam_size,
step,
)
def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
"""For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf"""
gen_ngrams: List[Dict[str, List[int]]] = [
torch.jit.annotate(Dict[str, List[int]], {})
for bbsz_idx in range(bsz * beam_size)
]
cpu_tokens = tokens.cpu()
for bbsz_idx in range(bsz * beam_size):
gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist()
for ngram in self.transpose_list(
[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]
):
key = ",".join([str(x) for x in ngram[:-1]])
gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get(
key, torch.jit.annotate(List[int], [])
) + [ngram[-1]]
if step + 2 - self.no_repeat_ngram_size >= 0:
            # only ban ngrams once at least no_repeat_ngram_size tokens have been generated
banned_tokens = [
self.calculate_banned_tokens(
tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx
)
for bbsz_idx in range(bsz * beam_size)
]
else:
banned_tokens = [
torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx][
torch.tensor(banned_tokens[bbsz_idx]).long()
] = torch.tensor(-math.inf).to(lprobs)
return lprobs
@staticmethod
def calculate_banned_tokens(
tokens,
step: int,
gen_ngrams: List[Dict[str, List[int]]],
no_repeat_ngram_size: int,
bbsz_idx: int,
):
tokens_list: List[int] = tokens[
bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
].tolist()
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = ",".join([str(x) for x in tokens_list])
return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))
@staticmethod
def transpose_list(l: List[List[int]]):
# GeneratorExp aren't supported in TS so ignoring the lint
min_len = min([len(x) for x in l]) # noqa
l2 = [[row[i] for row in l] for i in range(min_len)]
return l2
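# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercises the pure
# PyTorch fallback (use_extension=False) to ban a repeated bigram. Batch size,
# beam size, vocabulary size and token ids are illustrative assumptions; the
# last column of `tokens` stands in for the position being decoded.
if __name__ == "__main__":
    blocker = NGramRepeatBlock(no_repeat_ngram_size=2, use_extension=False)
    bsz, beam_size, vocab = 1, 2, 10
    step = 2
    tokens = torch.tensor([[5, 3, 5, 0], [7, 8, 9, 0]], dtype=torch.long)
    lprobs = torch.zeros(bsz * beam_size, vocab)
    out = blocker(tokens, lprobs, bsz, beam_size, step)
    # hypothesis 0 already contains the bigram (5, 3), and its current prefix
    # ends in 5 again, so token 3 is banned for the next position
    print(out[0, 3])   # tensor(-inf)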
|
COCO-LM/fairseq/fairseq/ngram_repeat_block.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/ngram_repeat_block.py",
"repo_id": "COCO-LM",
"token_count": 2626
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class CosineLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = field(
default=II("optimization.lr"),
metadata={"help": "max learning rate, must be more than cfg.min_lr"},
)
min_lr: float = field(default=0.0, metadata={"help": "min learning rate"})
t_mult: float = field(
default=1.0, metadata={"help": "factor to grow the length of each period"}
)
lr_period_updates: float = field(
default=-1, metadata={"help": "initial number of updates per period"}
)
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
# This is not required, but is for convenience in inferring lr_period_updates
max_update: int = II("optimization.max_update")
@register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig)
class CosineLRSchedule(FairseqLRScheduler):
"""Assign LR based on a cyclical schedule that follows the cosine function.
See https://arxiv.org/pdf/1608.03983.pdf for details.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
max learning rate (``--lr``).
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
        lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(pi * t_curr / t_i))
    where ``t_curr`` is the number of updates within the current period and
    ``t_i`` is the length of the current period, which is scaled by ``t_mult``
    after every iteration.
"""
def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer):
super().__init__(cfg, fairseq_optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with cosine."
f" Consider --lr-scheduler=fixed instead. ({cfg.lr})"
)
self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
assert (
self.max_lr > cfg.min_lr
), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})"
warmup_end_lr = self.max_lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = cfg.min_lr
self.t_mult = cfg.t_mult
self.period = cfg.lr_period_updates
if self.period <= 0:
assert (
cfg.max_update > 0
), "Either --max_update or --lr-period-updates must be set"
self.period = cfg.max_update - cfg.warmup_updates
if cfg.warmup_updates > 0:
# linearly warmup for the first cfg.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
else:
self.lr_step = 1
self.warmup_updates = cfg.warmup_updates
self.lr_shrink = cfg.lr_shrink
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
else:
curr_updates = num_updates - self.cfg.warmup_updates
if self.t_mult != 1:
i = math.floor(
math.log(
1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult
)
)
t_i = self.t_mult ** i * self.period
t_curr = (
curr_updates
- (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period
)
else:
i = math.floor(curr_updates / self.period)
t_i = self.period
t_curr = curr_updates - (self.period * i)
lr_shrink = self.lr_shrink ** i
min_lr = self.cfg.min_lr * lr_shrink
max_lr = self.max_lr * lr_shrink
self.lr = min_lr + 0.5 * (max_lr - min_lr) * (
1 + math.cos(math.pi * t_curr / t_i)
)
self.optimizer.set_lr(self.lr)
return self.lr
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py",
"repo_id": "COCO-LM",
"token_count": 2432
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from typing import Union
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import populate_dataclass, merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
REGISTRIES = {}
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="fairseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
|
COCO-LM/fairseq/fairseq/registry.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/registry.py",
"repo_id": "COCO-LM",
"token_count": 1659
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PrependTokenDataset,
RightPadDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("masked_lm")
class MaskedLMTask(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--sample-break-mode",
default="complete",
choices=["none", "complete", "complete_doc", "eos"],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.',
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments "
"per sample for BERT dataset",
)
parser.add_argument(
"--mask-prob",
default=0.15,
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--leave-unmasked-prob",
default=0.1,
type=float,
help="probability that a masked token is unmasked",
)
parser.add_argument(
"--random-token-prob",
default=0.1,
type=float,
help="probability of replacing a token with a random token",
)
parser.add_argument(
"--freq-weighted-replacement",
default=False,
action="store_true",
help="sample random replacement words based on word frequencies",
)
parser.add_argument(
"--mask-whole-words",
default=False,
action="store_true",
help="mask whole words; you may also want to set --bpe",
)
parser.add_argument(
"--mask-multiple-length",
default=1,
type=int,
help="repeat the mask indices multiple times",
)
parser.add_argument(
"--mask-stdev", default=0.0, type=float, help="stdev of the mask length"
)
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
# create masked input and targets
mask_whole_words = (
get_whole_word_mask(self.args, self.source_dictionary)
if self.args.mask_whole_words
else None
)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
mask_multiple_length=self.args.mask_multiple_length,
mask_stdev=self.args.mask_stdev,
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_dataset))
self.datasets[split] = SortDataset(
NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": RightPadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_dataset, reduce=True),
},
sizes=[src_dataset.sizes],
),
sort_order=[
shuffle,
src_dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = RightPadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
pad_idx=self.source_dictionary.pad(),
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
|
COCO-LM/fairseq/fairseq/tasks/masked_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/masked_lm.py",
"repo_id": "COCO-LM",
"token_count": 4534
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import logging
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from omegaconf import OmegaConf
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
if isinstance(cfg, Namespace):
logger.warning(
"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf"
)
cfg = convert_namespace_to_omegaconf(cfg)
self.cfg = cfg
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = cfg.common.tpu
self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu
if self.cuda:
self.device = torch.device("cuda")
elif self.tpu:
self.device = utils.get_tpu_device()
else:
self.device = torch.device("cpu")
if self.cfg.distributed_training.ddp_backend == "fully_sharded":
if self.cfg.common.bf16:
raise ValueError(
"FullyShardedDataParallel is not compatible with --bf16 or "
"--memory-efficient-bf16"
)
if self.cfg.distributed_training.zero_sharding != "none":
raise ValueError(
"FullyShardedDataParallel is not compatible with --zero-sharding "
"option (it's already built in)"
)
else:
if self.cfg.distributed_training.cpu_offload:
raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if cfg.distributed_training.ddp_backend != "fully_sharded":
if cfg.common.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
elif cfg.common.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
if (
not cfg.distributed_training.pipeline_model_parallel
# the DistributedFairseqModel wrapper will handle moving to device,
# so only handle cases which don't use the wrapper
and not self.use_distributed_wrapper
):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(
cfg.distributed_training.pipeline_devices[-1]
)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(
self.cuda_env, group=distributed_utils.get_global_group()
)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=2)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
@property
def is_data_parallel_master(self):
# NOTE: this returns true for all model parallel replicas with data
# parallel rank 0
return self.data_parallel_rank == 0
@property
def use_distributed_wrapper(self) -> bool:
return (
self.data_parallel_world_size > 1
and not self.cfg.optimization.use_bmuf
) or (
self.cfg.distributed_training.ddp_backend == "fully_sharded"
and self.cfg.distributed_training.cpu_offload
)
@property
def should_save_checkpoint_on_current_rank(self) -> bool:
"""Indicates whether to save checkpoints on the current DDP rank."""
if self.cfg.distributed_training.ddp_backend == "fully_sharded":
return True
else:
return self.is_data_parallel_master
@property
def checkpoint_suffix(self) -> str:
"""Suffix to add to the checkpoint file name."""
if self.cfg.distributed_training.ddp_backend == "fully_sharded":
return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format(self.data_parallel_rank)
else:
return self.cfg.checkpoint.checkpoint_suffix or ""
@property
def criterion(self):
if self._wrapped_criterion is None:
if (
utils.has_parameters(self._criterion)
and self.use_distributed_wrapper
):
self._wrapped_criterion = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._criterion,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if self.use_distributed_wrapper:
self._wrapped_model = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._model,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if (
self.cfg.distributed_training.ddp_backend == "fully_sharded"
and self.cfg.common.fp16
):
# FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,
# mostly for the grad scaling. But if we don't have the
# --memory-efficient-fp16 flag set, then we're effectively doing
# regular --fp16 and can allow the use of optimizers that would
# otherwise be unsupported by MemoryEfficientFP16Optimizer.
allow_unsupported = not self.cfg.common.memory_efficient_fp16
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params, allow_unsupported=allow_unsupported
)
elif self.cfg.common.fp16 or self.cfg.common.bf16:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if (
self.cfg.common.memory_efficient_fp16
or self.cfg.common.memory_efficient_bf16
):
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info("NOTE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.cfg.distributed_training.ddp_backend == "fully_sharded":
assert not self.cfg.optimization.use_bmuf, \
"--ddp-backend=fully_sharded is not compatible with BMUF"
assert self._optimizer.supports_flat_params, (
"--ddp-backend=fully_sharded is only compatible with pointwise "
"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). "
"However, the sharding will result in slightly different results when "
"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)"
)
if self.cfg.optimization.use_bmuf:
self._optimizer = optim.FairseqBMUF(
self.cfg.bmuf,
self._optimizer,
)
if self.cfg.distributed_training.zero_sharding == "os":
if (
self.cfg.common.fp16
and not self.cfg.common.memory_efficient_fp16
and not self.cfg.common.memory_efficient_bf16
) and not self.cfg.common.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(
self.cfg.lr_scheduler,
self.optimizer,
)
self._lr_scheduler.step_update(0)
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
def state_dict(self):
state_dict = {
"args": None, # legacy
"cfg": (
OmegaConf.to_container(self.cfg)
if OmegaConf.is_config(self.cfg) else self.cfg
),
"model": self.model.state_dict(),
"criterion": (
self.criterion.state_dict()
if utils.has_parameters(self.criterion) else None
),
"optimizer_history": (self._optim_history or [])
+ [
{
"criterion_name": self.get_criterion().__class__.__name__,
"optimizer_name": self.optimizer.__class__.__name__,
"lr_scheduler_state": self.lr_scheduler.state_dict(),
"num_updates": self.get_num_updates(),
}
],
"task_state": self.task.state_dict() if self.task is not None else {},
"extra_state": {
"metrics": metrics.state_dict(),
"previous_training_time": self.cumulative_training_time(),
}
}
if not self.cfg.checkpoint.no_save_optimizer_state:
state_dict["last_optimizer_state"] = self.optimizer.state_dict()
return state_dict
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
logger.info(f"Saving checkpoint to {filename}")
# call state_dict on all ranks in case it needs internal communication
state_dict = utils.move_to_cpu(self.state_dict())
state_dict["extra_state"].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(
state_dict,
filename,
async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,
)
logger.info(f"Finished saving checkpoint to {filename}")
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""
Load all training state from a checkpoint file.
rank = 0 will load the checkpoint, and then broadcast it to all
other ranks.
"""
extra_state, self._optim_history, last_optim_state = None, [], None
logger.info(f"Preparing to load checkpoint {filename}")
is_distributed = self.data_parallel_world_size > 1
bexists = PathManager.isfile(filename)
if bexists:
load_on_all_ranks = (
self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks
# TPUs don't support broadcast yet, so load checkpoints
# on every worker for now
or self.tpu
# FSDP requires loading checkpoint shards on all ranks
or self.cfg.distributed_training.ddp_backend == "fully_sharded"
)
if load_on_all_ranks or self.data_parallel_rank == 0:
state = checkpoint_utils.load_checkpoint_to_cpu(
filename, load_on_all_ranks=load_on_all_ranks
)
last_optim_state = state.get("last_optimizer_state", None)
# If doing zero_sharding, do not broadcast global optimizer
# state. Later we will broadcast sharded states to each rank
                # to keep memory from exploding.
if (
not load_on_all_ranks
and self.cfg.distributed_training.zero_sharding == "os"
and "last_optimizer_state" in state
and is_distributed
):
state["last_optimizer_state"] = "SHARDED"
else:
last_optim_state = None
state = None
if is_distributed and not load_on_all_ranks:
state = distributed_utils.broadcast_object(
state,
src_rank=0,
group=self.data_parallel_process_group,
dist_device=self.device,
)
if self.data_parallel_rank > 0:
last_optim_state = state.get("last_optimizer_state", None)
# load model parameters
try:
self.model.load_state_dict(
state, strict=False, model_cfg=self.cfg.model
)
            if utils.has_parameters(self.get_criterion()):
                self.get_criterion().load_state_dict(
                    state["criterion"], strict=True
                )
            # save memory for later steps
            del state
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = None
# extra_state = state["extra_state"]
# self._optim_history = state["optimizer_history"]
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}"
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}"
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
if not load_on_all_ranks and is_distributed:
last_optim_state = self.optimizer.broadcast_global_state_dict(
last_optim_state
)
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
itr_state = extra_state["train_iterator"]
epoch = itr_state["epoch"]
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if itr_state.get("version", 1) >= 2 and itr_state["iterations_in_epoch"] == 0:
# reset meters at start of epoch
reset_meters = True
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
logger.info(
"Loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
# else:
# logger.info("No existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.cfg.dataset.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
tpu=self.tpu,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.train_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.cfg.dataset.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.cfg.dataset.num_workers,
epoch=epoch,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.cfg.dataset.max_tokens_valid,
max_sentences=self.cfg.dataset.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
# always pass a fixed "epoch" to keep validation data consistent
# across training epochs
epoch=1,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=2)
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples): # delayed update loop
sample, is_dummy_batch = self._prepare_sample(sample)
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.cfg.distributed_training.distributed_world_size == 1:
return None
else:
raise e
if self.tpu and i < len(samples) - 1:
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
self._xla_markstep_and_send_to_cpu()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
logging_outputs, (
sample_size,
ooms,
total_train_time,
) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
overflow = False
try:
with torch.autograd.profiler.record_function("reduce-grads"):
# reduce gradients across workers
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (data_parallel_size / sample_size) since
# DDP normalizes by the number of data parallel workers for
# improved fp16 precision.
# Thus we get (sum_of_gradients / sample_size) at the end.
# In case of fp16, this step also undoes loss scaling.
# (Debugging note: Some optimizers perform this scaling on the
# fly, so inspecting model.parameters() or optimizer.params may
# still show the original, unscaled gradients.)
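                # (Illustrative numbers, not from any config: with 8 data
                # parallel workers and sample_size = 4096, DDP averaging gives
                # sum_of_gradients / 8, and multiplying by 8 / 4096 below
                # recovers sum_of_gradients / 4096.)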
numer = (
self.data_parallel_world_size
if not self.cfg.optimization.use_bmuf or self._sync_stats()
else 1
)
self.optimizer.multiply_grads(numer / (sample_size or 1.0))
# Note: (sample_size or 1.0) handles the case of a zero gradient, in a
# way that avoids CPU/device transfers in case sample_size is a GPU or
# TPU object. The assumption is that the gradient itself is also 0.
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
# check that grad norms are consistent across workers
# on tpu check tensor is slow
if not self.tpu:
if (
not self.cfg.optimization.use_bmuf
and self.cfg.distributed_training.ddp_backend != "slow_mo"
):
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.task.optimizer_step(
self.optimizer, model=self.model, update_num=self.get_num_updates()
)
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
self.zero_grad()
with NanDetector(self.get_model()):
for _, sample in enumerate(samples):
sample, _ = self._prepare_sample(sample)
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
)
raise
except OverflowError as e:
overflow = True
logger.info(f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}")
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer
# after the step
if hasattr(self.model, "perform_additional_optimizer_actions"):
if hasattr(self.optimizer, "fp32_params"):
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer, self.optimizer.fp32_params
)
else:
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer
)
logging_output = None
if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo":
self.set_num_updates(self.get_num_updates() + 1)
if self.tpu:
import torch_xla.core.xla_model as xm
# mark step on TPUs
self._xla_markstep_and_send_to_cpu()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.cfg.common.log_interval == 0:
# log memory usage
mem_info = xm.get_memory_info(self.device)
gb_free = mem_info["kb_free"] / 1024 / 1024
gb_total = mem_info["kb_total"] / 1024 / 1024
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"gb_total", gb_total, priority=1600, round=1, weight=0
)
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
if self.cuda and self.cuda_env is not None:
# log minimum free memory over the iteration
gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
torch.cuda.reset_peak_memory_stats()
gb_free = self.cuda_env.total_memory_in_GB - gb_used
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.cfg.common.empty_cache_freq > 0
and (
(self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)
% self.cfg.common.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.cfg.common.fp16:
metrics.log_scalar(
"loss_scale",
self.optimizer.scaler.loss_scale,
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("valid_step") # wait for all workers
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample, is_dummy_batch = self._prepare_sample(sample)
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
if self.tpu:
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
# don't reduce here, otherwise the metric is wrong
# logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_outputs
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for k, v in new_lr.items():
metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300)
new_lr = new_lr.get("default", next(iter(new_lr.values())))
else:
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use fairseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
def agg_norm_fn(total_norm):
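            # With fully sharded training each worker only sees the norm of its own
            # parameter shard, so recover the global norm by squaring it, summing the
            # squares across the data-parallel group, and taking the square root.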
total_norm = total_norm.cuda().float() ** 2
total_norm = distributed_utils.all_reduce(
total_norm, group=self.data_parallel_process_group
)
return total_norm ** 0.5
should_agg_norm = (
self.cfg.distributed_training.ddp_backend == "fully_sharded"
and (
self.data_parallel_process_group is not None
or torch.distributed.is_initialized()
)
)
return self.optimizer.clip_grad_norm(
clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None
)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _prepare_sample(self, sample, is_dummy=False):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
assert (
self._dummy_batch is not None and len(self._dummy_batch) > 0
), "Invalid dummy batch: {}".format(self._dummy_batch)
sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)
return sample, True
if self.cuda:
if self.pipeline_model_parallel:
if "target" in sample:
sample["target"] = utils.move_to_cuda(
sample["target"], device=self.last_device
)
else:
sample = utils.move_to_cuda(sample)
elif self.tpu and is_dummy:
# the dummy batch may not be on the appropriate device
sample = utils.move_to_cuda(sample, device=self.device)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
return sample, False
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.cfg.common.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
        # Return True when using multiple GPUs with DDP, or when using multiple GPUs
        # with BMUF and this update is a BMUF sync step after the warmup iterations.
if self.data_parallel_world_size == 1:
return False
elif self.cfg.optimization.use_bmuf:
return (
self.get_num_updates() + 1
) % self.cfg.bmuf.global_sync_iter == 0 and (
self.get_num_updates() + 1
) > self.cfg.bmuf.warmup_iterations
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.cfg.common, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
torch.isfinite(tensor).all()
and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=legacy_ddp. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None and (
not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)
):
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.cfg.optimization.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.cfg.optimization.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
logger.warning(
"XLA compilation detected on device #{}; too many of these can lead "
"to slow training, but we expect a few in the beginning".format(
self.cfg.distributed_training.distributed_rank
)
)
self._num_xla_compiles = num_xla_compiles
def _xla_markstep_and_send_to_cpu(self, data=None):
import torch_xla.core.xla_model as xm
xm.mark_step()
if data is not None:
from fairseq.utils import xla_device_to_cpu
return xla_device_to_cpu(data)
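# Module-level helpers used when the trainer wraps the model: _catalog_shared_params
# records every parameter that is reachable under more than one attribute path (i.e.
# tied weights), and _get_module_by_path/_set_module_by_path are used to re-tie those
# parameters after wrapping. A tied embedding would typically be catalogued as a pair
# of paths such as ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
# (example paths for illustration only).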
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
|
COCO-LM/fairseq/fairseq/trainer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/trainer.py",
"repo_id": "COCO-LM",
"token_count": 26727
}
| 193 |
#include <torch/extension.h>
#include <vector>
#include <cassert>
#include "compat.h"
namespace {
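// compute_n1_n2 splits the input into n1 independent layer-norm instances of n2
// elements each: n2 is the product of normalized_shape and n1 is the product of the
// remaining leading dimensions. Example (illustrative shapes only): an input of size
// [32, 128, 768] with normalized_shape = [768] gives n1 = 32 * 128 and n2 = 768.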
void compute_n1_n2(
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
int& n1,
int& n2)
{
int idiff = input.ndimension() - normalized_shape.size();
n2 = 1;
for (int i = 0; i < (int)normalized_shape.size(); ++i) {
assert( input.sizes()[i+idiff] == normalized_shape[i] );
n2 *= normalized_shape[i];
}
n1 = 1;
for (int i = 0; i < idiff; ++i) {
n1 *= input.sizes()[i];
}
}
void check_args(
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor gamma,
at::Tensor beta
)
{
TORCH_CHECK(!gamma.defined() || gamma.sizes().equals(normalized_shape));
TORCH_CHECK(!beta.defined() || beta.sizes().equals(normalized_shape));
}
void check_args(
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
int& n1,
int& n2
)
{
int64_t normalized_ndim = normalized_shape.size();
if (normalized_ndim < 1) {
std::stringstream ss;
ss << "Expected normalized_shape to be at least 1-dimensional, i.e., "
<< "containing at least one element, but got normalized_shape="
<< normalized_shape;
throw std::runtime_error(ss.str());
}
auto input_shape = input.sizes();
auto input_ndim = input.dim();
if (input_ndim < normalized_ndim ||
!input_shape.slice(input_ndim - normalized_ndim).equals(normalized_shape)) {
std::stringstream ss;
ss << "Given normalized_shape=" << normalized_shape
<< ", expected input with shape [*";
for (auto size : normalized_shape) {
ss << ", " << size;
}
ss << "], but got input of size" << input_shape;
throw std::runtime_error(ss.str());
}
compute_n1_n2(input,normalized_shape,n1,n2);
}
void check_args(
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor gamma,
at::Tensor beta,
int& n1,
int& n2
)
{
check_args(input,normalized_shape,n1,n2);
check_args(normalized_shape,gamma,beta);
}
}
void cuda_layer_norm(
at::Tensor* output,
at::Tensor* mean,
at::Tensor* invvar,
at::Tensor* input,
int n1,
int n2,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor* gamma,
at::Tensor* beta,
double epsilon);
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::vector<at::Tensor> layer_norm(
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
double epsilon) {
CHECK_INPUT(input);
int n1,n2;
check_args(input,normalized_shape,n1,n2);
at::Tensor output = at::empty_like(input);
at::Tensor mean = at::empty({n1}, input.options().dtype((input.scalar_type()==at::ScalarType::Half || input.scalar_type()==at::ScalarType::BFloat16) ? at::ScalarType::Float : input.scalar_type()));
at::Tensor invvar = at::empty_like(mean);
cuda_layer_norm(&output,&mean,&invvar,&input,n1,n2,
normalized_shape,NULL,NULL,epsilon);
return {output, mean, invvar};
}
std::vector<at::Tensor> layer_norm_affine(
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor gamma,
at::Tensor beta,
double epsilon) {
CHECK_INPUT(input);
CHECK_INPUT(gamma);
CHECK_INPUT(beta);
int n1,n2;
check_args(input,normalized_shape,gamma,beta,n1,n2);
at::Tensor output = at::empty_like(input);
at::Tensor mean = at::empty({n1}, input.options().dtype((input.scalar_type()==at::ScalarType::Half || input.scalar_type()==at::ScalarType::BFloat16) ? at::ScalarType::Float : input.scalar_type()));
at::Tensor invvar = at::empty_like(mean);
cuda_layer_norm(&output,&mean,&invvar,&input,n1,n2,
normalized_shape,&gamma,&beta,epsilon);
return {output, mean, invvar};
}
void cuda_layer_norm_gradient(
at::Tensor* dout,
at::Tensor* mean,
at::Tensor* invvar,
at::Tensor* input,
int n1,
int n2,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor* gamma,
at::Tensor* beta,
double epsilon,
at::Tensor* grad_input,
at::Tensor* grad_gamma,
at::Tensor* grad_beta
);
at::Tensor layer_norm_gradient(
at::Tensor dout,
at::Tensor mean,
at::Tensor invvar,
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
double epsilon) {
CHECK_INPUT(dout);
CHECK_INPUT(mean);
CHECK_INPUT(invvar);
CHECK_INPUT(input);
int n1,n2;
check_args(input,normalized_shape,n1,n2);
at::Tensor grad_input = at::empty_like(input);
cuda_layer_norm_gradient(&dout,&mean,&invvar,&input,n1,n2,
normalized_shape,NULL,NULL,epsilon,
&grad_input,NULL,NULL);
return grad_input;
}
std::vector<at::Tensor> layer_norm_gradient_affine(
at::Tensor dout,
at::Tensor mean,
at::Tensor invvar,
at::Tensor input,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor gamma,
at::Tensor beta,
double epsilon) {
CHECK_INPUT(dout);
CHECK_INPUT(mean);
CHECK_INPUT(invvar);
CHECK_INPUT(input);
CHECK_INPUT(gamma);
CHECK_INPUT(beta);
int n1,n2;
check_args(input,normalized_shape,gamma,beta,n1,n2);
at::Tensor grad_input = at::empty_like(input);
at::Tensor grad_gamma = at::empty_like(gamma);
at::Tensor grad_beta = at::empty_like(beta);
cuda_layer_norm_gradient(&dout,&mean,&invvar,&input,n1,n2,
normalized_shape,&gamma,&beta,epsilon,
&grad_input,&grad_gamma,&grad_beta);
return {grad_input, grad_gamma, grad_beta};
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward_affine", &layer_norm_affine, "LayerNorm forward (CUDA)");
m.def("forward", &layer_norm, "LayerNorm forward (CUDA)");
m.def("backward_affine", &layer_norm_gradient_affine, "LayerNorm backward (CUDA)");
m.def("backward", &layer_norm_gradient, "LayerNorm backward (CUDA)");
}
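// Python-side usage sketch (the extension name depends on how the op is built;
// "fused_layernorm_cuda" is only an illustrative placeholder):
//   output, mean, invvar = fused_layernorm_cuda.forward_affine(x, (hidden_dim,), gamma, beta, 1e-5)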
|
COCO-LM/fairseq/fused_ops/csrc/layernorm/interface.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/layernorm/interface.cpp",
"repo_id": "COCO-LM",
"token_count": 2939
}
| 194 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--sentencepiece-model",
        help='path to the sentencepiece model file',
)
parser.add_argument('--vocab', type=str)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
encoder = MultiprocessingEncoder(args)
with Pool(args.workers, initializer=encoder.initializer) as pool:
for text in pool.imap(encoder.encode, sys.stdin, chunksize=4096):
sys.stdout.write(text)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = SentencepieceBPE(self.args)
def encode(self, line):
global bpe
enc_line = ' '.join(bpe.tokenize(line))
return enc_line + '\n'
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/preprocess/glue/multiprocessing_sp_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/preprocess/glue/multiprocessing_sp_encoder.py",
"repo_id": "COCO-LM",
"token_count": 482
}
| 195 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--gzip", action="store_true")
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, "r")
else:
return open(args.input, "r", encoding="utf-8")
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/scripts/count_docs.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/count_docs.py",
"repo_id": "COCO-LM",
"token_count": 803
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import sys
import unittest
import torch
from fairseq.distributed import utils as dist_utils
from .utils import objects_are_equal, spawn_and_init
class DistributedTest(unittest.TestCase):
def setUp(self):
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
if sys.platform == "win32":
raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
if torch.cuda.device_count() < 2:
raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
class TestBroadcastObject(DistributedTest):
def test_str(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object, "hello world"
),
world_size=2,
)
def test_tensor(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object,
torch.rand(5),
),
world_size=2,
)
def test_complex(self):
spawn_and_init(
functools.partial(
TestBroadcastObject._test_broadcast_object,
{
"a": "1",
"b": [2, torch.rand(2, 3), 3],
"c": (torch.rand(2, 3), 4),
"d": {5, torch.rand(5)},
"e": torch.rand(5),
"f": torch.rand(5).int().cuda(),
},
),
world_size=2,
)
@staticmethod
def _test_broadcast_object(ref_obj, rank, group):
obj = dist_utils.broadcast_object(
ref_obj if rank == 0 else None, src_rank=0, group=group
)
assert objects_are_equal(ref_obj, obj)
class TestAllGatherList(DistributedTest):
def test_str_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
"hello world",
),
world_size=2,
)
def test_tensor_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
torch.rand(5),
),
world_size=2,
)
def test_complex_equality(self):
spawn_and_init(
functools.partial(
TestAllGatherList._test_all_gather_list_equality,
{
"a": "1",
"b": [2, torch.rand(2, 3), 3],
"c": (torch.rand(2, 3), 4),
"d": {5, torch.rand(5)},
"e": torch.rand(5),
"f": torch.rand(5).int(),
},
),
world_size=2,
)
@staticmethod
def _test_all_gather_list_equality(ref_obj, rank, group):
objs = dist_utils.all_gather_list(ref_obj, group)
for obj in objs:
assert objects_are_equal(ref_obj, obj)
def test_rank_tensor(self):
spawn_and_init(
TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2
)
@staticmethod
def _test_all_gather_list_rank_tensor(rank, group):
obj = torch.tensor([rank])
objs = dist_utils.all_gather_list(obj, group)
for i, obj in enumerate(objs):
assert obj.item() == i
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/distributed/test_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/distributed/test_utils.py",
"repo_id": "COCO-LM",
"token_count": 1912
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO
from unittest.mock import patch
from fairseq import checkpoint_utils
from omegaconf import OmegaConf
from tests.utils import (
create_dummy_data,
preprocess_translation_data,
train_translation_model,
)
class TestCheckpointUtils(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@contextlib.contextmanager
def _train_transformer(self, seed, extra_args=None):
if extra_args is None:
extra_args = []
with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--seed",
str(seed),
]
+ extra_args,
)
yield os.path.join(data_dir, "checkpoint_last.pt")
def test_load_model_ensemble_and_task(self):
# with contextlib.redirect_stdout(StringIO()):
with self._train_transformer(seed=123) as model1:
with self._train_transformer(seed=456) as model2:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model1, model2]
)
self.assertEqual(len(ensemble), 2)
# after Transformer has been migrated to Hydra, this will probably
# become cfg.common.seed
self.assertEqual(ensemble[0].args.seed, 123)
self.assertEqual(ensemble[1].args.seed, 456)
# the task from the first model should be returned
self.assertTrue("seed123" in task.cfg.data)
# last cfg is saved
self.assertEqual(cfg.common.seed, 456)
def test_prune_state_dict(self):
with contextlib.redirect_stdout(StringIO()):
extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"]
with self._train_transformer(seed=1, extra_args=extra_args) as model:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model],
arg_overrides={
"encoder_layers_to_keep": "0,2",
"decoder_layers_to_keep": "1",
},
)
self.assertEqual(len(ensemble), 1)
self.assertEqual(len(ensemble[0].encoder.layers), 2)
self.assertEqual(len(ensemble[0].decoder.layers), 1)
def test_torch_persistent_save_async(self):
state_dict = {}
filename = "async_checkpoint.pt"
with patch(f"{checkpoint_utils.__name__}.PathManager.opena") as mock_opena:
with patch(f"{checkpoint_utils.__name__}._torch_persistent_save") as mock_save:
checkpoint_utils.torch_persistent_save(
state_dict, filename, async_write=True
)
mock_opena.assert_called_with(filename, "wb")
mock_save.assert_called()
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_checkpoint_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_checkpoint_utils.py",
"repo_id": "COCO-LM",
"token_count": 1976
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestMemoryEfficientFP16(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_load_state_dict(self):
# define simple FP16 model
model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())
# initialize memory efficient FP16 optimizer
# with pseudo DictConfigs
optimizer = FairseqAdam(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
lr=[0.00001],
)
)
),
params=params,
)
me_optimizer = MemoryEfficientFP16Optimizer(
cfg=OmegaConf.create(
{
"common": vars(
argparse.Namespace(
fp16_init_scale=1,
fp16_scale_window=1,
fp16_scale_tolerance=1,
threshold_loss_scale=1,
min_loss_scale=1e-4,
)
)
}
),
params=params,
optimizer=optimizer,
)
# optimizer state is created in the first step
loss = model(torch.rand(5).cuda().half()).sum()
me_optimizer.backward(loss)
me_optimizer.step()
# reload state
state = me_optimizer.state_dict()
me_optimizer.load_state_dict(state)
for k, v in me_optimizer.optimizer.state.items():
self.assertTrue(k.dtype == torch.float16)
for v_i in v.values():
if torch.is_tensor(v_i):
self.assertTrue(v_i.dtype == torch.float32)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_memory_efficient_fp16.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_memory_efficient_fp16.py",
"repo_id": "COCO-LM",
"token_count": 1313
}
| 199 |
datadir: /data/CMIP6/AWI-ESM
name: 2m_temperature
cmip_name: tas
era_name: t2m
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/AWI-ESM/config_2m_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/AWI-ESM/config_2m_temperature.yml",
"repo_id": "ClimaX",
"token_count": 68
}
| 200 |
datadir: /data/CMIP6/HAMMOZ
name: specific_humidity
cmip_name: hus
era_name: q
run: r1i1p1f1
version: v20190628
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/HAMMOZ/config_specific_humidity.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/HAMMOZ/config_specific_humidity.yml",
"repo_id": "ClimaX",
"token_count": 70
}
| 201 |
datadir: /data/CMIP6/TaiESM1
server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP
name: specific_humidity
cmip_name: hus
era_name: q
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/TaiESM1/config_specific_humidity.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/TaiESM1/config_specific_humidity.yml",
"repo_id": "ClimaX",
"token_count": 102
}
| 202 |
import math
import torch
import torch.nn.functional as F
from timm.models.layers.helpers import to_2tuple
from torch import nn
def _get_conv2d_weights(
in_channels,
out_channels,
kernel_size,
):
weight = torch.empty(out_channels, in_channels, *kernel_size)
return weight
def _get_conv2d_biases(out_channels):
bias = torch.empty(out_channels)
return bias
class ParallelVarPatchEmbed(nn.Module):
"""Variable to Patch Embedding with multiple variables in a single kernel. Key idea is to use Grouped Convolutions.
Args:
max_vars (int): Maximum number of variables
img_size (int): Image size
patch_size (int): Patch size
embed_dim (int): Embedding dimension
norm_layer (nn.Module, optional): Normalization layer. Defaults to None.
flatten (bool, optional): Flatten the output. Defaults to True.
"""
def __init__(self, max_vars: int, img_size, patch_size, embed_dim, norm_layer=None, flatten=True):
super().__init__()
self.max_vars = max_vars
self.img_size = to_2tuple(img_size)
self.patch_size = to_2tuple(patch_size)
        self.grid_size = (self.img_size[0] // self.patch_size[0], self.img_size[1] // self.patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
grouped_weights = torch.stack(
[_get_conv2d_weights(1, embed_dim, self.patch_size) for _ in range(max_vars)], dim=0
)
self.proj_weights = nn.Parameter(grouped_weights)
grouped_biases = torch.stack([_get_conv2d_biases(embed_dim) for _ in range(max_vars)], dim=0)
self.proj_biases = nn.Parameter(grouped_biases)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
self.reset_parameters()
def reset_parameters(self):
for idx in range(self.max_vars):
nn.init.kaiming_uniform_(self.proj_weights[idx], a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.proj_weights[idx])
if fan_in != 0:
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.proj_biases[idx], -bound, bound)
def forward(self, x, vars=None):
B, C, H, W = x.shape
if vars is None:
vars = range(self.max_vars)
weights = self.proj_weights[vars].flatten(0, 1)
biases = self.proj_biases[vars].flatten(0, 1)
groups = len(vars)
proj = F.conv2d(x, weights, biases, groups=groups, stride=self.patch_size)
if self.flatten:
proj = proj.reshape(B, groups, -1, *proj.shape[-2:])
proj = proj.flatten(3).transpose(2, 3)
proj = self.norm(proj)
return proj
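# Minimal usage sketch (not part of the original module). The sizes below are
# illustrative assumptions, chosen only to show how a subset of variables is embedded
# with a single grouped convolution.
if __name__ == "__main__":
    embed = ParallelVarPatchEmbed(max_vars=5, img_size=(32, 64), patch_size=2, embed_dim=128)
    x = torch.randn(4, 3, 32, 64)  # batch of 4 samples, one channel per selected variable
    tokens = embed(x, vars=[0, 2, 4])  # use the kernels of variables 0, 2 and 4 only
    print(tokens.shape)  # (4, 3, 512, 128): batch, variables, patches, embed_dim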
|
ClimaX/src/climax/parallelpatchembed.py/0
|
{
"file_path": "ClimaX/src/climax/parallelpatchembed.py",
"repo_id": "ClimaX",
"token_count": 1257
}
| 203 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import os
import click
import numpy as np
import xarray as xr
from tqdm import tqdm
from climax.utils.data_utils import DEFAULT_PRESSURE_LEVELS, NAME_TO_VAR
HOURS_PER_YEAR = 8760 # 365-day year
def nc2np(path, variables, years, save_dir, partition, num_shards_per_year):
os.makedirs(os.path.join(save_dir, partition), exist_ok=True)
if partition == "train":
normalize_mean = {}
normalize_std = {}
climatology = {}
constants = xr.open_mfdataset(os.path.join(path, "constants.nc"), combine="by_coords", parallel=True)
constant_fields = ["land_sea_mask", "orography", "lattitude"]
constant_values = {}
for f in constant_fields:
constant_values[f] = np.expand_dims(constants[NAME_TO_VAR[f]].to_numpy(), axis=(0, 1)).repeat(
HOURS_PER_YEAR, axis=0
)
if partition == "train":
normalize_mean[f] = constant_values[f].mean(axis=(0, 2, 3))
normalize_std[f] = constant_values[f].std(axis=(0, 2, 3))
for year in tqdm(years):
np_vars = {}
# constant variables
for f in constant_fields:
np_vars[f] = constant_values[f]
# non-constant fields
for var in variables:
ps = glob.glob(os.path.join(path, var, f"*{year}*.nc"))
ds = xr.open_mfdataset(ps, combine="by_coords", parallel=True) # dataset for a single variable
code = NAME_TO_VAR[var]
if len(ds[code].shape) == 3: # surface level variables
ds[code] = ds[code].expand_dims("val", axis=1)
# remove the last 24 hours if this year has 366 days
np_vars[var] = ds[code].to_numpy()[:HOURS_PER_YEAR]
if partition == "train": # compute mean and std of each var in each year
var_mean_yearly = np_vars[var].mean(axis=(0, 2, 3))
var_std_yearly = np_vars[var].std(axis=(0, 2, 3))
if var not in normalize_mean:
normalize_mean[var] = [var_mean_yearly]
normalize_std[var] = [var_std_yearly]
else:
normalize_mean[var].append(var_mean_yearly)
normalize_std[var].append(var_std_yearly)
clim_yearly = np_vars[var].mean(axis=0)
if var not in climatology:
climatology[var] = [clim_yearly]
else:
climatology[var].append(clim_yearly)
else: # multiple-level variables, only use a subset
assert len(ds[code].shape) == 4
all_levels = ds["level"][:].to_numpy()
all_levels = np.intersect1d(all_levels, DEFAULT_PRESSURE_LEVELS)
for level in all_levels:
ds_level = ds.sel(level=[level])
level = int(level)
# remove the last 24 hours if this year has 366 days
np_vars[f"{var}_{level}"] = ds_level[code].to_numpy()[:HOURS_PER_YEAR]
if partition == "train": # compute mean and std of each var in each year
var_mean_yearly = np_vars[f"{var}_{level}"].mean(axis=(0, 2, 3))
var_std_yearly = np_vars[f"{var}_{level}"].std(axis=(0, 2, 3))
if var not in normalize_mean:
normalize_mean[f"{var}_{level}"] = [var_mean_yearly]
normalize_std[f"{var}_{level}"] = [var_std_yearly]
else:
normalize_mean[f"{var}_{level}"].append(var_mean_yearly)
normalize_std[f"{var}_{level}"].append(var_std_yearly)
clim_yearly = np_vars[f"{var}_{level}"].mean(axis=0)
if f"{var}_{level}" not in climatology:
climatology[f"{var}_{level}"] = [clim_yearly]
else:
climatology[f"{var}_{level}"].append(clim_yearly)
assert HOURS_PER_YEAR % num_shards_per_year == 0
num_hrs_per_shard = HOURS_PER_YEAR // num_shards_per_year
for shard_id in range(num_shards_per_year):
start_id = shard_id * num_hrs_per_shard
end_id = start_id + num_hrs_per_shard
sharded_data = {k: np_vars[k][start_id:end_id] for k in np_vars.keys()}
np.savez(
os.path.join(save_dir, partition, f"{year}_{shard_id}.npz"),
**sharded_data,
)
if partition == "train":
for var in normalize_mean.keys():
if var not in constant_fields:
normalize_mean[var] = np.stack(normalize_mean[var], axis=0)
normalize_std[var] = np.stack(normalize_std[var], axis=0)
for var in normalize_mean.keys(): # aggregate over the years
if var not in constant_fields:
mean, std = normalize_mean[var], normalize_std[var]
# var(X) = E[var(X|Y)] + var(E[X|Y])
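                # (law of total variance with Y = year: the overall variance is the mean
                #  of the per-year variances plus the variance of the per-year means)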
variance = (std**2).mean(axis=0) + (mean**2).mean(axis=0) - mean.mean(axis=0) ** 2
std = np.sqrt(variance)
# E[X] = E[E[X|Y]]
mean = mean.mean(axis=0)
normalize_mean[var] = mean
normalize_std[var] = std
np.savez(os.path.join(save_dir, "normalize_mean.npz"), **normalize_mean)
np.savez(os.path.join(save_dir, "normalize_std.npz"), **normalize_std)
for var in climatology.keys():
climatology[var] = np.stack(climatology[var], axis=0)
climatology = {k: np.mean(v, axis=0) for k, v in climatology.items()}
np.savez(
os.path.join(save_dir, partition, "climatology.npz"),
**climatology,
)
@click.command()
@click.option("--root_dir", type=click.Path(exists=True))
@click.option("--save_dir", type=str)
@click.option(
"--variables",
"-v",
type=click.STRING,
multiple=True,
default=[
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"toa_incident_solar_radiation",
"total_precipitation",
"geopotential",
"u_component_of_wind",
"v_component_of_wind",
"temperature",
"relative_humidity",
"specific_humidity",
],
)
@click.option("--start_train_year", type=int, default=1979)
@click.option("--start_val_year", type=int, default=2016)
@click.option("--start_test_year", type=int, default=2017)
@click.option("--end_year", type=int, default=2019)
@click.option("--num_shards", type=int, default=8)
def main(
root_dir,
save_dir,
variables,
start_train_year,
start_val_year,
start_test_year,
end_year,
num_shards,
):
assert start_val_year > start_train_year and start_test_year > start_val_year and end_year > start_test_year
train_years = range(start_train_year, start_val_year)
val_years = range(start_val_year, start_test_year)
test_years = range(start_test_year, end_year)
os.makedirs(save_dir, exist_ok=True)
nc2np(root_dir, variables, train_years, save_dir, "train", num_shards)
nc2np(root_dir, variables, val_years, save_dir, "val", num_shards)
nc2np(root_dir, variables, test_years, save_dir, "test", num_shards)
# save lat and lon data
ps = glob.glob(os.path.join(root_dir, variables[0], f"*{train_years[0]}*.nc"))
x = xr.open_mfdataset(ps[0], parallel=True)
lat = x["lat"].to_numpy()
lon = x["lon"].to_numpy()
np.save(os.path.join(save_dir, "lat.npy"), lat)
np.save(os.path.join(save_dir, "lon.npy"), lon)
if __name__ == "__main__":
main()
|
ClimaX/src/data_preprocessing/nc2np_equally_era5.py/0
|
{
"file_path": "ClimaX/src/data_preprocessing/nc2np_equally_era5.py",
"repo_id": "ClimaX",
"token_count": 3951
}
| 204 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as spectral_norm
from models.networks.normalization import SPADE
from util.util import vgg_preprocess
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1):
super(ResidualBlock, self).__init__()
self.relu = nn.PReLU()
self.model = nn.Sequential(
nn.ReflectionPad2d(padding),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride),
nn.InstanceNorm2d(out_channels),
self.relu,
nn.ReflectionPad2d(padding),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride),
nn.InstanceNorm2d(out_channels),
)
def forward(self, x):
out = self.relu(x + self.model(x))
return out
class SPADEResnetBlock(nn.Module):
def __init__(self, fin, fout, opt, use_se=False, dilation=1):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
fmiddle = min(fin, fout)
self.opt = opt
self.pad_type = 'nozero'
self.use_se = use_se
# create conv layers
if self.pad_type != 'zero':
self.pad = nn.ReflectionPad2d(dilation)
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0, dilation=dilation)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0, dilation=dilation)
else:
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in opt.norm_G:
self.conv_0 = spectral_norm(self.conv_0)
self.conv_1 = spectral_norm(self.conv_1)
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = opt.norm_G.replace('spectral', '')
if 'spade_ic' in opt:
ic = opt.spade_ic
else:
ic = 4*3+opt.label_nc
self.norm_0 = SPADE(spade_config_str, fin, ic, PONO=opt.PONO)
self.norm_1 = SPADE(spade_config_str, fmiddle, ic, PONO=opt.PONO)
if self.learned_shortcut:
self.norm_s = SPADE(spade_config_str, fin, ic, PONO=opt.PONO)
def forward(self, x, seg1):
x_s = self.shortcut(x, seg1)
if self.pad_type != 'zero':
dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, seg1))))
dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, seg1))))
else:
dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
out = x_s + dx
return out
def shortcut(self, x, seg1):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg1))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1)
class VGG19_feature_color_torchversion(nn.Module):
"""
    NOTE: there is no need to pre-process the input;
    the input tensor should range in [0, 1]
"""
def __init__(self, pool='max', vgg_normal_correct=False, ic=3):
super(VGG19_feature_color_torchversion, self).__init__()
self.vgg_normal_correct = vgg_normal_correct
self.conv1_1 = nn.Conv2d(ic, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
if pool == 'max':
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
elif pool == 'avg':
self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)
self.pool5 = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x, out_keys, preprocess=True):
'''
NOTE: input tensor should range in [0,1]
'''
out = {}
if preprocess:
x = vgg_preprocess(x, vgg_normal_correct=self.vgg_normal_correct)
out['r11'] = F.relu(self.conv1_1(x))
out['r12'] = F.relu(self.conv1_2(out['r11']))
out['p1'] = self.pool1(out['r12'])
out['r21'] = F.relu(self.conv2_1(out['p1']))
out['r22'] = F.relu(self.conv2_2(out['r21']))
out['p2'] = self.pool2(out['r22'])
out['r31'] = F.relu(self.conv3_1(out['p2']))
out['r32'] = F.relu(self.conv3_2(out['r31']))
out['r33'] = F.relu(self.conv3_3(out['r32']))
out['r34'] = F.relu(self.conv3_4(out['r33']))
out['p3'] = self.pool3(out['r34'])
out['r41'] = F.relu(self.conv4_1(out['p3']))
out['r42'] = F.relu(self.conv4_2(out['r41']))
out['r43'] = F.relu(self.conv4_3(out['r42']))
out['r44'] = F.relu(self.conv4_4(out['r43']))
out['p4'] = self.pool4(out['r44'])
out['r51'] = F.relu(self.conv5_1(out['p4']))
out['r52'] = F.relu(self.conv5_2(out['r51']))
out['r53'] = F.relu(self.conv5_3(out['r52']))
out['r54'] = F.relu(self.conv5_4(out['r53']))
out['p5'] = self.pool5(out['r54'])
return [out[key] for key in out_keys]
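# Minimal usage sketch (weights would normally be loaded from a pretrained VGG19
# checkpoint first; the input size and layer keys below are illustrative):
#   vgg = VGG19_feature_color_torchversion(pool='max', vgg_normal_correct=True)
#   feats = vgg(torch.rand(1, 3, 256, 256), ['r12', 'r22', 'r32', 'r42', 'r52'])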
|
CoCosNet-v2/models/networks/architecture.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/architecture.py",
"repo_id": "CoCosNet-v2",
"token_count": 3586
}
| 205 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from torchvision.utils import save_image
import os
import imageio
import numpy as np
import data
from util.util import mkdir
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
if __name__ == '__main__':
opt = TestOptions().parse()
dataloader = data.create_dataloader(opt)
model = Pix2PixModel(opt)
if len(opt.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
else:
model.to(opt.gpu_ids[0])
model.eval()
save_root = os.path.join(opt.checkpoints_dir, opt.name, 'test')
mkdir(save_root)
for i, data_i in enumerate(dataloader):
print('{} / {}'.format(i, len(dataloader)))
if i * opt.batchSize >= opt.how_many:
break
imgs_num = data_i['label'].shape[0]
out = model(data_i, mode='inference')
if opt.save_per_img:
try:
for it in range(imgs_num):
save_name = os.path.join(save_root, '%08d_%04d.png' % (i, it))
save_image(out['fake_image'][it:it+1], save_name, padding=0, normalize=True)
except OSError as err:
print(err)
else:
label = data_i['label'][:,:3,:,:]
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), out['fake_image'].data.cpu()), 0)
try:
save_name = os.path.join(save_root, '%08d.png' % i)
save_image(imgs, save_name, nrow=imgs_num, padding=0, normalize=True)
except OSError as err:
print(err)
|
CoCosNet-v2/test.py/0
|
{
"file_path": "CoCosNet-v2/test.py",
"repo_id": "CoCosNet-v2",
"token_count": 802
}
| 206 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from data.pix2pix_dataset import Pix2pixDataset
class FlickrDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=150)
parser.set_defaults(contain_dontcare_label=True)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
root = os.path.join(opt.dataroot, 'test/images') if opt.phase == 'test' else os.path.join(opt.dataroot, 'images')
root_mask = root.replace('images', 'mask')
image_paths = sorted(os.listdir(root))
image_paths = [os.path.join(root, it) for it in image_paths]
label_paths = sorted(os.listdir(root_mask))
label_paths = [os.path.join(root_mask, it) for it in label_paths]
return label_paths, image_paths
def get_ref(self, opt):
extra = '_test_from_train' if opt.phase == 'test' else ''
with open('./data/flickr_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
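        # Each line of the ref file is comma-separated: the first field is the query
        # image name and the remaining fields are reference image names (format
        # inferred from the parsing below).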
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', 'test')
return ref_dict, train_test_folder
def imgpath_to_labelpath(self, path):
path_ref_label = path.replace('images', 'mask')
return path_ref_label
# In ADE20k, 'unknown' label is of value 0.
# Change the 'unknown' label to the last label to match other datasets.
# def postprocess(self, input_dict):
# label = input_dict['label']
# label = label - 1
# label[label == -1] = self.opt.label_nc
# input_dict['label'] = label
# if input_dict['label_ref'] is not None:
# label_ref = input_dict['label_ref']
# label_ref = label_ref - 1
# label_ref[label_ref == -1] = self.opt.label_nc
# input_dict['label_ref'] = label_ref
|
CoCosNet/data/flickr_dataset.py/0
|
{
"file_path": "CoCosNet/data/flickr_dataset.py",
"repo_id": "CoCosNet",
"token_count": 1203
}
| 207 |
# Code Documentation Generation
This repo provides the code for reproducing the experiments on the [CodeSearchNet](https://arxiv.org/abs/1909.09436) dataset for code documentation generation in six programming languages.
**!News: We have released a new pipeline for this task. The new pipeline needs only 2 P100 GPUs and less training time for Code Documentation Generation. Please refer to the [website](https://github.com/microsoft/CodeXGLUE/tree/main/Code-Text/code-to-text).**
## Dependency
- pip install torch==1.4.0
- pip install transformers==2.5.0
- pip install filelock
## Data Preprocess
We clean the CodeSearchNet dataset for this task with the following steps (a minimal sketch of these filters is given after the table below):
- Remove comments from the code
- Remove examples whose code cannot be parsed into an abstract syntax tree
- Remove examples whose documents have fewer than 3 or more than 256 tokens
- Remove examples whose documents contain special tokens (e.g. <img ...> or https:...)
- Remove examples whose documents are not written in English
Data statistics for the cleaned code documentation generation dataset are shown in the table below. We release the cleaned dataset on this [website](https://drive.google.com/open?id=1rd2Tc6oUWBo7JouwexW3ksQ0PaOhUr6h).
| PL | Training | Dev | Test |
| :--------- | :------: | :----: | :----: |
| Python | 251,820 | 13,914 | 14,918 |
| PHP | 241,241 | 12,982 | 14,014 |
| Go | 167,288 | 7,325 | 8,122 |
| Java | 164,923 | 5,183 | 10,955 |
| JavaScript | 58,025 | 3,885 | 3,291 |
| Ruby | 24,927 | 1,400 | 1,261 |
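The cleaning scripts themselves are not included in this repo, but the document-length and special-token filters listed above are easy to reproduce. The sketch below is illustrative only: it assumes the standard CodeSearchNet jsonl format with `docstring` and `docstring_tokens` fields, and the exact boundary handling and URL patterns are assumptions; the comment-stripping, AST and English-language filters are omitted.
```python
import json

def keep_example(example, min_doc_tokens=3, max_doc_tokens=256):
    # Length filter: drop documents with fewer than 3 or more than 256 tokens.
    doc_tokens = example.get("docstring_tokens", [])
    if not (min_doc_tokens <= len(doc_tokens) <= max_doc_tokens):
        return False
    # Special-token filter: drop documents containing markup or URLs.
    doc = example.get("docstring", "")
    if "<img" in doc or "http://" in doc or "https://" in doc:
        return False
    return True

def clean_jsonl(in_path, out_path):
    with open(in_path, encoding="utf-8") as fin, open(out_path, "w", encoding="utf-8") as fout:
        for line in fin:
            if keep_example(json.loads(line)):
                fout.write(line)
```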
## Data Download
You can download the dataset from the [website](https://drive.google.com/open?id=1rd2Tc6oUWBo7JouwexW3ksQ0PaOhUr6h), or use the following commands.
```shell
pip install gdown
mkdir data data/code2nl
cd data/code2nl
gdown https://drive.google.com/uc?id=1rd2Tc6oUWBo7JouwexW3ksQ0PaOhUr6h
unzip Cleaned_CodeSearchNet.zip
rm Cleaned_CodeSearchNet.zip
cd ../..
```
## Fine-Tune
We fine-tuned the model on 4*P40 GPUs.
```shell
cd code2nl
lang=php #programming language
lr=5e-5
batch_size=64
beam_size=10
source_length=256
target_length=128
data_dir=../data/code2nl/CodeSearchNet
output_dir=model/$lang
train_file=$data_dir/$lang/train.jsonl
dev_file=$data_dir/$lang/valid.jsonl
eval_steps=1000 #400 for ruby, 600 for javascript, 1000 for others
train_steps=50000 #20000 for ruby, 30000 for javascript, 50000 for others
pretrained_model=microsoft/codebert-base #Roberta: roberta-base
python run.py --do_train --do_eval --model_type roberta --model_name_or_path $pretrained_model --train_filename $train_file --dev_filename $dev_file --output_dir $output_dir --max_source_length $source_length --max_target_length $target_length --beam_size $beam_size --train_batch_size $batch_size --eval_batch_size $batch_size --learning_rate $lr --train_steps $train_steps --eval_steps $eval_steps
```
## Inference and Evaluation
After fine-tuning, inference and evaluation are as follows:
```shell
lang=php #programming language
beam_size=10
batch_size=128
source_length=256
target_length=128
output_dir=model/$lang
data_dir=../data/code2nl/CodeSearchNet
dev_file=$data_dir/$lang/valid.jsonl
test_file=$data_dir/$lang/test.jsonl
test_model=$output_dir/checkpoint-best-bleu/pytorch_model.bin #checkpoint for test
python run.py --do_test --model_type roberta --model_name_or_path microsoft/codebert-base --load_model_path $test_model --dev_filename $dev_file --test_filename $test_file --output_dir $output_dir --max_source_length $source_length --max_target_length $target_length --beam_size $beam_size --eval_batch_size $batch_size
```
The results on CodeSearchNet are shown in the table below:
| Model | Ruby | Javascript | Go | Python | Java | PHP | Overall |
| ----------- | :-------: | :--------: | :-------: | :-------: | :-------: | :-------: | :-------: |
| Seq2Seq | 9.64 | 10.21 | 13.98 | 15.93 | 15.09 | 21.08 | 14.32 |
| Transformer | 11.18 | 11.59 | 16.38 | 15.81 | 16.26 | 22.12 | 15.56 |
| RoBERTa | 11.17 | 11.90 | 17.72 | 18.14 | 16.47 | 24.02 | 16.57 |
| CodeBERT | **12.16** | **14.90** | **18.07** | **19.06** | **17.65** | **25.16** | **17.83** |
|
CodeBERT/CodeBERT/code2nl/README.md/0
|
{
"file_path": "CodeBERT/CodeBERT/code2nl/README.md",
"repo_id": "CodeBERT",
"token_count": 1583
}
| 208 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import pickle
import random
import torch
import numpy as np
from itertools import cycle
import json
from collections import Counter
from model import Seq2Seq
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup, RobertaConfig, RobertaModel, RobertaTokenizer
from dataset import TextDataset
from metric import compute_metrics, compute_singleline_metrics
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def eval(args, model, tokenizer, eval_dataset,prefix=""):
model.to(args.device)
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_dataloader = DataLoader(eval_dataset,
sampler = SequentialSampler(eval_dataset),
batch_size = args.eval_batch_size,
num_workers = 4,
drop_last = False)
# Eval!
logger.warning("***** Running evaluation *****")
logger.warning(" Num examples = %d", len(eval_dataset))
logger.warning(" Batch size = %d", args.eval_batch_size)
model.eval()
pred_list = []
gold_list = []
for batch in eval_dataloader:
source_ids, target_ids,gold_ids =[x.to(args.device) for x in batch]
with torch.no_grad():
preds = model(source_ids)
# convert ids to text
for i,pred in enumerate(preds):
t = pred[0].cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
gold = gold_ids[i].cpu().numpy()
gold = list(gold)
if 1 in gold:
gold = gold[:gold.index(1)]
                gold = gold[1:-1]  # strip the leading <mask0> and trailing </s> special tokens
gold = tokenizer.decode(gold,clean_up_tokenization_spaces=False)
pred_list.append(text)
gold_list.append(gold)
with open(args.output_dir+"/preds.txt",'w') as f:
for i in pred_list:
f.write(str(i) + '\n')
with open(args.output_dir+"/golds.txt",'w') as f:
for i in gold_list:
f.write(str(i) + '\n')
if args.prefix == "singleline":
metric_list = compute_singleline_metrics(pred_list, gold_list)
logger.warning(f"Trace Accuracy: {metric_list[0]}")
logger.warning(f"Identifier Precision: {metric_list[1]}")
logger.warning(f"Identifier Recall: {metric_list[2]}")
logger.warning(f"Identifier F1: {metric_list[3]}")
else:
metric_list = compute_metrics(pred_list, gold_list)
logger.warning(f"Output Accuracy: {metric_list[0]}")
logger.warning(f"Trace Accuracy: {metric_list[1]}")
logger.warning(f"Line Precision: {metric_list[2]}")
logger.warning(f"Line Recall: {metric_list[3]}")
logger.warning(f"Line F1: {metric_list[4]}")
logger.warning(f"Identifier Precision: {metric_list[5]}")
logger.warning(f"Identifier Recall: {metric_list[6]}")
logger.warning(f"Identifier F1: {metric_list[7]}")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--prefix", default="", type=str,
help="The input data prefix.")
## Required parameters
parser.add_argument("--train_data_path", default=None, type=str,
help="The input training data path")
parser.add_argument("--eval_data_path", default=None, type=str,required=True,
help="The input evaluating data path")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--data_cache_dir", default=None, type=str, required=True,
help="The output directory where data cache will be written.")
parser.add_argument("--reload_dir", default=None, type=str,
help="The directory where the model checkpoints will be reloaded from.")
## Other parameters
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--config_name", default=None, type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default=None, type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--block_size", default=1024, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--node_index", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--gpu_per_node", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
parser.add_argument("--max_source_length", default=256, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=768, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
import datetime
torch.distributed.init_process_group(backend='nccl',timeout=datetime.timedelta(0,1800000))
args.local_rank+=args.node_index*args.gpu_per_node
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.INFO) #logging.WARN
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.log_file = os.path.join(args.output_dir, 'log.txt')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if os.path.exists(args.log_file):
logfile = logging.FileHandler(args.log_file, 'a')
else:
logfile = logging.FileHandler(args.log_file, 'w')
fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', '%m/%d/%Y %H:%M:%S %p')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
if args.local_rank == 0:
torch.distributed.barrier()
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
model_name_or_path = args.model_name_or_path
tokenizer = RobertaTokenizer.from_pretrained(model_name_or_path if args.tokenizer_name is None else args.tokenizer_name)
config = RobertaConfig.from_pretrained(model_name_or_path if args.config_name is None else args.config_name)
config.is_decoder = True
encoder = RobertaModel.from_pretrained(model_name_or_path,config=config)
model = Seq2Seq(encoder=encoder,decoder=encoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.convert_tokens_to_ids(["<mask0>"])[0],eos_id=tokenizer.sep_token_id)
if args.local_rank == 0:
torch.distributed.barrier()
logger.warning("Training/evaluation parameters %s", args)
if args.local_rank == -1:
local_rank = 0
world_size = 1
else:
local_rank = args.local_rank
world_size = torch.distributed.get_world_size()
# reload and preprocess data
eval_dataset = TextDataset(tokenizer, args, args.eval_data_path, local_rank, world_size, logger, "eval",args.prefix)
# eval
eval(args, model, tokenizer,eval_dataset)
if __name__ == "__main__":
main()
|
CodeBERT/CodeExecutor/inference/run.py/0
|
{
"file_path": "CodeBERT/CodeExecutor/inference/run.py",
"repo_id": "CodeBERT",
"token_count": 5702
}
| 209 |
git clone https://github.com/tree-sitter/tree-sitter-c
git clone https://github.com/tree-sitter/tree-sitter-cpp
git clone https://github.com/tree-sitter/tree-sitter-typescript
git clone https://github.com/tree-sitter/tree-sitter-go
git clone https://github.com/tree-sitter/tree-sitter-javascript
git clone https://github.com/tree-sitter/tree-sitter-python
git clone https://github.com/tree-sitter/tree-sitter-ruby
git clone https://github.com/tree-sitter/tree-sitter-php
git clone https://github.com/tree-sitter/tree-sitter-java
git clone https://github.com/tree-sitter/tree-sitter-c-sharp
python build.py
|
CodeBERT/LongCoder/parser/build.sh/0
|
{
"file_path": "CodeBERT/LongCoder/parser/build.sh",
"repo_id": "CodeBERT",
"token_count": 209
}
| 210 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import json
import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from model import Model
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens,
input_ids,
index,
label,
):
self.input_tokens = input_tokens
self.input_ids = input_ids
self.index = index
self.label = label
def convert_examples_to_features(js,tokenizer,args):
"""convert examples to token ids"""
code = ' '.join(js['code'].split())
code_tokens = tokenizer.tokenize(code)[:args.block_size-4]
source_tokens = [tokenizer.cls_token,"<encoder_only>",tokenizer.sep_token] + code_tokens + [tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
padding_length = args.block_size - len(source_ids)
source_ids += [tokenizer.pad_token_id]*padding_length
return InputFeatures(source_tokens,source_ids,js['index'],int(js['label']))
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path=None):
self.examples = []
data = []
with open(file_path) as f:
for line in f:
line = line.strip()
js = json.loads(line)
data.append(js)
for js in data:
self.examples.append(convert_examples_to_features(js,tokenizer,args))
if 'train' in file_path:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("label: {}".format(example.label))
logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
self.label_examples = {}
for e in self.examples:
if e.label not in self.label_examples:
self.label_examples[e.label]=[]
self.label_examples[e.label].append(e)
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
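        # Build a triplet for contrastive training: the anchor example at index i,
        # a positive example that shares the anchor's label (but has a different index),
        # and a negative example sampled from a different label.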
label = self.examples[i].label
index = self.examples[i].index
labels = list(self.label_examples)
labels.remove(label)
while True:
shuffle_example = random.sample(self.label_examples[label],1)[0]
if shuffle_example.index != index:
p_example = shuffle_example
break
n_example = random.sample(self.label_examples[random.sample(labels,1)[0]],1)[0]
return (torch.tensor(self.examples[i].input_ids),torch.tensor(p_example.input_ids),
torch.tensor(n_example.input_ids),torch.tensor(label))
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer):
""" Train the model """
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
    args.max_steps = args.num_train_epochs * len(train_dataloader)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
num_training_steps=args.max_steps)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size // args.n_gpu )
logger.info(" Total train batch size = %d", args.train_batch_size)
logger.info(" Total optimization steps = %d", args.max_steps)
losses, best_map = [], 0
model.zero_grad()
for idx in range(args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
inputs = batch[0].to(args.device)
p_inputs = batch[1].to(args.device)
n_inputs = batch[2].to(args.device)
labels = batch[3].to(args.device)
model.train()
loss,vec = model(inputs,p_inputs,n_inputs,labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
losses.append(loss.item())
if (step+1)% 100==0:
logger.info("epoch {} step {} loss {}".format(idx,step+1,round(np.mean(losses[-100:]),4)))
optimizer.step()
optimizer.zero_grad()
scheduler.step()
results = evaluate(args, model, tokenizer, args.eval_data_file)
for key, value in results.items():
logger.info(" %s = %s", key, round(value,4))
if results['eval_map'] > best_map:
best_map = results['eval_map']
logger.info(" "+"*"*20)
logger.info(" Best map:%s",round(best_map,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-map'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
model_to_save = model.module if hasattr(model,'module') else model
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def evaluate(args, model, tokenizer, data_file):
""" Evaluate the model """
eval_dataset = TextDataset(tokenizer, args, data_file)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,batch_size=args.eval_batch_size, num_workers=4)
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
vecs=[]
labels=[]
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
p_inputs = batch[1].to(args.device)
n_inputs = batch[2].to(args.device)
label = batch[3].to(args.device)
with torch.no_grad():
lm_loss,vec = model(inputs,p_inputs,n_inputs,label)
eval_loss += lm_loss.mean().item()
vecs.append(vec.cpu().numpy())
labels.append(label.cpu().numpy())
nb_eval_steps += 1
vecs = np.concatenate(vecs,0)
labels = np.concatenate(labels,0)
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.tensor(eval_loss)
scores=np.matmul(vecs,vecs.T)
dic={}
for i in range(scores.shape[0]):
scores[i,i] = -1000000
if int(labels[i]) not in dic:
dic[int(labels[i])] = -1
dic[int(labels[i])] += 1
sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
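    # Mean Average Precision: for each example, rank all others by similarity and, within the
    # top R results (R = number of other examples sharing its label), average the precision
    # at each rank where a same-label example is retrieved.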
MAP = []
for i in range(scores.shape[0]):
cont = 0
label = int(labels[i])
Avep = []
for j in range(dic[label]):
index = sort_ids[i,j]
if int(labels[index]) == label:
Avep.append((len(Avep)+1)/(j+1))
MAP.append(sum(Avep)/dic[label])
result = {
"eval_loss": float(perplexity),
"eval_map":float(np.mean(MAP))
}
return result
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--train_data_file", default=None, type=str,
help="The input training data file (a jsonl file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a jsonl file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input test data file to evaluate the perplexity on (a jsonl file).")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
#print arguments
args = parser.parse_args()
#set log
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO )
#set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
logger.info("device: %s, n_gpu: %s",device, args.n_gpu)
# Set seed
set_seed(args.seed)
#build model
tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path)
config = RobertaConfig.from_pretrained(args.model_name_or_path)
model = RobertaModel.from_pretrained(args.model_name_or_path)
model = Model(model,config,tokenizer,args)
logger.info("Training/evaluation parameters %s", args)
model.to(args.device)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Training
if args.do_train:
train_dataset = TextDataset(tokenizer, args,args.train_data_file)
train(args, train_dataset, model, tokenizer)
# Evaluation
results = {}
if args.do_eval:
checkpoint_prefix = 'checkpoint-best-map/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
result = evaluate(args, model, tokenizer, args.eval_data_file)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key]*100 if "map" in key else result[key],2)))
if args.do_test:
checkpoint_prefix = 'checkpoint-best-map/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
result = evaluate(args, model, tokenizer, args.test_data_file)
logger.info("***** Test results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key]*100 if "map" in key else result[key],2)))
if __name__ == "__main__":
main()
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/run.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/run.py",
"repo_id": "CodeBERT",
"token_count": 6739
}
| 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import glob
import pickle
import json
import tiktoken
from transformers import AutoTokenizer
class CONSTANTS:
# regular version for Codex
api_benchmark = 'random_api'
line_benchmark = 'random_line'
# short version for CodeGen
short_api_benchmark = 'short_api'
short_line_benchmark = 'short_line'
gt = 'gt'
rg = 'r-g' # vanilla retrieval-augmented approach
rgrg = 'r-g-r-g' # RepoCoder, two-stage retrieval and generation
class FilePathBuilder:
api_completion_benchmark = 'datasets/random-api-completion.test.jsonl'
random_line_completion_benchmark = 'datasets/random-line-completion.test.jsonl'
# short version for codegen
short_api_completion_benchmark = 'datasets/random-api-completion-short-version.test.jsonl'
short_random_line_completion_benchmark = 'datasets/random-line-completion-short-version.test.jsonl'
repo_base_dir = 'repositories/line_and_api_level'
@staticmethod
def make_needed_dir(file_path):
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
@staticmethod
def repo_windows_path(repo, window_size, slice_size):
out_path = os.path.join('cache/window/repos', f'{repo}_ws{window_size}_slice{slice_size}.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
@staticmethod
def search_first_window_path(benchmark, mode, repo, window_size):
# mode includes gt and s-g
out_path = os.path.join(f'cache/window/{benchmark}/{mode}', f'{repo}_ws{window_size}.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
@staticmethod
def gen_first_window_path(benchmark, mode, prediction_path, repo, window_size):
prediction_file_name = os.path.basename(prediction_path).replace('.0.jsonl', '')
out_path = os.path.join(f'cache/window/{benchmark}/{mode}', f'{prediction_file_name}.{repo}_ws{window_size}.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
@staticmethod
def one_gram_vector_path(window_file):
vector_path = window_file.replace('/window/', '/vector/')
out_path = vector_path.replace('.pkl', '.one-gram.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
@staticmethod
def ada002_vector_path(window_file):
vector_path = window_file.replace('/window/', '/vector/')
out_path = vector_path.replace('.pkl', '.ada002.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
@staticmethod
def retrieval_results_path(query_vector_file, repo_vector_file, max_top_k):
retrieval_base_dir = os.path.dirname(query_vector_file.replace('/vector/', '/retrieval/'))
query_file_name = os.path.basename(query_vector_file)
if query_file_name.endswith('.one-gram.pkl'):
query_file_name = query_file_name[:-len('.one-gram.pkl')]
elif query_file_name.endswith('.ada002.pkl'):
query_file_name = query_file_name[:-len('.ada002.pkl')]
repo_file_name = os.path.basename(repo_vector_file)[:-len('.pkl')]
out_path = os.path.join(retrieval_base_dir, f'{query_file_name}.{repo_file_name}.top{max_top_k}.pkl')
FilePathBuilder.make_needed_dir(out_path)
return out_path
class CodexTokenizer:
def __init__(self):
self.tokenizer = tiktoken.get_encoding("p50k_base")
def tokenize(self, text):
# return self.tokenizer.encode(text)
return self.tokenizer.encode_ordinary(text)
def decode(self, token_ids):
return self.tokenizer.decode(token_ids)
class CodeGenTokenizer:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained('Salesforce/codegen-6B-mono')
def tokenize(self, text):
return self.tokenizer.encode(text)
def decode(self, token_ids):
return self.tokenizer.decode(token_ids)
class Tools:
@staticmethod
def read_code(fname):
with open(fname, 'r', encoding='utf8') as f:
return f.read()
@staticmethod
def load_pickle(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
@staticmethod
def dump_pickle(obj, fname):
with open(fname, 'wb') as f:
pickle.dump(obj, f)
@staticmethod
def dump_json(obj, fname):
with open(fname, 'w', encoding='utf8') as f:
json.dump(obj, f)
@staticmethod
def dump_jsonl(obj, fname):
with open(fname, 'w', encoding='utf8') as f:
for item in obj:
f.write(json.dumps(item) + '\n')
@staticmethod
def load_jsonl(fname):
with open(fname, 'r', encoding='utf8') as f:
lines = []
for line in f:
lines.append(json.loads(line))
return lines
@staticmethod
def iterate_repository(repo):
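        # Walk every *.py file under the repository directory and load its source,
        # keyed by the file's path tuple relative to the base directory.
        # Files that cannot be read are skipped and reported afterwards.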
base_dir = FilePathBuilder.repo_base_dir
pattern = os.path.join(f'{base_dir}/{repo}', "**", "*.py")
files = glob.glob(pattern, recursive=True)
skipped_files = []
loaded_code_files = dict()
base_dir_list = os.path.normpath(base_dir).split(os.sep)
for fname in files:
try:
code = Tools.read_code(fname)
fpath_tuple = tuple(os.path.normpath(fname).split(os.sep)[len(base_dir_list):])
loaded_code_files[fpath_tuple]= code
except Exception as e:
skipped_files.append((fname, e))
continue
if len(skipped_files) > 0:
print(f"Skipped {len(skipped_files)} out of {len(files)} files due to I/O errors")
for fname, e in skipped_files:
print(f"{fname}: {e}")
return loaded_code_files
@staticmethod
def tokenize(code):
tokenizer = CodexTokenizer()
return tokenizer.tokenize(code)
|
CodeT/RepoCoder/utils.py/0
|
{
"file_path": "CodeT/RepoCoder/utils.py",
"repo_id": "CodeT",
"token_count": 2719
}
| 212 |
# Codex CLI - Natural Language Command Line Interface
This project uses [GPT-3 Codex](https://openai.com/blog/openai-codex/) to convert natural language commands into commands in PowerShell, Z shell and Bash.

The Command Line Interface (CLI) was the first major User Interface we used to interact with machines. It's incredibly powerful: you can do almost anything with a CLI, but it requires the user to express their intent extremely precisely. The user needs to _know the language of the computer_.
With the advent of Large Language Models (LLMs), particularly those that have been trained on code, it's possible to interact with a CLI using Natural Language (NL). In effect, these models understand natural language _and_ code well enough that they can translate from one to another.
This project aims to offer a cross-shell NL->Code experience to allow users to interact with their favorite CLI using NL. The user enters a command, like "what's my IP address", hits `Ctrl + G` and gets a suggestion for a command idiomatic to the shell they're using. The project uses the GPT-3 Codex model off-the-shelf, meaning the model has not been explicitly trained for the task. Instead we rely on a discipline called prompt engineering (see [section](#prompt-engineering-and-context-files) below) to coax the right commands from Codex.
**Note: The model can still make mistakes! Don't run a command if you don't understand it. If you're not sure what a command does, hit `Ctrl + C` to cancel it**.
This project took technical inspiration from the [zsh_codex](https://github.com/tom-doerr/zsh_codex) project, extending its functionality to span multiple shells and to customize the prompts passed to the model (see prompt engineering section below).
## Statement of Purpose
This repository aims to grow the understanding of using Codex in applications by providing an example of implementation and references to support the [Microsoft Build conference in 2022](https://mybuild.microsoft.com/). It is not intended to be a released product. Therefore, this repository is not for discussing OpenAI API or requesting new features.
## Requirements
* [Python 3.7.1+](https://www.python.org/downloads/)
* \[Windows\]: Python is added to PATH.
* An [OpenAI account](https://openai.com/api/)
* [OpenAI API Key](https://beta.openai.com/account/api-keys).
* [OpenAI Organization Id](https://beta.openai.com/account/org-settings). If you have multiple organizations, please update your [default organization](https://beta.openai.com/account/api-keys) to the one that has access to codex engines before getting the organization Id.
* [OpenAI Engine Id](https://beta.openai.com/docs/engines/codex-series-private-beta). It provides access to a model. For example, `code-davinci-002` or `code-cushman-001`. See [here](#what-openai-engines-are-available-to-me) for checking available engines.
## Installation
Please follow the installation instructions for PowerShell, bash or zsh from [here](./Installation.md).
## Usage
Once configured for your shell of preference, you can use the Codex CLI by writing a comment (starting with `#`) into your shell, and then hitting `Ctrl + G`.
The Codex CLI supports two primary modes: single-turn and multi-turn.
By default, multi-turn mode is off. It can be toggled on and off using the `# start multi-turn` and `# stop multi-turn` commands.
If the multi-turn mode is on, the Codex CLI will "remember" past interactions with the model, allowing you to refer back to previous actions and entities. If, for example, you asked the Codex CLI to change your time zone to mountain, and then said "change it back to pacific", the model would have the context from the previous interaction to know that "it" is the user's timezone:
```powershell
# change my timezone to mountain
tzutil /s "Mountain Standard Time"
# change it back to pacific
tzutil /s "Pacific Standard Time"
```
The tool creates a `current_context.txt` file that keeps track of past interactions, and passes them to the model on each subsequent command.
When multi-turn mode is off, this tool will not keep track of interaction history. There are tradeoffs to using multi-turn mode - though it enables compelling context resolution, it also increases overhead. If, for example, the model produces the wrong script for the job, the user will want to remove that from the context, otherwise future conversation turns will be more likely to produce the wrong script again. With multi-turn mode off, the model will behave completely deterministically - the same command will always produce the same output.
Any time the model seems to output consistently incorrect commands, you can use the `# stop multi-turn` command to stop the model from remembering past interactions and load in your default context. Alternatively, the `# default context` command does the same while preserving the multi-turn mode as on.
## Commands
| Command | Description |
|--|--|
| `start multi-turn` | Starts a multi-turn experience |
| `stop multi-turn` | Stops a multi-turn experience and loads default context |
| `load context <filename>` | Loads the context file from `contexts` folder |
| `default context` | Loads default shell context |
| `view context` | Opens the context file in a text editor |
| `save context <filename>` | Saves the context file to `contexts` folder, if name not specified, uses current date-time |
| `show config` | Shows the current configuration of your interaction with the model |
| `set <config-key> <config-value>` | Sets the configuration of your interaction with the model |
Feel free to improve your experience by changing the token limit, engine id and temperature using the set command. For example, `# set engine cushman-codex`, `# set temperature 0.5`, `# set max_tokens 50`.
## Prompt Engineering and Context Files
This project uses a discipline called _prompt engineering_ to coax GPT-3 Codex to generate commands from natural language. Specifically, we pass the model a series of examples of NL->Commands, to give it a sense of the kind of code it should be writing, and also to nudge it towards generating commands idiomatic to the shell you're using. These examples live in the `contexts` directory. See snippet from the PowerShell context below:
```powershell
# what's the weather in New York?
(Invoke-WebRequest -uri "wttr.in/NewYork").Content
# make a git ignore with node modules and src in it
"node_modules
src" | Out-File .gitignore
# open it in notepad
notepad .gitignore
```
Note that this project models natural language commands as comments, and provides examples of the kind of PowerShell scripts we expect the model to write. These examples include single-line completions, multi-line completions, and multi-turn completions (the "open it in notepad" example refers to the `.gitignore` file generated on the previous turn).
When a user enters a new command (say "what's my IP address"), we simply append that command onto the context (as a comment) and ask Codex to generate the code that should follow it. Having seen the examples above, Codex will know that it should write a short PowerShell script that satisfies the comment.
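The core of that flow can be sketched in a few lines of Python. This is a simplified illustration rather than the actual `src/codex_query.py` implementation; the function name, prompt layout, and parameter values below are assumptions made for clarity:

```python
import openai

def complete_command(user_query: str, context: str, engine: str = "code-davinci-002") -> str:
    """Append the new natural-language command to the context as a comment
    and ask Codex for the shell code that should follow it."""
    prompt = f"{context}\n# {user_query}\n"
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        temperature=0.3,   # illustrative values, not the CLI's actual defaults
        max_tokens=128,
        stop="#",          # stop before the model starts a new comment
    )
    return response["choices"][0]["text"]
```

In multi-turn mode, the returned script (together with the comment that produced it) would then be appended back onto `current_context.txt`, which is what lets later commands refer back to earlier ones.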
## Building your own Contexts
This project comes pre-loaded with contexts for each shell, along with some bonus contexts with other capabilities. Beyond these, you can build your own contexts to coax other behaviors out of the model. For example, if you want the Codex CLI to produce Kubernetes scripts, you can create a new context with examples of commands and the `kubectl` script the model might produce:
```bash
# make a K8s cluster IP called my-cs running on 5678:8080
kubectl create service clusterip my-cs --tcp=5678:8080
```
Add your context to the `contexts` folder and run `load context <filename>` to load it. You can also change the default context to your context file inside `src\prompt_file.py`.
Note that Codex will often produce correct scripts without any examples. Having been trained on a large corpus of code, it frequently knows how to produce specific commands. That said, building your own contexts helps coax the specific kind of script you're looking for - whether it's long or short, whether it declares variables or not, whether it refers back to previous commands, etc. You can also provide examples of your own CLI commands and scripts, to show Codex other tools it should consider using.
One important thing to consider is that if you add a new context, keep the multi-turn mode on to avoid our automatic defaulting (which was added to keep faulty contexts from breaking your experience).
We have added a [cognitive services context](./contexts/CognitiveServiceContext.md) which uses the cognitive services API to provide text to speech type responses as an example.
## Troubleshooting
Use `DEBUG_MODE` to take input from the terminal instead of stdin while debugging the code. This is useful when adding new commands and understanding why the tool is unresponsive.
Sometimes the `openai` package throws errors that aren't caught by the tool; you can add a catch block at the end of `codex_query.py` for that exception and print a custom error message.
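A minimal sketch of such a catch block, assuming the classic (pre-1.0) `openai` Python SDK and its `openai.error` exception classes; `query_codex` here is a stand-in for the tool's existing query logic, not a function from this repository:

```python
import sys
import openai

def query_codex(prompt: str) -> str:
    # Placeholder for the tool's existing completion call.
    response = openai.Completion.create(engine="code-davinci-002", prompt=prompt, max_tokens=100)
    return response["choices"][0]["text"]

try:
    print(query_codex("# what's my IP address\n"))
except openai.error.RateLimitError:
    print("# Codex CLI: OpenAI rate limit reached, please wait a moment and retry.", file=sys.stderr)
except openai.error.OpenAIError as error:
    print(f"# Codex CLI: request failed: {error}", file=sys.stderr)
```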
## FAQ
### What OpenAI engines are available to me?
You might have access to different [OpenAI engines](https://beta.openai.com/docs/api-reference/engines) per OpenAI organization. To check which engines are available to you, query the [List engines API](https://beta.openai.com/docs/api-reference/engines/list). See the following commands:
* Shell
```
curl https://api.openai.com/v1/engines \
-H 'Authorization: Bearer YOUR_API_KEY' \
-H 'OpenAI-Organization: YOUR_ORG_ID'
```
* PowerShell
PowerShell v5 (The default one comes with Windows)
```powershell
(Invoke-WebRequest -Uri https://api.openai.com/v1/engines -Headers @{"Authorization" = "Bearer YOUR_API_KEY"; "OpenAI-Organization" = "YOUR_ORG_ID"}).Content
```
PowerShell v7
```powershell
(Invoke-WebRequest -Uri https://api.openai.com/v1/engines -Authentication Bearer -Token (ConvertTo-SecureString "YOUR_API_KEY" -AsPlainText -Force) -Headers @{"OpenAI-Organization" = "YOUR_ORG_ID"}).Content
```
### Can I run the sample on Azure?
The sample code can currently be used with Codex on OpenAI’s API. In the coming months, the sample will be updated so you can also use it with the [Azure OpenAI Service](https://aka.ms/azure-openai).
|
Codex-CLI/README.md/0
|
{
"file_path": "Codex-CLI/README.md",
"repo_id": "Codex-CLI",
"token_count": 2674
}
| 213 |
################################################
## *** Codex CLI plugin function for Bash *** ##
## loaded by $HOME/.codexclirc ##
################################################
create_completion()
{
# Check settings in case the CLI has just been uninstalled
# Note: CODEX_CLI_PATH is defined in $HOME/.codexclirc
local SETTINGS="$CODEX_CLI_PATH/src/openaiapirc"
local SIZE=$(wc -c $SETTINGS | awk '{print $1}')
if [ ! -f "$SETTINGS" ]; then
echo "Codex CLI configuration is missing, try reinstalling."
return
fi
if (( $SIZE < 10 )); then
echo "Codex CLI configuration is missing, try reinstalling."
return
fi
# Get the text typed until now
text=${READLINE_LINE}
completion=$(echo -n "$text" | $CODEX_CLI_PATH/src/codex_query.py)
# Add completion to the current buffer
READLINE_LINE="${text}${completion}"
# Put the cursor at the end of the line
READLINE_POINT=${#READLINE_LINE}
}
|
Codex-CLI/scripts/bash_plugin.sh/0
|
{
"file_path": "Codex-CLI/scripts/bash_plugin.sh",
"repo_id": "Codex-CLI",
"token_count": 363
}
| 214 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: Python SDK of the Cognitive Face API.
"""
from . import face
from . import face_list
from . import large_face_list
from . import large_face_list_face
from . import large_person_group
from . import large_person_group_person
from . import large_person_group_person_face
from . import person
from . import person_group
from . import util
from .util import CognitiveFaceException
from .util import Key
from .util import BaseUrl
|
Cognitive-Face-Python/cognitive_face/__init__.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/__init__.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 150
}
| 215 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: test_large_person_group.py
Description: Unittests for Large Person Group section of the Cognitive Face
API.
"""
import uuid
import unittest
import cognitive_face as CF
from . import util
class TestLargePersonGroup(unittest.TestCase):
"""Unittests for Large Person Group section."""
def test_large_person_group(self):
"""Unittests for `large_person_group.create`,
`large_person_group.train`, `large_person_group.update`,
`large_person_group.get_status` and `large_person_group.delete`.
"""
large_person_group_id = str(uuid.uuid1())
res = CF.large_person_group.create(large_person_group_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
# Fake a person and a face to satisfy training.
res = CF.large_person_group_person.create(large_person_group_id,
'TempPerson')
person_id = res['personId']
image = '{}PersonGroup/Family1-Dad/Family1-Dad3.jpg'.format(
util.BASE_URL_IMAGE)
res = CF.large_person_group_person_face.add(
image, large_person_group_id, person_id)
res = CF.large_person_group.train(large_person_group_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.large_person_group.update(large_person_group_id, 'name')
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.large_person_group.get_status(large_person_group_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.large_person_group.delete(large_person_group_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_get(self):
"""Unittest for `large_person_group.get`."""
res = CF.large_person_group.get(util.DataStore.large_person_group_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_list(self):
"""Unittest for `large_person_group.list`."""
res = CF.large_person_group.list()
print(res)
self.assertIsInstance(res, list)
util.wait()
|
Cognitive-Face-Python/cognitive_face/tests/test_large_person_group.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/tests/test_large_person_group.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1025
}
| 216 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: panel_group.py
Description: Group Panel for Python SDK sample.
"""
import os
import wx
import wx.lib.scrolledpanel as scrolled
import util
import model
from view import base
class GroupPanel(base.MyPanel):
"""Group Panel."""
def __init__(self, parent):
super(GroupPanel, self).__init__(parent)
self.face_paths = []
self.faces = {}
self.vsizer = wx.BoxSizer(wx.VERTICAL)
self.panel = scrolled.ScrolledPanel(self)
self.hsizer = wx.BoxSizer()
self.hsizer.AddStretchSpacer()
self.hvsizer = wx.BoxSizer(wx.VERTICAL)
self.hvsizer.SetMinSize((util.INNER_PANEL_WIDTH, -1))
label = ('Click the button below to select a folder containing face '
'images.\nThe images will be grouped based on similarity.\n'
'You will see the different groups under the "Grouping '
'Results" label.')
self.static_text = wx.StaticText(self.panel, label=label)
self.static_text.Wrap(util.INNER_PANEL_WIDTH)
self.hvsizer.Add(self.static_text, 0, wx.ALL, 0)
self.vhsizer = wx.BoxSizer()
self.lsizer = wx.BoxSizer(wx.VERTICAL)
self.lsizer.SetMinSize((util.MAX_IMAGE_SIZE, -1))
flag = wx.EXPAND | wx.ALIGN_CENTER | wx.ALL
self.btn = wx.Button(self.panel, label='Group')
self.lsizer.Add(self.btn, 0, flag, 5)
self.Bind(wx.EVT_BUTTON, self.OnChooseFolder, self.btn)
self.grid = base.MyGridStaticBitmap(self.panel, 0, 4, 0, 0)
self.lsizer.Add(self.grid, 0, wx.ALL, 5)
self.vhsizer.Add(self.lsizer, 1, wx.EXPAND)
self.vhsizer.AddSpacer(90)
self.rsizer = wx.BoxSizer(wx.VERTICAL)
self.rsizer.SetMinSize((util.MAX_IMAGE_SIZE, -1))
flag = wx.ALIGN_CENTER | wx.EXPAND | wx.ALL
self.result_text = wx.StaticText(self.panel, label='Grouping Results:')
self.rsizer.Add(self.result_text, 0, flag, 5)
self.result = base.GroupResult(self.panel)
self.rsizer.Add(self.result, 0, wx.EXPAND)
self.vhsizer.Add(self.rsizer, 1, wx.EXPAND)
self.hvsizer.Add(self.vhsizer)
self.hsizer.Add(self.hvsizer)
self.hsizer.AddStretchSpacer()
self.hsizer.Layout()
self.panel.SetSizer(self.hsizer)
self.panel.Layout()
self.panel.SetupScrolling(scroll_x=False)
self.vsizer.Add(self.panel, 3, wx.EXPAND)
self.log = base.MyLog(self)
self.vsizer.Add(self.log, 1, wx.EXPAND)
self.SetSizerAndFit(self.vsizer)
def OnChooseFolder(self, evt):
"""Choose Folder."""
dlg = wx.DirDialog(self)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
del self.face_paths[:]
self.faces.clear()
for root, dirs, files in os.walk(path):
if files:
self.face_paths.extend(
[os.path.join(root, filename) for filename in files])
self.btn.Disable()
self.log.log((
'Request: Preparing faces for grouping, detecting faces in '
'chosen folder.'))
self.grid.set_paths(self.face_paths)
for path in self.face_paths:
try:
res = util.CF.face.detect(path)
except util.CF.CognitiveFaceException:
continue
for entry in res:
face = model.Face(entry, path)
self.faces[face.id] = face
self.grid.set_faces(self.faces.values())
self.log.log('Response: Success. Total {0} faces are detected.'.
format(len(self.faces)))
self.log.log(
'Request: Grouping {0} faces.'.format(len(self.faces)))
res = util.CF.face.group(self.faces.keys())
self.result.set_data(self.faces, res)
len_groups = len(res['groups'])
if res.get('messyGroup'):
len_groups += 1
self.log.log(
'Response: Success. {0} faces grouped into {1} groups'.format(
len(self.faces), len_groups))
self.btn.Enable()
|
Cognitive-Face-Python/sample/view/panel_group.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/view/panel_group.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 2168
}
| 217 |
export CUDA_VISIBLE_DEVICES=6
python t5_run_eval.py \
--model_name_or_path ./checkpoint/Mod/ContrastExp_finetune_set1_seed1/checkpoint-50000 \
--subtask Mod \
--validation_file test \
--ebatch_size 16 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Mod_ContrastExp_test.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Mod_ContrastExp_test.sh",
"repo_id": "ContextualSP",
"token_count": 85
}
| 218 |
import json
import numpy as np
from data_utils.task_def import TaskType, DataFormat
import tasks
def load_data(file_path, task_def):
data_format = task_def.data_type
task_type = task_def.task_type
label_dict = task_def.label_vocab
if task_type == TaskType.Ranking:
assert data_format == DataFormat.PremiseAndMultiHypothesis
rows = []
for line in open(file_path, encoding="utf-8"):
fields = line.strip("\n").split("\t")
if data_format == DataFormat.PremiseOnly:
assert len(fields) == 3
row = {"uid": fields[0], "label": fields[1], "premise": fields[2]}
elif data_format == DataFormat.PremiseAndOneHypothesis:
assert len(fields) == 4
row = {
"uid": fields[0],
"label": fields[1],
"premise": fields[2],
"hypothesis": fields[3],
}
elif data_format == DataFormat.PremiseAndMultiHypothesis:
assert len(fields) > 5
row = {
"uid": fields[0],
"ruid": fields[1].split(","),
"label": fields[2],
"premise": fields[3],
"hypothesis": fields[4:],
}
elif data_format == DataFormat.Seqence:
row = {
"uid": fields[0],
"label": eval(fields[1]),
"premise": eval(fields[2]),
}
elif data_format == DataFormat.ClozeChoice:
row = {
"uid": fields[0],
"choice": fields[1],
"answer": fields[2],
"label": fields[3],
"premise": fields[4],
"hypothesis": fields[5:],
}
else:
raise ValueError(data_format)
task_obj = tasks.get_task_obj(task_def)
if task_obj is not None:
row["label"] = task_obj.input_parse_label(row["label"])
elif task_type == TaskType.Ranking:
labels = row["label"].split(",")
if label_dict is not None:
labels = [label_dict[label] for label in labels]
else:
labels = [float(label) for label in labels]
row["label"] = int(np.argmax(labels))
row["olabel"] = labels
elif task_type == TaskType.Span:
pass # don't process row label
elif task_type == TaskType.SeqenceLabeling:
assert type(row["label"]) is list
row["label"] = [label_dict[label] for label in row["label"]]
elif task_type == TaskType.ClozeChoice:
labels = eval(row["label"])
row["label"] = int(np.argmax(labels))
row["olabel"] = labels
rows.append(row)
return rows
def load_score_file(score_path, n_class):
sample_id_2_pred_score_seg_dic = {}
score_obj = json.loads(open(score_path, encoding="utf-8").read())
assert (len(score_obj["scores"]) % len(score_obj["uids"]) == 0) and (
len(score_obj["scores"]) / len(score_obj["uids"]) == n_class
), "scores column size should equal to sample count or multiple of sample count (for classification problem)"
scores = score_obj["scores"]
score_segs = [
scores[i * n_class : (i + 1) * n_class] for i in range(len(score_obj["uids"]))
]
for sample_id, pred, score_seg in zip(
score_obj["uids"], score_obj["predictions"], score_segs
):
sample_id_2_pred_score_seg_dic[sample_id] = (pred, score_seg)
return sample_id_2_pred_score_seg_dic
|
ContextualSP/adaptershare/data_utils/__init__.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/__init__.py",
"repo_id": "ContextualSP",
"token_count": 1767
}
| 219 |
#!/usr/bin/env bash
###############################
# Batch training script for domain adaptation.
# Xiaodong
###############################
declare -a SCITAIL=('scitail_001' 'scitail_01' 'scitail_1' 'scitail')
## Scitail
for split in "${SCITAIL[@]}"
do
export CUDA_VISIBLE_DEVICES=0
if [ ${split} == "scitail_001" ] || [ ${split} == "scitail_01" ]; then
batch_size=8
else
batch_size=32
fi
bash experiments/domain_adaptation/run_domain_adaptation.sh data/domain_adaptation/ mt_dnn_models/mt_dnn_base_uncased.pt ${split} scitail ${batch_size}
done
declare -a SNLI=('snli_001' 'snli_01' 'snli_1' 'snli')
##SNLI
for split in "${SNLI[@]}"
do
export CUDA_VISIBLE_DEVICES=0
if [ ${split} == "snli_001" ] || [ ${split} == "snli_01" ]; then
batch_size=8
else
batch_size=32
fi
bash experiments/domain_adaptation/run_domain_adaptation.sh data/domain_adaptation/ mt_dnn_models/mt_dnn_base_uncased.pt ${split} snli ${batch_size}
done
|
ContextualSP/adaptershare/experiments/domain_adaptation/run_batch.sh/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/domain_adaptation/run_batch.sh",
"repo_id": "ContextualSP",
"token_count": 430
}
| 220 |
import os
from sys import path
path.append(os.getcwd())
from data_utils.task_def import DataFormat
def load_conll_ner(file, is_train=True):
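    # Expects CoNLL-style input: one token per line with whitespace-separated columns
    # (word first, NER tag last); blank lines or -DOCSTART- markers separate sentences.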
rows = []
cnt = 0
sentence = []
label = []
with open(file, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[-1])
    if len(sentence) > 0:
        sample = {"uid": cnt, "premise": sentence, "label": label}
        rows.append(sample)
    return rows
def load_conll_pos(file, is_train=True):
rows = []
cnt = 0
sentence = []
label = []
with open(file, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[1])
    if len(sentence) > 0:
        sample = {"uid": cnt, "premise": sentence, "label": label}
        rows.append(sample)
    return rows
def load_conll_chunk(file, is_train=True):
rows = []
cnt = 0
sentence = []
label = []
with open(file, encoding="utf8") as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART") or line[0] == "\n":
if len(sentence) > 0:
sample = {"uid": cnt, "premise": sentence, "label": label}
rows.append(sample)
sentence = []
label = []
cnt += 1
continue
splits = line.split(" ")
sentence.append(splits[0])
label.append(splits[2])
    if len(sentence) > 0:
        sample = {"uid": cnt, "premise": sentence, "label": label}
        rows.append(sample)
    return rows
|
ContextualSP/adaptershare/experiments/ner/ner_utils.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/ner/ner_utils.py",
"repo_id": "ContextualSP",
"token_count": 1318
}
| 221 |
import torch.nn as nn
from module.common import activation
from module.dropout_wrapper import DropoutWrapper
class Pooler(nn.Module):
def __init__(self, hidden_size, dropout_p=0.1, actf="tanh"):
super(Pooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = activation(actf)
self.dropout = DropoutWrapper(dropout_p=dropout_p)
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
first_token_tensor = self.dropout(first_token_tensor)
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
ContextualSP/adaptershare/module/pooler.py/0
|
{
"file_path": "ContextualSP/adaptershare/module/pooler.py",
"repo_id": "ContextualSP",
"token_count": 279
}
| 222 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import yaml
import os
import numpy as np
import argparse
import json
import sys
from data_utils import load_data
from data_utils.task_def import TaskType, DataFormat
from data_utils.log_wrapper import create_logger
from experiments.exp_def import TaskDefs
from transformers import AutoTokenizer
from tqdm import tqdm
from functools import partial
import multiprocessing
DEBUG_MODE = False
MAX_SEQ_LEN = 512
DOC_STRIDE = 180
MAX_QUERY_LEN = 64
MRC_MAX_SEQ_LEN = 384
logger = create_logger(
__name__, to_disk=True, log_file="mt_dnn_data_proc_{}.log".format(MAX_SEQ_LEN)
)
def feature_extractor(tokenizer, text_a, text_b=None, max_length=512, do_padding=False):
inputs = tokenizer(
text_a,
text_b,
add_special_tokens=True,
max_length=max_length,
truncation=True,
padding=do_padding,
)
input_ids = inputs["input_ids"]
token_type_ids = (
inputs["token_type_ids"] if "token_type_ids" in inputs else [0] * len(input_ids)
)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = inputs["attention_mask"]
if do_padding:
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
len(input_ids), max_length
)
assert (
len(attention_mask) == max_length
), "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert (
len(token_type_ids) == max_length
), "Error with input length {} vs {}".format(len(token_type_ids), max_length)
return input_ids, attention_mask, token_type_ids
def extract_feature_premise_only(sample, max_seq_len=MAX_SEQ_LEN, tokenizer=None):
"""extract feature of single sentence tasks"""
input_ids, input_mask, type_ids = feature_extractor(
tokenizer, sample["premise"], max_length=max_seq_len
)
feature = {
"uid": sample["uid"],
"label": sample["label"],
"token_id": input_ids,
"type_id": type_ids,
"attention_mask": input_mask,
}
return feature
def extract_feature_premise_and_one_hypo(
sample, max_seq_len=MAX_SEQ_LEN, tokenizer=None
):
input_ids, input_mask, type_ids = feature_extractor(
tokenizer,
sample["premise"],
text_b=sample["hypothesis"],
max_length=max_seq_len,
)
feature = {
"uid": sample["uid"],
"label": sample["label"],
"token_id": input_ids,
"type_id": type_ids,
"attention_mask": input_mask,
}
return feature
def extract_feature_premise_and_multi_hypo(
sample, max_seq_len=MAX_SEQ_LEN, tokenizer=None
):
ids = sample["uid"]
premise = sample["premise"]
hypothesis_list = sample["hypothesis"]
label = sample["label"]
input_ids_list = []
type_ids_list = []
attention_mask_list = []
for hypothesis in hypothesis_list:
input_ids, input_mask, type_ids = feature_extractor(
tokenizer, premise, hypothesis, max_length=max_seq_len
)
input_ids_list.append(input_ids)
type_ids_list.append(type_ids)
attention_mask_list.append(input_mask)
feature = {
"uid": ids,
"label": label,
"token_id": input_ids_list,
"type_id": type_ids_list,
"ruid": sample["ruid"],
"olabel": sample["olabel"],
"attention_mask": attention_mask_list,
}
return feature
def extract_feature_sequence(
sample, max_seq_len=MAX_SEQ_LEN, tokenizer=None, label_mapper=None
):
ids = sample["uid"]
premise = sample["premise"]
tokens = []
labels = []
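    # Align word-level labels with subword tokens: the first subword of each word keeps
    # the word's label, and continuation subwords receive the special "X" label.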
for i, word in enumerate(premise):
subwords = tokenizer.tokenize(word)
tokens.extend(subwords)
for j in range(len(subwords)):
if j == 0:
labels.append(sample["label"][i])
else:
labels.append(label_mapper["X"])
if len(premise) > max_seq_len - 2:
tokens = tokens[: max_seq_len - 2]
labels = labels[: max_seq_len - 2]
label = [label_mapper["CLS"]] + labels + [label_mapper["SEP"]]
input_ids = tokenizer.convert_tokens_to_ids(
[tokenizer.cls_token] + tokens + [tokenizer.sep_token]
)
assert len(label) == len(input_ids)
type_ids = [0] * len(input_ids)
feature = {"uid": ids, "label": label, "token_id": input_ids, "type_id": type_ids}
return feature
def extract_feature_cloze_choice(
sample, max_seq_len=MAX_SEQ_LEN, tokenizer=None
):
ids = sample["uid"]
premise = sample["premise"]
hypothesis_list = sample["hypothesis"]
label = sample["label"]
input_ids_list = []
type_ids_list = []
attention_mask_list = []
for hypothesis in hypothesis_list:
input_ids, input_mask, type_ids = feature_extractor(
tokenizer, premise, hypothesis, max_length=max_seq_len
)
input_ids_list.append(input_ids)
type_ids_list.append(type_ids)
attention_mask_list.append(input_mask)
feature = {
"uid": ids,
"label": label,
"token_id": input_ids_list,
"type_id": type_ids_list,
"olabel": sample["olabel"],
"attention_mask": attention_mask_list,
"choice": sample["choice"],
"answer": sample["answer"]
}
return feature
def build_data(
data,
dump_path,
tokenizer,
data_format=DataFormat.PremiseOnly,
max_seq_len=MAX_SEQ_LEN,
lab_dict=None,
do_padding=False,
truncation=True,
workers=1,
):
if data_format == DataFormat.PremiseOnly:
partial_feature = partial(
extract_feature_premise_only, max_seq_len=max_seq_len, tokenizer=tokenizer
)
elif data_format == DataFormat.PremiseAndOneHypothesis:
partial_feature = partial(
extract_feature_premise_and_one_hypo,
max_seq_len=max_seq_len,
tokenizer=tokenizer,
)
elif data_format == DataFormat.PremiseAndMultiHypothesis:
partial_feature = partial(
extract_feature_premise_and_multi_hypo,
max_seq_len=max_seq_len,
tokenizer=tokenizer,
)
elif data_format == DataFormat.Seqence:
partial_feature = partial(
extract_feature_sequence,
max_seq_len=max_seq_len,
tokenizer=tokenizer,
label_mapper=lab_dict,
)
elif data_format == DataFormat.ClozeChoice:
partial_feature = partial(
extract_feature_cloze_choice,
max_seq_len=max_seq_len,
tokenizer=tokenizer,
)
else:
raise ValueError(data_format)
if workers > 1:
with multiprocessing.Pool(processes=workers) as pool:
features = pool.map(partial_feature, data)
logger.info("begin to write features")
with open(dump_path, "w", encoding="utf-8") as writer:
for feature in tqdm(features, total=len(features)):
writer.write("{}\n".format(json.dumps(feature)))
else:
with open(dump_path, "w", encoding="utf-8") as writer:
for sample in tqdm(data, total=len(data)):
feature = partial_feature(sample)
writer.write("{}\n".format(json.dumps(feature)))
def parse_args():
parser = argparse.ArgumentParser(
description="Preprocessing GLUE/SNLI/SciTail dataset."
)
parser.add_argument(
"--model",
type=str,
default="bert-large-uncased",
help="support all BERT and ROBERTA family supported by HuggingFace Transformers",
)
parser.add_argument("--do_padding", action="store_true")
parser.add_argument("--root_dir", type=str, default="data/canonical_data")
parser.add_argument(
"--task_def", type=str, default="experiments/glue/glue_task_def.yml"
)
parser.add_argument("--transformer_cache", default=".cache", type=str)
parser.add_argument("--workers", type=int, default=1)
args = parser.parse_args()
return args
def main(args):
# hyper param
root = args.root_dir
assert os.path.exists(root)
tokenizer = AutoTokenizer.from_pretrained(
args.model, cache_dir=args.transformer_cache
)
mt_dnn_root = os.path.join(root, args.model)
if not os.path.isdir(mt_dnn_root):
os.makedirs(mt_dnn_root)
task_defs = TaskDefs(args.task_def)
for task in task_defs.get_task_names():
task_def = task_defs.get_task_def(task)
logger.info("Task %s" % task)
for split_name in task_def.split_names:
file_path = os.path.join(root, "%s_%s.tsv" % (task, split_name))
if not os.path.exists(file_path):
logger.warning("File %s doesnot exit")
sys.exit(1)
rows = load_data(file_path, task_def)
dump_path = os.path.join(mt_dnn_root, "%s_%s.json" % (task, split_name))
logger.info(dump_path)
build_data(
rows,
dump_path,
tokenizer,
task_def.data_type,
lab_dict=task_def.label_vocab,
workers=args.workers,
)
if __name__ == "__main__":
args = parse_args()
main(args)
|
ContextualSP/adaptershare/prepro_std.py/0
|
{
"file_path": "ContextualSP/adaptershare/prepro_std.py",
"repo_id": "ContextualSP",
"token_count": 4224
}
| 223 |