text (stringlengths 5-22M) | id (stringlengths 12-177) | metadata (dict) | __index_level_0__ (int64, 0-1.37k)
---|---|---|---
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/databricks/template_schema.json",
"type": "object",
"title": "Azure Databricks",
"description": "Azure Databricks",
"required": [],
"properties": {
"display_name": {
"type": "string",
"title": "Name for the workspace service",
"description": "The name of the workspace service to be displayed to users",
"default": "Azure Databricks",
"updateable": true
},
"description": {
"type": "string",
"title": "Description of the workspace service",
"description": "Description of the workspace service",
"default": "Azure Databricks is a fast, easy, and collaborative Apache Spark-based big data analytics service designed for data science and data engineering.",
"updateable": true
},
"overview": {
"type": "string",
"title": "Workspace Service Overview",
"description": "Long form description of the workspace service, in markdown syntax",
"default": "The Azure Databricks Lakehouse Platform provides a unified set of tools for building, deploying, sharing, and maintaining enterprise-grade data solutions at scale.\nAzure Databricks integrates with cloud storage and security in your cloud account, and manages and deploys cloud infrastructure on your behalf.\n[Azure Databricks documentation](https://learn.microsoft.com/en-us/azure/databricks/introduction/)",
"updateable": true
},
"is_exposed_externally": {
"$id": "#/properties/is_exposed_externally",
"type": "boolean",
"title": "Expose externally",
"description": "Is the Databricks workspace accessible from outside of the workspace network",
"default": false
},
"address_space": {
"$id": "#/properties/address_space",
"type": "string",
"title": "Address space",
"description": "The address space of the databricks subnets"
}
},
"uiSchema": {
"address_space": {
"classNames": "tre-hidden"
}
},
"pipeline": {
"install": [
{
"stepId": "12ba0dad-ea6c-4d0d-9255-daa6212f5ffa",
"stepTitle": "Upgrade to ensure aware of address space",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": []
},
{
"stepId": "main"
},
{
"stepId": "7ec5fa90-23bd-4809-b0d7-2d32c94016b1",
"stepTitle": "Add firewall rules for databricks",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_databricks",
"action": "Allow",
"rules": [
{
"name": "databricks",
"description": "Communication with Azure Databricks dependancies.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": [ "AzureDatabricks"],
"destination_ports": [
"443"
],
"protocols": [
"TCP"
]
},
{
"name": "databricks-sql-metastore",
"description": "Stores metadata for databases and child objects in a Azure Databricks workspace.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": "{{ resource.properties.metastore_addresses }}",
"destination_ports": [
"3306"
],
"protocols": [
"TCP"
]
},
{
"name": "databricks-observability-eventhub",
"description": "Transit for Azure Databricks on-cluster service specific telemetry.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": "{{ resource.properties.event_hub_endpoint_addresses }}",
"destination_ports": [
"9093"
],
"protocols": [
"TCP"
]
},
{
"name": "AzureAD",
"description": "AAD access",
"source_addresses": "{{ resource.properties.workspace_address_spaces }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_databricks",
"action": "Allow",
"rules": [
{
"name": "databricks-spark-log-blob-storage",
"description": "To store Azure Databricks audit and cluster logs (anonymized / masked) for support and troubleshooting.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": "{{ resource.properties.log_blob_storage_domains }}",
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "databricks-artifact-blob-storage",
"description": "Stores Databricks Runtime images to be deployed on cluster nodes.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": "{{ resource.properties.artifact_blob_storage_domains }}",
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "databricks-dbfs",
"description": "Azure Databricks workspace root storage.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": [
"{{ resource.properties.dbfs_blob_storage_domain }}"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_spaces }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"upgrade": [
{
"stepId": "12baaaad-ea6c-4d0d-9255-d316212f5ffa",
"stepTitle": "Upgrade to ensure aware of address space",
"resourceType": "workspace",
"resourceAction": "upgrade",
"properties": []
},
{
"stepId": "main"
},
{
"stepId": "260421b3-7308-491f-b531-e007cdc0aa46",
"stepTitle": "Add firewall rules for databricks",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_databricks",
"action": "Allow",
"rules": [
{
"name": "databricks",
"description": "Communication with Azure Databricks dependancies.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": [ "AzureDatabricks"],
"destination_ports": [
"443"
],
"protocols": [
"TCP"
]
},
{
"name": "databricks-sql-metastore",
"description": "Stores metadata for databases and child objects in a Azure Databricks workspace.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": "{{ resource.properties.metastore_addresses }}",
"destination_ports": [
"3306"
],
"protocols": [
"TCP"
]
},
{
"name": "databricks-observability-eventhub",
"description": "Transit for Azure Databricks on-cluster service specific telemetry.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"destination_addresses": "{{ resource.properties.event_hub_endpoint_addresses }}",
"destination_ports": [
"9093"
],
"protocols": [
"TCP"
]
},
{
"name": "AzureAD",
"description": "AAD access",
"source_addresses": "{{ resource.properties.workspace_address_spaces }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_databricks",
"action": "Allow",
"rules": [
{
"name": "databricks-spark-log-blob-storage",
"description": "To store Azure Databricks audit and cluster logs (anonymized / masked) for support and troubleshooting.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": "{{ resource.properties.log_blob_storage_domains }}",
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "databricks-artifact-blob-storage",
"description": "Stores Databricks Runtime images to be deployed on cluster nodes.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": "{{ resource.properties.artifact_blob_storage_domains }}",
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "databricks-dbfs",
"description": "Azure Databricks workspace root storage.",
"source_addresses": "{{ resource.properties.databricks_address_prefixes }}",
"target_fqdns": [
"{{ resource.properties.dbfs_blob_storage_domain }}"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
},
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_spaces }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"uninstall": [
{
"stepId": "da2d99a3-3940-4dcc-a934-53535f2e2451",
"stepTitle": "Remove network firewall rules for databricks",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_databricks"
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_databricks"
}
}
]
},
{
"stepId": "main"
}
]
}
}
|
AzureTRE/templates/workspace_services/databricks/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/databricks/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 7802
}
| 133 |
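The install and upgrade pipelines above patch the shared firewall service by substituting entries in its `network_rule_collections` and `rule_collections` arrays, matched on `name`, while the uninstall pipeline removes them. A minimal Python sketch of that replace-or-append and remove semantics (the helper and variable names here are illustrative, not part of AzureTRE):

```python
def substitute_array_entry(collection, new_entry, match_field="name"):
    """The 'replace' action: replace the item whose match_field equals new_entry's, or append it."""
    result = [item for item in collection if item.get(match_field) != new_entry.get(match_field)]
    result.append(new_entry)
    return result

def remove_array_entry(collection, match_value, match_field="name"):
    """The uninstall step's 'remove' action: drop any item whose match_field equals match_value."""
    return [item for item in collection if item.get(match_field) != match_value]

# Example: merging a Databricks network rule collection into existing firewall collections.
existing = [{"name": "nrc_svc_123_other", "action": "Allow", "rules": []}]
databricks = {"name": "nrc_svc_456_databricks", "action": "Allow", "rules": ["..."]}
print(substitute_array_entry(existing, databricks))
print(remove_array_entry(existing, "nrc_svc_123_other"))
```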
#!/bin/bash
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' http://localhost:3000/api/swagger)" != @("200"|"302") ]]; do
echo "Waiting for web service"
sleep 5
done
if [ -z "${GITEA_USERNAME}" ]; then
echo "Gitea username is not set"
s6-svc -k /etc/s6/gitea
sleep 60
fi
echo "Adding admin user"
echo "gitea admin user create --admin --access-token --username='${GITEA_USERNAME}' --password='${GITEA_PASSWD}' --email='${GITEA_EMAIL}' --must-change-password=false"
su gitea -c "gitea admin user create --admin --access-token --username='${GITEA_USERNAME}' --password='${GITEA_PASSWD}' --email='${GITEA_EMAIL}' --must-change-password=false"
echo "Configuring OIDC"
echo "gitea admin auth add-oauth --name oidc --provider openidConnect --key '${GITEA_OPENID_CLIENT_ID}' --secret '${GITEA_OPENID_CLIENT_SECRET}' --auto-discover-url '${GITEA_OPENID_AUTHORITY}/.well-known/openid-configuration' --group-claim-name 'roles' --admin-group 'WorkspaceOwner'"
su gitea -c "gitea admin auth add-oauth --name oidc --provider openidConnect --key '${GITEA_OPENID_CLIENT_ID}' --secret '${GITEA_OPENID_CLIENT_SECRET}' --auto-discover-url '${GITEA_OPENID_AUTHORITY}/.well-known/openid-configuration' --group-claim-name 'roles' --admin-group 'WorkspaceOwner'"
|
AzureTRE/templates/workspace_services/gitea/docker/configure_gitea.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/docker/configure_gitea.sh",
"repo_id": "AzureTRE",
"token_count": 504
}
| 134 |
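The script above blocks until Gitea's `/api/swagger` endpoint answers with 200 or 302 before creating the admin user and OIDC provider. A rough Python equivalent of that readiness poll (same URL and 5-second interval as the script; redirect handling is simplified, since urllib follows redirects by default):

```python
import time
import urllib.error
import urllib.request

def wait_for_web_service(url="http://localhost:3000/api/swagger", interval=5):
    while True:
        try:
            with urllib.request.urlopen(url) as response:
                if response.status in (200, 302):
                    return
        except urllib.error.URLError:
            pass  # service not listening yet
        print("Waiting for web service")
        time.sleep(interval)
```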
#!/usr/bin/with-contenv sh
echo >&2 "starting tomcat"
sed -i "s#port=\"8080\"#port=\"8080\" maxHttpHeaderSize=\"65536\"#" /usr/share/tomcat9/conf/server.xml
sed -i "s#unpackWARs=\"true\" autoDeploy=\"true\">#><Context path=\"guacamole\" docBase=\"${GUACAMOLE_HOME}guacamole.war\"/>#" /usr/share/tomcat9/conf/server.xml
# uncomment below to debug
#sed -i "s#</Host>#<Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"/proc/self/fd\" prefix=\"1\" suffix=\"\" rotatable=\"false\" pattern=\"%h %l %u %t %r; %s %b %{X-Access-Token}i\" /></Host>#" /etc/tomcat9/server.xml
#cat /etc/tomcat9/server.xml
exec sh /usr/share/tomcat9/bin/catalina.sh run
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/tomcat/run/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/tomcat/run",
"repo_id": "AzureTRE",
"token_count": 268
}
| 135 |
output "authentication_callback_uri" {
value = "https://${azurerm_linux_web_app.guacamole.default_hostname}/oauth2/callback"
}
output "web_apps_addresses" {
value = jsonencode(data.azurerm_subnet.web_apps.address_prefixes)
}
output "admin_connection_uri" {
value = "https://${azurerm_linux_web_app.guacamole.default_hostname}/guacamole"
}
|
AzureTRE/templates/workspace_services/guacamole/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 135
}
| 136 |
$DownloadPath = $env:Public + "\Desktop\ReviewData"
mkdir $DownloadPath
az storage blob download-batch -d $DownloadPath -s '"${airlock_request_sas_url}"'
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/download_review_data.ps1/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/download_review_data.ps1",
"repo_id": "AzureTRE",
"token_count": 50
}
| 137 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.41.0"
constraints = "3.41.0"
hashes = [
"h1:Kn7sqPk/YpsvORFEd/zHXa8U7KkVB551DXUMwvqiU0s=",
"zh:123838b581a27499d0a1e3a9804a6f57304969f58c4ea7fbd938ae2a795b2a19",
"zh:761a7bff3872a192202411aa62e3e6aedc3046f0df86967a1f9ed5a74207f451",
"zh:83092681a9e14d5e548edccece5086d822f86de6ff8227bb78706b41f0041697",
"zh:95fd6be4a3b995dc8ad40054646e2261e01365af7e8f8ebe0e62133cee8250cd",
"zh:995c3eb0aa23fc6948f45e68173034facc4bd92f4865abc3bba4bd305596fc86",
"zh:9f7b158d39f3e9fbc01ee27e6a63600838e34b7364715ebeea7d62717e48cb56",
"zh:b23193883592a4889942e82e73782e70dfbb517561a4f24b09f8ab6cbdc46866",
"zh:c4884d654d03a0546ec78f348563e32220ae35a2c76f22cb3c960f989dc6be48",
"zh:dda1c6720c6cef052db2fb4886a9cd46dee849e4367d6d66b45ad9d5bb607b94",
"zh:f0bc878d67785343bfc36a7d14ec58a67fa436f5b8b497221aea3931e3dccefd",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f6aa3c25f7106619cc6760e1d34b29b0956c50f285994f009939890a85e7b058",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.4.3"
constraints = "3.4.3"
hashes = [
"h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=",
"zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752",
"zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b",
"zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3",
"zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5",
"zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda",
"zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6",
"zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1",
"zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d",
"zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8",
"zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93",
]
}
provider "registry.terraform.io/hashicorp/template" {
version = "2.2.0"
constraints = "2.2.0"
hashes = [
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
]
}
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-import-reviewvm/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 1922
}
| 138 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
secret_name=$1
keyvault_name=$2
username=$3
resource_id=$4
password="$(LC_ALL=C tr -dc 'A-Za-z0-9_%@' </dev/urandom | head -c 16 ; echo)"
secret_value="$username
$password"
# Persist new password to keyvault
az keyvault secret set --name "$secret_name" --vault-name "$keyvault_name" --value "$secret_value"
# Set new VM password
az vm user update --ids "$resource_id" --username "$username" --password "$password"
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/reset_password.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/reset_password.sh",
"repo_id": "AzureTRE",
"token_count": 205
}
| 139 |
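The password in the script above is drawn from `/dev/urandom`, restricted to `A-Za-z0-9_%@`, and truncated to 16 characters. A sketch of the same generation in Python using the standard `secrets` module:

```python
import secrets
import string

# Mirrors the tr -dc 'A-Za-z0-9_%@' </dev/urandom | head -c 16 pipeline.
ALLOWED_CHARS = string.ascii_letters + string.digits + "_%@"

def generate_password(length=16):
    return "".join(secrets.choice(ALLOWED_CHARS) for _ in range(length))

print(generate_password())
```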
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging
# set -o xtrace
# Delete any existing VM Extensions before a VM gets deleted.
# This is needed to work around bug https://github.com/hashicorp/terraform-provider-azurerm/issues/6098
MGMT_RESOURCE_GROUP_NAME=$1
MGMT_STORAGE_ACCOUNT_NAME=$2
TF_STATE_CONTAINER_NAME=$3
ID=$4
pushd terraform
terraform init -input=false -backend=true \
-backend-config="resource_group_name=${MGMT_RESOURCE_GROUP_NAME}" \
-backend-config="storage_account_name=${MGMT_STORAGE_ACCOUNT_NAME}" \
-backend-config="container_name=${TF_STATE_CONTAINER_NAME}" \
-backend-config="key=${ID}"
echo "Running terraform state list"
tf_state_list="$(terraform state list)"
echo "State list result: ${tf_state_list}"
# The [[ $? == 1 ]] part is here because grep will exit with code 1 if there are no matches,
# which will fail the script because of set -o errexit setting.
echo "${tf_state_list}" | { grep "azurerm_virtual_machine_extension." || [[ $? == 1 ]]; } | xargs -r terraform state rm
echo "Script finished"
popd
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/delete_vm_extensions.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/delete_vm_extensions.sh",
"repo_id": "AzureTRE",
"token_count": 398
}
| 140 |
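The `grep ... || [[ $? == 1 ]]` construct above exists only to stop an empty match list from tripping `errexit`. An illustrative Python version of the same filtering step, which has no such exit-code concern (each returned state address would still be passed to `terraform state rm`, as the script does with xargs):

```python
import subprocess

def vm_extension_state_entries():
    # Equivalent of: terraform state list | grep "azurerm_virtual_machine_extension."
    output = subprocess.run(
        ["terraform", "state", "list"],
        capture_output=True, text=True, check=True,
    ).stdout
    return [line for line in output.splitlines()
            if "azurerm_virtual_machine_extension." in line]
```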
# syntax=docker/dockerfile-upstream:1.4.0
FROM --platform=linux/amd64 debian:bullseye-slim
# PORTER_INIT
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
# Git is required for terraform_azurerm_environment_configuration
RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \
apt-get update && apt-get install -y git jq --no-install-recommends
# PORTER_MIXINS
# Use the BUNDLE_DIR build argument to copy files into the bundle
COPY --link . ${BUNDLE_DIR}/
|
AzureTRE/templates/workspace_services/health-services/Dockerfile.tmpl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/health-services/Dockerfile.tmpl",
"repo_id": "AzureTRE",
"token_count": 223
}
| 141 |
#!/bin/bash
set -e
acr_domain_suffix=$(az cloud show --query suffixes.acrLoginServerEndpoint --output tsv)
porter install tre-service-azureml --reference "${MGMT_ACR_NAME}${acr_domain_suffix}/tre-service-azureml:v0.1.9" \
--cred ./arm_auth_local_debugging.json \
--parameter-set ./parameters_service_azureml.json
|
AzureTRE/templates/workspace_services/innereye/install_service_azureml.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/install_service_azureml.sh",
"repo_id": "AzureTRE",
"token_count": 128
}
| 142 |
locals {
short_service_id = substr(var.tre_resource_id, -4, -1)
short_workspace_id = substr(var.workspace_id, -4, -1)
core_resource_group_name = "rg-${var.tre_id}"
workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}"
webapp_name = "mlflow-${local.service_resource_name_suffix}"
postgresql_server_name = "mlflow-${local.service_resource_name_suffix}"
keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
storage_name = lower(replace("stg${substr(local.workspace_resource_name_suffix, -8, -1)}", "-", ""))
shared_storage_share = "vm-shared-storage"
mlflow_artefacts_container_name = "mlartefacts"
image_name = "mlflow-server"
image_tag = replace(replace(replace(data.local_file.version.content, "__version__ = \"", ""), "\"", ""), "\n", "")
tre_workspace_service_tags = {
tre_id = var.tre_id
tre_workspace_id = var.workspace_id
tre_workspace_service_id = var.tre_resource_id
}
web_app_diagnostic_categories_enabled = [
"AppServiceHTTPLogs", "AppServiceConsoleLogs", "AppServiceAppLogs", "AppServiceFileAuditLogs",
"AppServiceAuditLogs", "AppServiceIPSecAuditLogs", "AppServicePlatformLogs", "AppServiceAntivirusScanAuditLogs"
]
identity_name = "id-${local.webapp_name}"
}
|
AzureTRE/templates/workspace_services/mlflow/terraform/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/locals.tf",
"repo_id": "AzureTRE",
"token_count": 727
}
| 143 |
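Two expressions in this locals block are easy to misread: `substr(..., -4, -1)` takes the last four characters of an id, and the nested `replace()` calls strip a `__version__ = "..."` assignment down to a bare image tag. Hedged Python equivalents, for reference only:

```python
def short_id(resource_id):
    # Terraform: substr(var.tre_resource_id, -4, -1) -> last four characters
    return resource_id[-4:]

def image_tag(version_file_content):
    # Terraform: nested replace() calls stripping the assignment, quotes and newline,
    # e.g. '__version__ = "0.1.2"\n' -> '0.1.2'
    return (version_file_content
            .replace('__version__ = "', "")
            .replace('"', "")
            .replace("\n", ""))

print(short_id("guid-ending-in-ab12"))       # -> "ab12"
print(image_tag('__version__ = "0.1.2"\n'))  # -> "0.1.2"
```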
resource "random_password" "password" {
length = 20
min_upper = 2
min_lower = 2
min_numeric = 2
min_special = 2
}
resource "azurerm_mysql_flexible_server" "mysql" {
name = "mysql-${local.service_resource_name_suffix}"
resource_group_name = data.azurerm_resource_group.ws.name
location = data.azurerm_resource_group.ws.location
administrator_login = "mysqladmin"
administrator_password = random_password.password.result
sku_name = local.sql_sku[var.sql_sku].value
version = "8.0.21"
backup_retention_days = 7
geo_redundant_backup_enabled = false
tags = local.workspace_service_tags
lifecycle { ignore_changes = [tags, zone] }
}
resource "azurerm_mysql_flexible_database" "db" {
name = var.db_name
resource_group_name = data.azurerm_resource_group.ws.name
server_name = azurerm_mysql_flexible_server.mysql.name
charset = "utf8"
collation = "utf8_unicode_ci"
}
resource "azurerm_private_endpoint" "mysql_private_endpoint" {
name = "pe-${azurerm_mysql_flexible_server.mysql.name}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
subnet_id = data.azurerm_subnet.services.id
tags = local.workspace_service_tags
private_service_connection {
private_connection_resource_id = azurerm_mysql_flexible_server.mysql.id
name = "psc-${azurerm_mysql_flexible_server.mysql.name}"
subresource_names = ["mysqlServer"]
is_manual_connection = false
}
private_dns_zone_group {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.mysql.database.azure.com"]
private_dns_zone_ids = [data.azurerm_private_dns_zone.mysql.id]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_secret" "db_password" {
name = "${azurerm_mysql_flexible_server.mysql.name}-administrator-password"
value = random_password.password.result
key_vault_id = data.azurerm_key_vault.ws.id
tags = local.workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspace_services/mysql/terraform/mysql.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mysql/terraform/mysql.tf",
"repo_id": "AzureTRE",
"token_count": 1086
}
| 144 |
-- This gives each new user access to all sources plus the 'Atlas users' role.
CREATE OR REPLACE FUNCTION function_default_user_roles() RETURNS TRIGGER AS
$BODY$
BEGIN
INSERT INTO webapi.sec_user_role (role_id, user_id)
SELECT r.id as role_id, new.id as user_id
FROM webapi.sec_role as r
WHERE r.name LIKE 'Source user%' OR r.name = 'Atlas users';
RETURN new;
END;
$BODY$
language plpgsql;
DROP TRIGGER IF EXISTS trigger_sec_user_insert ON webapi.sec_user;
CREATE TRIGGER trigger_sec_user_insert
AFTER INSERT ON webapi.sec_user
FOR EACH ROW
EXECUTE PROCEDURE function_default_user_roles();
DO $$
BEGIN
RAISE NOTICE 'Finished setting up default roles procedures.';
END $$;
|
AzureTRE/templates/workspace_services/ohdsi/sql/atlas_default_roles.sql/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/sql/atlas_default_roles.sql",
"repo_id": "AzureTRE",
"token_count": 256
}
| 145 |
output "connection_uri" {
value = local.atlas_ui_url
description = "Atlas Endpoint"
precondition {
condition = local.atlas_ui_fqdn == azurerm_linux_web_app.atlas_ui.default_hostname
error_message = "Computed FQDN is different than actual one."
}
}
output "webapi_uri" {
value = local.ohdsi_webapi_url
description = "WebAPI Endpoint"
precondition {
condition = local.ohdsi_webapi_fqdn == azurerm_linux_web_app.ohdsi_webapi.default_hostname
error_message = "Computed FQDN is different than actual one."
}
}
output "authentication_callback_uri" {
value = "${local.ohdsi_webapi_url_auth_callback}?client_name=OidcClient"
}
output "is_exposed_externally" {
value = false
}
|
AzureTRE/templates/workspace_services/ohdsi/terraform/outputs.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/outputs.tf",
"repo_id": "AzureTRE",
"token_count": 287
}
| 146 |
---
schemaVersion: 1.0.0
name: tre-workspace-base
version: 1.5.3
description: "A base Azure TRE workspace"
dockerfile: Dockerfile.tmpl
registry: azuretre
credentials:
# Credentials for interacting with the AAD Auth tenant
- name: auth_client_id
env: AUTH_CLIENT_ID
- name: auth_client_secret
env: AUTH_CLIENT_SECRET
- name: auth_tenant_id
env: AUTH_TENANT_ID
# Credentials for interacting with Azure
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: tre_id
type: string
description: "The ID of the parent TRE instance e.g., mytre-dev-3142"
- name: id
type: string
description: "the resource ID for this installation"
- name: azure_environment
type: string
default: "AzureCloud"
description: "Used by Azure CLI to set the Azure environment"
- name: azure_location
type: string
description: "Azure location (region) to deploy to"
- name: address_spaces
type: string
description: "VNet address spaces"
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
type: boolean
default: false
- name: shared_storage_quota
type: integer
default: 50
- name: enable_local_debugging
type: boolean
default: false
- name: register_aad_application
type: boolean
default: false
description: "Whether this bundle should register the workspace in AAD"
- name: create_aad_groups
type: boolean
default: false
description: "Whether this bundle should create AAD groups for the workspace app roles"
- name: workspace_owner_object_id
type: string
description: "The object id of the user that will be granted WorkspaceOwner after it is created."
- name: client_id
type: string
default: ""
description:
"The client id of the workspace in the identity provider. This value is typically provided to you
when you create the ws application"
- name: client_secret
type: string
description:
"The client secret of the workspace in the identity provider. This value is typically provided to you
when you create the ws application"
default: ""
- name: scope_id
type: string
default: ""
description: "The Service Principal Name or identifierUri (e.g. api://GUID"
- name: sp_id
type: string
default: ""
description: "The Service Principal in the Identity provider to be able to get claims"
- name: app_role_id_workspace_owner
type: string
default: ""
description: "The id of the application role WorkspaceOwner in the identity provider"
- name: app_role_id_workspace_researcher
type: string
default: ""
description: "The id of the application role WorkspaceResearcher in the identity provider"
- name: app_role_id_workspace_airlock_manager
type: string
default: ""
description: "The id of the application role AirlockManager in the identity provider"
- name: aad_redirect_uris
type: string
description: "List of redirect URIs in {name:value} format"
default: "W10=" # b64 for []
- name: app_service_plan_sku
type: string
description: "The SKU used when deploying an Azure App Service Plan"
default: "P1v3"
- name: enable_airlock
type: boolean
default: true
- name: arm_environment
type: string
outputs:
- name: app_role_id_workspace_owner
type: string
applyTo:
- install
- upgrade
- name: app_role_id_workspace_researcher
type: string
applyTo:
- install
- upgrade
- name: app_role_id_workspace_airlock_manager
type: string
applyTo:
- install
- upgrade
- name: client_id
type: string
applyTo:
- install
- upgrade
- name: scope_id
type: string
applyTo:
- install
- upgrade
- name: sp_id
type: string
applyTo:
- install
- upgrade
mixins:
- exec
- terraform:
clientVersion: 1.4.6
- az:
clientVersion: 2.49.0
install:
- terraform:
description: "Deploy workspace"
vars:
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
location: ${ bundle.parameters.azure_location }
address_spaces: ${ bundle.parameters.address_spaces }
shared_storage_quota: ${ bundle.parameters.shared_storage_quota }
enable_local_debugging: ${ bundle.parameters.enable_local_debugging }
register_aad_application: ${ bundle.parameters.register_aad_application }
create_aad_groups: ${ bundle.parameters.create_aad_groups }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
workspace_owner_object_id: ${ bundle.parameters.workspace_owner_object_id }
client_id: ${ bundle.parameters.client_id }
client_secret: ${ bundle.parameters.client_secret }
scope_id: ${ bundle.parameters.scope_id }
sp_id: ${ bundle.parameters.sp_id }
app_role_id_workspace_owner: ${ bundle.parameters.app_role_id_workspace_owner }
app_role_id_workspace_researcher: ${ bundle.parameters.app_role_id_workspace_researcher }
app_role_id_workspace_airlock_manager: ${ bundle.parameters.app_role_id_workspace_airlock_manager }
aad_redirect_uris_b64: ${ bundle.parameters.aad_redirect_uris }
app_service_plan_sku: ${ bundle.parameters.app_service_plan_sku }
enable_airlock: ${ bundle.parameters.enable_airlock }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.tre_id }-ws-${ bundle.parameters.id }
outputs:
- name: app_role_id_workspace_owner
- name: app_role_id_workspace_researcher
- name: app_role_id_workspace_airlock_manager
- name: client_id
- name: scope_id
- name: sp_id
upgrade:
- terraform:
description: "Upgrade workspace"
vars:
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
location: ${ bundle.parameters.azure_location }
address_spaces: ${ bundle.parameters.address_spaces }
shared_storage_quota: ${ bundle.parameters.shared_storage_quota }
enable_local_debugging: ${ bundle.parameters.enable_local_debugging }
register_aad_application: ${ bundle.parameters.register_aad_application }
create_aad_groups: ${ bundle.parameters.create_aad_groups }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
workspace_owner_object_id: ${ bundle.parameters.workspace_owner_object_id }
client_id: ${ bundle.parameters.client_id }
client_secret: ${ bundle.parameters.client_secret }
scope_id: ${ bundle.parameters.scope_id }
sp_id: ${ bundle.parameters.sp_id }
app_role_id_workspace_owner: ${ bundle.parameters.app_role_id_workspace_owner }
app_role_id_workspace_researcher: ${ bundle.parameters.app_role_id_workspace_researcher }
app_role_id_workspace_airlock_manager: ${ bundle.parameters.app_role_id_workspace_airlock_manager }
aad_redirect_uris_b64: ${ bundle.parameters.aad_redirect_uris }
app_service_plan_sku: ${ bundle.parameters.app_service_plan_sku }
enable_airlock: ${ bundle.parameters.enable_airlock }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.tre_id }-ws-${ bundle.parameters.id }
outputs:
- name: app_role_id_workspace_owner
- name: app_role_id_workspace_researcher
- name: app_role_id_workspace_airlock_manager
- name: client_id
- name: scope_id
- name: sp_id
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
- az:
description: "AAD Application Admin Login"
arguments:
- login
flags:
service-principal: ""
username: '${ bundle.credentials.auth_client_id }'
password: '${ bundle.credentials.auth_client_secret }'
tenant: '${ bundle.credentials.auth_tenant_id }'
allow-no-subscriptions: ""
- exec:
description: "Update workspace app redirect urls"
command: ./update_redirect_urls.sh
flags:
workspace-api-client-id: '${ bundle.parameters.client_id }'
aad-redirect-uris-b64: '${ bundle.parameters.aad_redirect_uris }'
register-aad-application: '${ bundle.parameters.register_aad_application }'
uninstall:
- terraform:
description: "Tear down workspace"
vars:
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
location: ${ bundle.parameters.azure_location }
address_spaces: ${ bundle.parameters.address_spaces }
shared_storage_quota: ${ bundle.parameters.shared_storage_quota }
enable_local_debugging: ${ bundle.parameters.enable_local_debugging }
register_aad_application: ${ bundle.parameters.register_aad_application }
create_aad_groups: ${ bundle.parameters.create_aad_groups }
auth_client_id: ${ bundle.credentials.auth_client_id }
auth_client_secret: ${ bundle.credentials.auth_client_secret }
auth_tenant_id: ${ bundle.credentials.auth_tenant_id }
workspace_owner_object_id: ${ bundle.parameters.workspace_owner_object_id }
client_id: ${ bundle.parameters.client_id }
scope_id: ${ bundle.parameters.scope_id }
sp_id: ${ bundle.parameters.sp_id }
app_role_id_workspace_owner: ${ bundle.parameters.app_role_id_workspace_owner }
app_role_id_workspace_researcher: ${ bundle.parameters.app_role_id_workspace_researcher }
app_role_id_workspace_airlock_manager: ${ bundle.parameters.app_role_id_workspace_airlock_manager }
aad_redirect_uris_b64: ${ bundle.parameters.aad_redirect_uris }
app_service_plan_sku: ${ bundle.parameters.app_service_plan_sku }
enable_airlock: ${ bundle.parameters.enable_airlock }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: ${ bundle.parameters.tre_id }-ws-${ bundle.parameters.id }
|
AzureTRE/templates/workspaces/base/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 4555
}
| 147 |
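The `aad_redirect_uris` parameter in this porter.yaml is a base64-encoded JSON list, which is why its default is `W10=` (the encoding of `[]`). A small Python sketch of producing such a value (the example URI is hypothetical):

```python
import base64
import json

def encode_redirect_uris(uris):
    """Encode a list of {name: ..., value: ...} redirect URIs as base64 JSON."""
    return base64.b64encode(json.dumps(uris).encode()).decode()

print(encode_redirect_uris([]))  # -> "W10=", the default above
# Hypothetical entry, for illustration only:
print(encode_redirect_uris([{"name": "example", "value": "https://example.org/callback"}]))
```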
locals {
short_workspace_id = substr(var.tre_resource_id, -4, -1)
app_insights_name = "appi-${var.tre_id}-ws-${local.short_workspace_id}"
}
|
AzureTRE/templates/workspaces/base/terraform/azure-monitor/locals.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/azure-monitor/locals.tf",
"repo_id": "AzureTRE",
"token_count": 65
}
| 148 |
resource "azurerm_private_dns_zone_virtual_network_link" "azurewebsites" {
name = "azurewebsites-link-${azurerm_virtual_network.ws.name}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.azurewebsites.name
virtual_network_id = azurerm_virtual_network.ws.id
registration_enabled = false
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "filecorelink" {
name = "filecorelink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.filecore.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "blobcorelink" {
name = "blobcorelink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.blobcore.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "dfscorelink" {
name = "dfscorelink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.dfscore.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "vaultcorelink" {
name = "vaultcorelink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.vaultcore.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azurecrlink" {
name = "azurecrlink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.azurecr.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azuremllink" {
name = "azuremllink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.azureml.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "azuremlcertlink" {
name = "azuremlcertlink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.azuremlcert.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "healthlink" {
name = "healthlink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.health.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "dicomlink" {
name = "dicomlink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.dicom.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "notebookslink" {
name = "notebookslink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.notebooks.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "mysqllink" {
name = "mysqllink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.mysql.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "postgreslink" {
name = "postgreslink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.postgres.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "nexuslink" {
name = "nexuslink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.nexus.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_private_dns_zone_virtual_network_link" "databrickslink" {
name = "databrickslink-${local.workspace_resource_name_suffix}"
resource_group_name = local.core_resource_group_name
private_dns_zone_name = data.azurerm_private_dns_zone.databricks.name
virtual_network_id = azurerm_virtual_network.ws.id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspaces/base/terraform/network/zone_links.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/network/zone_links.tf",
"repo_id": "AzureTRE",
"token_count": 2725
}
| 149 |
import { FontWeights, getTheme, IButtonStyles, IconButton, IIconProps, Link, mergeStyleSets, Modal } from "@fluentui/react";
import React, { useState } from "react";
interface ComplexPropertyModalProps {
val: any,
title: string
};
export const ComplexPropertyModal: React.FunctionComponent<ComplexPropertyModalProps> = (props: ComplexPropertyModalProps) => {
const [isOpen, setIsOpen] = useState(false);
return (
<>
<Link onClick={() => setIsOpen(true)}>[details]</Link>
{
isOpen &&
<Modal
titleAriaId={"modal"}
isOpen={true}
onDismiss={() => setIsOpen(false)}
isBlocking={false}
containerClassName={contentStyles.container}
>
<div className={contentStyles.header}>
<span id={"modal"}>{props.title}</span>
<IconButton
styles={iconButtonStyles}
iconProps={cancelIcon}
ariaLabel="Close popup modal"
onClick={() => setIsOpen(false)}
/>
</div>
<div className={contentStyles.body}>
<NestedDisplayItem val={props.val} isExpanded={true} topLayer={true}/>
</div>
</Modal>
}
</>
)
};
interface NestedDisplayItemProps {
val: any,
isExpanded?: boolean,
topLayer?: boolean
};
const NestedDisplayItem: React.FunctionComponent<NestedDisplayItemProps> = (props: NestedDisplayItemProps) => {
const [isExpanded, setIsExpanded] = useState(props.isExpanded === true)
return (
<>
{
!props.topLayer &&
<IconButton onClick={() => setIsExpanded(!isExpanded)} iconProps={{iconName: isExpanded ? 'ChevronUp' : 'ChevronDown'}} />
}
{
isExpanded &&
<ul className="tre-complex-list">
{
Object.keys(props.val).map((key: string, i) => {
if (typeof (props.val[key]) === 'object') {
return (
<li key={i} className={props.topLayer ? "tre-complex-list-border" : ""}>
<span style={{fontSize:'16px'}}>{key}:</span>
<NestedDisplayItem val={props.val[key]} isExpanded={false}/>
</li>
);
}
return (
<li key={i}>{isNaN(parseInt(key)) && key + ':'} {props.val[key]}</li>);
})
}
</ul>
}
</>
);
};
const cancelIcon: IIconProps = { iconName: 'Cancel' };
const theme = getTheme();
const contentStyles = mergeStyleSets({
container: {
display: 'flex',
flexFlow: 'column nowrap',
alignItems: 'stretch',
},
header: [
theme.fonts.xxLarge,
{
flex: '1 1 auto',
borderTop: `4px solid ${theme.palette.themePrimary}`,
color: theme.palette.neutralPrimary,
display: 'flex',
alignItems: 'center',
fontWeight: FontWeights.semibold,
padding: '12px 12px 14px 24px',
},
],
body: {
flex: '4 4 auto',
padding: '0 24px 24px 24px',
overflowY: 'hidden',
selectors: {
p: { margin: '14px 0' },
'p:first-child': { marginTop: 0 },
'p:last-child': { marginBottom: 0 },
},
},
});
const iconButtonStyles: Partial<IButtonStyles> = {
root: {
color: theme.palette.neutralPrimary,
marginLeft: 'auto',
marginTop: '4px',
marginRight: '2px',
},
rootHovered: {
color: theme.palette.neutralDark,
},
};
|
AzureTRE/ui/app/src/components/shared/ComplexItemDisplay.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ComplexItemDisplay.tsx",
"repo_id": "AzureTRE",
"token_count": 1620
}
| 150 |
import { IStackStyles, Spinner, SpinnerSize, Stack } from "@fluentui/react";
import React, { useEffect, useContext, useState } from 'react';
import { useParams } from 'react-router-dom';
import { HttpMethod, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { HistoryItem, Resource } from '../../models/resource';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { ResourceHistoryListItem } from './ResourceHistoryListItem';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import config from '../../config.json';
import moment from "moment";
import { APIError } from "../../models/exceptions";
import { LoadingState } from "../../models/loadingState";
import { ExceptionLayout } from "./ExceptionLayout";
interface ResourceHistoryListProps {
resource: Resource
}
export const ResourceHistoryList: React.FunctionComponent<ResourceHistoryListProps> = (props: ResourceHistoryListProps) => {
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
const workspaceCtx = useContext(WorkspaceContext);
const { resourceId } = useParams();
const [resourceHistory, setResourceHistory] = useState([] as Array<HistoryItem>)
const [loadingState, setLoadingState] = useState('loading');
useEffect(() => {
const getResourceHistory = async () => {
try {
// get resource operations
const scopeId = workspaceCtx.roles.length > 0 ? workspaceCtx.workspaceApplicationIdURI : "";
const history = await apiCall(`${props.resource.resourcePath}/${ApiEndpoint.History}`, HttpMethod.Get, scopeId);
config.debug && console.log(`Got resource history, for resource:${props.resource.id}: ${history.resource_history}`);
setResourceHistory(history.resource_history.reverse());
setLoadingState(history ? LoadingState.Ok : LoadingState.Error);
} catch (err: any) {
err.userMessage = "Error retrieving resource history"
setApiError(err);
setLoadingState(LoadingState.Error);
}
};
getResourceHistory();
}, [apiCall, props.resource, resourceId, workspaceCtx.workspaceApplicationIdURI, workspaceCtx.roles]);
const stackStyles: IStackStyles = {
root: {
padding: 0,
minWidth: 300
}
};
switch (loadingState) {
case LoadingState.Ok:
return (
<>
{
resourceHistory && resourceHistory.map((history: HistoryItem, i: number) => {
return (
<Stack wrap horizontal style={{borderBottom: '1px #999 solid', padding: '10px 0'}} key={i}>
<Stack grow styles={stackStyles}>
<ResourceHistoryListItem header={'Resource Id'} val={history.resourceId} />
<ResourceHistoryListItem header={'Resource Version'} val={history.resourceVersion.toString()} />
<ResourceHistoryListItem header={'Enabled'} val={history.isEnabled.toString()} />
<ResourceHistoryListItem header={'Template Version'} val={history.templateVersion} />
<ResourceHistoryListItem header={'Updated'} val={`${moment.unix(history.updatedWhen).toLocaleString()} (${moment.unix(history.updatedWhen).fromNow()})`} />
<ResourceHistoryListItem header={'User'} val={history.user.name} />
</Stack>
</Stack>
)
})
}
</>
);
case LoadingState.Error:
return (
<ExceptionLayout e={apiError} />
)
default:
return (
<div style={{ marginTop: '20px' }}>
<Spinner label="Loading history" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
)
}
};
|
AzureTRE/ui/app/src/components/shared/ResourceHistoryList.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ResourceHistoryList.tsx",
"repo_id": "AzureTRE",
"token_count": 1413
}
| 151 |
import { DefaultButton, Dialog, DialogFooter, DocumentCard, DocumentCardActivity, DocumentCardDetails, DocumentCardTitle, DocumentCardType, FontIcon, getTheme, IStackItemStyles, IStackStyles, IStackTokens, mergeStyles, MessageBar, MessageBarType, Modal, Panel, PanelType, Persona, PersonaSize, PrimaryButton, Spinner, SpinnerSize, Stack} from "@fluentui/react";
import moment from "moment";
import React, { useCallback, useContext, useEffect, useState } from "react";
import { useNavigate, useParams } from "react-router-dom";
import { WorkspaceContext } from "../../../contexts/WorkspaceContext";
import { HttpMethod, useAuthApiCall } from "../../../hooks/useAuthApiCall";
import { AirlockFilesLinkValidStatus, AirlockRequest, AirlockRequestAction, AirlockRequestStatus, AirlockReviewDecision } from "../../../models/airlock";
import { ApiEndpoint } from "../../../models/apiEndpoints";
import { APIError } from "../../../models/exceptions";
import { destructiveButtonStyles } from "../../../styles";
import { ExceptionLayout } from "../ExceptionLayout";
import { AirlockRequestFilesSection } from "./AirlockRequestFilesSection";
import { AirlockReviewRequest } from "./AirlockReviewRequest";
interface AirlockViewRequestProps {
requests: AirlockRequest[];
onUpdateRequest: (requests: AirlockRequest) => void;
}
export const AirlockViewRequest: React.FunctionComponent<AirlockViewRequestProps> = (props: AirlockViewRequestProps) => {
const {requestId} = useParams();
const [request, setRequest] = useState<AirlockRequest>();
const [hideSubmitDialog, setHideSubmitDialog] = useState(true);
const [reviewIsOpen, setReviewIsOpen] = useState(false);
const [submitting, setSubmitting] = useState(false);
const [submitError, setSubmitError] = useState(false);
const [hideCancelDialog, setHideCancelDialog] = useState(true);
const [apiError, setApiError] = useState({} as APIError);
const workspaceCtx = useContext(WorkspaceContext);
const apiCall = useAuthApiCall();
const navigate = useNavigate();
useEffect(() => {
// Get the selected request from the router param and find in the requests prop
let req = props.requests.find(r => r.id === requestId) as AirlockRequest;
// If not found, fetch it from the API
if (!req) {
apiCall(
`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.AirlockRequests}/${requestId}`,
HttpMethod.Get,
workspaceCtx.workspaceApplicationIdURI
).then((result) => {
const request = result.airlockRequest as AirlockRequest;
request.allowedUserActions = result.allowedUserActions;
setRequest(request);
});
} else {
setRequest(req);
}
console.log(req);
}, [apiCall, requestId, props.requests, workspaceCtx.workspace.id, workspaceCtx.workspaceApplicationIdURI]);
const dismissPanel = useCallback(() => navigate('../'), [navigate]);
// Submit an airlock request
const submitRequest = useCallback(async () => {
if (request && request.workspaceId) {
setSubmitting(true);
setSubmitError(false);
try {
const response = await apiCall(
`${ApiEndpoint.Workspaces}/${request.workspaceId}/${ApiEndpoint.AirlockRequests}/${request.id}/${ApiEndpoint.AirlockSubmit}`,
HttpMethod.Post,
workspaceCtx.workspaceApplicationIdURI
);
props.onUpdateRequest(response.airlockRequest);
setHideSubmitDialog(true);
} catch (err: any) {
err.userMessage = 'Error submitting airlock request';
setApiError(err);
setSubmitError(true);
}
setSubmitting(false);
}
}, [apiCall, request, props, workspaceCtx.workspaceApplicationIdURI]);
// Cancel an airlock request
const cancelRequest = useCallback(async () => {
if (request && request.workspaceId) {
setSubmitting(true);
setSubmitError(false);
try {
const response = await apiCall(
`${ApiEndpoint.Workspaces}/${request.workspaceId}/${ApiEndpoint.AirlockRequests}/${request.id}/${ApiEndpoint.AirlockCancel}`,
HttpMethod.Post,
workspaceCtx.workspaceApplicationIdURI
);
props.onUpdateRequest(response.airlockRequest);
setHideCancelDialog(true);
} catch (err: any) {
err.userMessage = 'Error cancelling airlock request';
setApiError(err);
setSubmitError(true);
}
setSubmitting(false);
}
}, [apiCall, request, props, workspaceCtx.workspaceApplicationIdURI]);
// Render the panel footer along with buttons that the signed-in user is allowed to see according to the API
const renderFooter = useCallback(() => {
let footer = <></>
if (request) {
footer = <>
{
request.status === AirlockRequestStatus.Draft && <div style={{marginTop: '10px', marginBottom: '10px'}}>
<MessageBar>
This request is currently in draft. Add a file to the request's storage container and submit when ready.
</MessageBar>
</div>
}
{
request.statusMessage && <div style={{marginTop: '10px', marginBottom: '10px'}}>
<MessageBar messageBarType={MessageBarType.error}>{request.statusMessage}</MessageBar>
</div>
}
<div style={{textAlign: 'end'}}>
{
request.allowedUserActions?.includes(AirlockRequestAction.Cancel) &&
<DefaultButton onClick={() => {setSubmitError(false); setHideCancelDialog(false)}} styles={destructiveButtonStyles}>Cancel request</DefaultButton>
}
{
request.allowedUserActions?.includes(AirlockRequestAction.Submit) &&
<PrimaryButton onClick={() => {setSubmitError(false); setHideSubmitDialog(false)}}>Submit</PrimaryButton>
}
{
request.allowedUserActions?.includes(AirlockRequestAction.Review) &&
<PrimaryButton onClick={() => setReviewIsOpen(true)}>Review</PrimaryButton>
}
</div>
</>
}
return footer;
}, [request]);
return (
<>
<Panel
headerText={request && request.title ? request.title : "View airlock request"}
isOpen={true}
isLightDismiss={true}
onDismiss={dismissPanel}
onRenderFooterContent={renderFooter}
isFooterAtBottom={true}
closeButtonAriaLabel="Close"
type={PanelType.custom}
customWidth="450px"
> {
request ? <>
<Stack horizontal horizontalAlign="space-between" style={{marginTop: '40px'}} styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Id</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{request.id}</p>
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Creator</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<Persona size={PersonaSize.size32} text={request.createdBy?.name} />
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Type</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{request.type}</p>
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Status</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{request.status.replace("_", " ")}</p>
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Workspace</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{workspaceCtx.workspace?.properties?.display_name}</p>
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Created</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{moment.unix(request.createdWhen).format('DD/MM/YYYY')}</p>
</Stack.Item>
</Stack>
<Stack horizontal horizontalAlign="space-between" styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Updated</b>
</Stack.Item>
<Stack.Item styles={stackItemStyles}>
<p>{moment.unix(request.updatedWhen).fromNow()}</p>
</Stack.Item>
</Stack>
<Stack style={{marginTop: '20px'}} styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Business Justification</b>
</Stack.Item>
</Stack>
<Stack>
<Stack.Item styles={stackItemStyles}>
<p>{request.businessJustification}</p>
</Stack.Item>
</Stack>
{
AirlockFilesLinkValidStatus.includes(request.status) && <>
<Stack style={{marginTop: '20px'}} styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Files</b>
</Stack.Item>
</Stack>
<AirlockRequestFilesSection request={request} workspaceApplicationIdURI={workspaceCtx.workspaceApplicationIdURI}/>
</>
}
{
request.reviews && request.reviews.length > 0 && <>
<Stack style={{marginTop: '20px', marginBottom: '20px'}} styles={underlineStackStyles}>
<Stack.Item styles={stackItemStyles}>
<b>Reviews</b>
</Stack.Item>
</Stack>
<Stack tokens={stackTokens}>
{
request.reviews.map((review, i) => {
return <DocumentCard
key={i}
aria-label="Review"
type={DocumentCardType.compact}>
<DocumentCardDetails>
<DocumentCardActivity
activity={moment.unix(review.dateCreated).fromNow()}
people={[{name: review.reviewer.name, profileImageSrc: ''}]}
/>
<DocumentCardTitle
title={review.decisionExplanation}
shouldTruncate
showAsSecondaryTitle
/>
</DocumentCardDetails>
<div style={{margin:10}}>
{
review.reviewDecision === AirlockReviewDecision.Approved && <>
<FontIcon aria-label="Approved" iconName="Completed" className={approvedIcon} />
Approved
</>
}
{
review.reviewDecision === AirlockReviewDecision.Rejected && <>
<FontIcon aria-label="Rejected" iconName="ErrorBadge" className={rejectedIcon} />
Rejected
</>
}
</div>
</DocumentCard>
})
}
</Stack>
</>
}
</>
: <div style={{ marginTop: '70px' }}>
<Spinner label="Loading..." ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
}
<Dialog
hidden={hideSubmitDialog}
onDismiss={() => {setHideSubmitDialog(true); setSubmitError(false)}}
dialogContentProps={{
title: 'Submit request?',
subText: 'Make sure you have uploaded your file to the request\'s storage account before submitting.',
}}
>
{
submitError && <ExceptionLayout e={apiError} />
}
{
submitting
? <Spinner label="Submitting..." ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
: <DialogFooter>
<DefaultButton onClick={() => {setHideSubmitDialog(true); setSubmitError(false)}} text="Cancel" />
<PrimaryButton onClick={submitRequest} text="Submit" />
</DialogFooter>
}
</Dialog>
<Dialog
hidden={hideCancelDialog}
onDismiss={() => {setHideCancelDialog(true); setSubmitError(false)}}
dialogContentProps={{
title: 'Cancel Airlock Request?',
subText: 'Are you sure you want to cancel this airlock request?',
}}
>
{
submitError && <ExceptionLayout e={apiError} />
}
{
submitting
? <Spinner label="Cancelling..." ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
: <DialogFooter>
<DefaultButton onClick={cancelRequest} text="Cancel Request" styles={destructiveButtonStyles} />
<DefaultButton onClick={() => {setHideCancelDialog(true); setSubmitError(false)}} text="Back" />
</DialogFooter>
}
</Dialog>
<Modal
titleAriaId={`title-${request?.id}`}
isOpen={reviewIsOpen}
onDismiss={() => setReviewIsOpen(false)}
containerClassName={modalStyles}
>
<AirlockReviewRequest
request={request}
onUpdateRequest={props.onUpdateRequest}
onReviewRequest={(request) => {props.onUpdateRequest(request); setReviewIsOpen(false)}}
onClose={() => setReviewIsOpen(false)}
/>
</Modal>
</Panel>
</>
)
}
const { palette } = getTheme();
const stackTokens: IStackTokens = { childrenGap: 20 };
const underlineStackStyles: IStackStyles = {
root: {
borderBottom: '#f2f2f2 solid 1px'
},
};
const stackItemStyles: IStackItemStyles = {
root: {
alignItems: 'center',
display: 'flex',
height: 50,
margin: '0px 5px'
},
};
const approvedIcon = mergeStyles({
color: palette.green,
marginRight: 5,
fontSize: 12
});
const rejectedIcon = mergeStyles({
color: palette.red,
marginRight: 5,
fontSize: 12
});
const modalStyles = mergeStyles({
display: 'flex',
flexFlow: 'column nowrap',
alignItems: 'stretch',
});
|
AzureTRE/ui/app/src/components/shared/airlock/AirlockViewRequest.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/airlock/AirlockViewRequest.tsx",
"repo_id": "AzureTRE",
"token_count": 6788
}
| 152 |
import React, { useContext, useEffect, useState } from 'react';
import { Route, Routes, useNavigate, useParams } from 'react-router-dom';
import { ApiEndpoint } from '../../models/apiEndpoints';
import { useAuthApiCall, HttpMethod } from '../../hooks/useAuthApiCall';
import { UserResource } from '../../models/userResource';
import { WorkspaceService } from '../../models/workspaceService';
import { PrimaryButton, Spinner, SpinnerSize, Stack } from '@fluentui/react';
import { ComponentAction, Resource } from '../../models/resource';
import { ResourceCardList } from '../shared/ResourceCardList';
import { LoadingState } from '../../models/loadingState';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { ResourceType } from '../../models/resourceType';
import { ResourceHeader } from '../shared/ResourceHeader';
import { useComponentManager } from '../../hooks/useComponentManager';
import { CreateUpdateResourceContext } from '../../contexts/CreateUpdateResourceContext';
import { successStates } from '../../models/operation';
import { UserResourceItem } from './UserResourceItem';
import { ResourceBody } from '../shared/ResourceBody';
import { SecuredByRole } from '../shared/SecuredByRole';
import { WorkspaceRoleName } from '../../models/roleNames';
import { APIError } from '../../models/exceptions';
import { ExceptionLayout } from '../shared/ExceptionLayout';
interface WorkspaceServiceItemProps {
workspaceService?: WorkspaceService,
updateWorkspaceService: (ws: WorkspaceService) => void,
removeWorkspaceService: (ws: WorkspaceService) => void
}
export const WorkspaceServiceItem: React.FunctionComponent<WorkspaceServiceItemProps> = (props: WorkspaceServiceItemProps) => {
const { workspaceServiceId } = useParams();
const [userResources, setUserResources] = useState([] as Array<UserResource>)
const [workspaceService, setWorkspaceService] = useState({} as WorkspaceService)
const [loadingState, setLoadingState] = useState(LoadingState.Loading);
const [selectedUserResource, setSelectedUserResource] = useState({} as UserResource);
const [hasUserResourceTemplates, setHasUserResourceTemplates] = useState(false);
const workspaceCtx = useContext(WorkspaceContext);
const createFormCtx = useContext(CreateUpdateResourceContext);
const navigate = useNavigate();
const apiCall = useAuthApiCall();
const [apiError, setApiError] = useState({} as APIError);
const latestUpdate = useComponentManager(
workspaceService,
(r: Resource) => { props.updateWorkspaceService(r as WorkspaceService); setWorkspaceService(r as WorkspaceService) },
(r: Resource) => { props.removeWorkspaceService(r as WorkspaceService); navigate(`/${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}`) }
);
useEffect(() => {
const getData = async () => {
if(!workspaceCtx.workspace.id) return;
setHasUserResourceTemplates(false);
try {
let svc = props.workspaceService || {} as WorkspaceService;
// did we get passed the workspace service, or shall we get it from the api?
if (props.workspaceService && props.workspaceService.id && props.workspaceService.id === workspaceServiceId) {
setWorkspaceService(props.workspaceService);
} else {
let ws = await apiCall(`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}/${workspaceServiceId}`, HttpMethod.Get, workspaceCtx.workspaceApplicationIdURI);
setWorkspaceService(ws.workspaceService);
svc = ws.workspaceService;
}
// get the user resources
const u = await apiCall(`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServices}/${workspaceServiceId}/${ApiEndpoint.UserResources}`, HttpMethod.Get, workspaceCtx.workspaceApplicationIdURI)
        // get user resource templates - to check whether this workspace service offers any user resource types to create
const ut = await apiCall(`${ApiEndpoint.Workspaces}/${workspaceCtx.workspace.id}/${ApiEndpoint.WorkspaceServiceTemplates}/${svc.templateName}/${ApiEndpoint.UserResourceTemplates}`, HttpMethod.Get, workspaceCtx.workspaceApplicationIdURI);
setHasUserResourceTemplates(ut && ut.templates && ut.templates.length > 0);
setUserResources(u.userResources);
setLoadingState(LoadingState.Ok);
} catch (err: any) {
err.userMessage = "Error retrieving resources";
setApiError(err);
setLoadingState(LoadingState.Error);
}
};
getData();
}, [apiCall, props.workspaceService, workspaceCtx.workspace.id, workspaceCtx.workspaceApplicationIdURI, workspaceServiceId]);
const addUserResource = (u: UserResource) => {
let ur = [...userResources];
ur.push(u);
setUserResources(ur);
}
const updateUserResource = (u: UserResource) => {
let ur = [...userResources];
let i = ur.findIndex((f: UserResource) => f.id === u.id);
ur.splice(i, 1, u);
setUserResources(ur);
}
const removeUserResource = (u: UserResource) => {
let ur = [...userResources];
let i = ur.findIndex((f: UserResource) => f.id === u.id);
ur.splice(i, 1);
setUserResources(ur);
}
switch (loadingState) {
case LoadingState.Ok:
return (
<>
<Routes>
<Route path="*" element={
<>
<ResourceHeader resource={workspaceService} latestUpdate={latestUpdate} />
<ResourceBody resource={workspaceService} />
{
hasUserResourceTemplates &&
<Stack className="tre-panel">
<Stack.Item>
<Stack horizontal horizontalAlign="space-between">
<h1>Resources</h1>
<SecuredByRole allowedWorkspaceRoles={[WorkspaceRoleName.WorkspaceOwner, WorkspaceRoleName.WorkspaceResearcher, WorkspaceRoleName.AirlockManager]} element={
<PrimaryButton iconProps={{ iconName: 'Add' }} text="Create new"
disabled={!workspaceService.isEnabled || latestUpdate.componentAction === ComponentAction.Lock || successStates.indexOf(workspaceService.deploymentStatus) === -1}
title={(!workspaceService.isEnabled || latestUpdate.componentAction === ComponentAction.Lock || successStates.indexOf(workspaceService.deploymentStatus) === -1) ? 'Service must be enabled, successfully deployed, and not locked' : 'Create a User Resource'}
onClick={() => {
createFormCtx.openCreateForm({
resourceType: ResourceType.UserResource,
resourceParent: workspaceService,
onAdd: (r: Resource) => addUserResource(r as UserResource),
workspaceApplicationIdURI: workspaceCtx.workspaceApplicationIdURI
})
}} />
} />
</Stack>
</Stack.Item>
<Stack.Item>
{
userResources &&
<ResourceCardList
resources={userResources}
selectResource={(r: Resource) => setSelectedUserResource(r as UserResource)}
updateResource={(r: Resource) => updateUserResource(r as UserResource)}
removeResource={(r: Resource) => removeUserResource(r as UserResource)}
emptyText="This workspace service contains no user resources."
isExposedExternally={workspaceService.properties.is_exposed_externally} />
}
</Stack.Item>
</Stack>
}
</>
} />
<Route path="user-resources/:userResourceId/*" element={
<UserResourceItem
userResource={selectedUserResource}
updateUserResource={(u: UserResource) => updateUserResource(u)}
removeUserResource={(u: UserResource) => removeUserResource(u)}
/>
} />
</Routes>
</>
);
case LoadingState.Error:
return (
<ExceptionLayout e={apiError} />
);
default:
return (
<div style={{ marginTop: '20px' }}>
<Spinner label="Loading Workspace Service" ariaLive="assertive" labelPosition="top" size={SpinnerSize.large} />
</div>
)
}
};
|
AzureTRE/ui/app/src/components/workspaces/WorkspaceServiceItem.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/workspaces/WorkspaceServiceItem.tsx",
"repo_id": "AzureTRE",
"token_count": 3454
}
| 153 |
import { ResourceType } from "./resourceType";
import { User } from "./user";
export interface Operation {
id: string,
resourceId: string,
resourcePath: string,
resourceVersion: number,
status: string,
action: string,
message: string,
createdWhen: number,
updatedWhen: number,
user: User,
steps?: Array<OperationStep>,
  dismiss?: boolean // UI-only prop, not fed from the API
}
export interface OperationStep {
templateStepId: string,
stepTitle: string,
resourceId: string,
resourceTemplateName: string,
resourceType: ResourceType,
resourceAction: string,
status: string,
message: string,
updatedWhen: number
}
export const awaitingStates = [
"awaiting_deployment",
"awaiting_update",
"awaiting_deletion",
"awaiting_action"
]
export const successStates = [
"deployed",
"updated",
"deleted",
"action_succeeded"
]
export const failedStates = [
"deployment_failed",
"deleting_failed",
"updating_failed",
"action_failed",
]
export const completedStates = [
...failedStates,
...successStates
]
export const inProgressStates = [
...awaitingStates,
"deploying",
"updating",
"deleting",
"invoking_action",
"pipeline_running"
]
export const actionsDisabledStates = [
...inProgressStates,
"deployment_failed",
"failed"
]
|
AzureTRE/ui/app/src/models/operation.ts/0
|
{
"file_path": "AzureTRE/ui/app/src/models/operation.ts",
"repo_id": "AzureTRE",
"token_count": 470
}
| 154 |
{
"compilerOptions": {
"target": "es5",
"lib": [
"dom",
"dom.iterable",
"esnext"
],
"allowJs": true,
"skipLibCheck": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"module": "esnext",
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
"paths": {
"react": [ "./node_modules/@types/react" ]
}
},
"include": [
"src"
]
}
|
AzureTRE/ui/app/tsconfig.json/0
|
{
"file_path": "AzureTRE/ui/app/tsconfig.json",
"repo_id": "AzureTRE",
"token_count": 284
}
| 155 |
# BioGPT
This repository contains the implementation of [BioGPT: Generative Pre-trained Transformer for Biomedical Text Generation and Mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9), by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
# Requirements and Installation
* [PyTorch](http://pytorch.org/) version == 1.12.0
* Python version == 3.10
* fairseq version == 0.12.0:
``` bash
git clone https://github.com/pytorch/fairseq
cd fairseq
git checkout v0.12.0
pip install .
python setup.py build_ext --inplace
cd ..
```
* Moses
``` bash
git clone https://github.com/moses-smt/mosesdecoder.git
export MOSES=${PWD}/mosesdecoder
```
* fastBPE
``` bash
git clone https://github.com/glample/fastBPE.git
export FASTBPE=${PWD}/fastBPE
cd fastBPE
g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast
```
* sacremoses
``` bash
pip install sacremoses
```
* sklearn
``` bash
pip install scikit-learn
```
Remember to set the environment variables `MOSES` and `FASTBPE` to the paths of Moses and fastBPE respectively, as they will be required later.
# Getting Started
## Pre-trained models
We provide our pre-trained BioGPT model checkpoints along with fine-tuned checkpoints for downstream tasks, available both via direct URL download and through the Hugging Face 🤗 Hub.
|Model|Description|URL|🤗 Hub|
|----|----|---|---|
|BioGPT|Pre-trained BioGPT model checkpoint|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/Pre-trained-BioGPT.tgz?sp=r&st=2023-11-13T15:37:35Z&se=2099-12-30T23:37:35Z&spr=https&sv=2022-11-02&sr=b&sig=3CcG1TOhqJPBhkVutvVn3PtUq0vPyLBgwggUfojypfY%3D)|[link](https://huggingface.co/microsoft/biogpt)|
|BioGPT-Large|Pre-trained BioGPT-Large model checkpoint|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/Pre-trained-BioGPT-Large.tgz?sp=r&st=2023-11-13T15:38:13Z&se=2099-12-30T23:38:13Z&spr=https&sv=2022-11-02&sr=b&sig=ib1SZut9wAwrsxGWtFtIZDhrnRg92dwPJmoY2lr3MTg%3D)|[link](https://huggingface.co/microsoft/biogpt-large)|
|BioGPT-QA-PubMedQA-BioGPT|Fine-tuned BioGPT for question answering task on PubMedQA|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/QA-PubMedQA-BioGPT.tgz?sp=r&st=2023-11-13T15:38:43Z&se=2099-12-30T23:38:43Z&spr=https&sv=2022-11-02&sr=b&sig=A5SQae6ifsXmrsgpj4E2flhyXm4iHc%2FqO5b8HGOMyjc%3D)| |
|BioGPT-QA-PubMedQA-BioGPT-Large|Fine-tuned BioGPT-Large for question answering task on PubMedQA|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/QA-PubMedQA-BioGPT-Large.tgz?sp=r&st=2023-11-13T15:39:40Z&se=2099-12-30T23:39:40Z&spr=https&sv=2022-11-02&sr=b&sig=t%2B%2FD%2BxVoIxiuyDsD0VXv%2FjSGoS0VcrdVXycYhWZoxUc%3D)||
|BioGPT-RE-BC5CDR|Fine-tuned BioGPT for relation extraction task on BC5CDR|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/RE-BC5CDR-BioGPT.tgz?sp=r&st=2023-11-13T15:35:14Z&se=2099-12-30T23:35:14Z&spr=https&sv=2022-11-02&sr=b&sig=uXlLIHlVeKIbS%2BVmdzAmlNCeKdoKO2lxsSmwSi%2FH8nE%3D)| |
|BioGPT-RE-DDI|Fine-tuned BioGPT for relation extraction task on DDI|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/RE-DDI-BioGPT.tgz?sp=r&st=2023-11-13T15:35:58Z&se=2099-12-30T23:35:58Z&spr=https&sv=2022-11-02&sr=b&sig=DkaQMuM%2FXAsM2p8%2BUs45ecuqhlSRF1DUYRBJNcxD6Pk%3D)| |
|BioGPT-RE-DTI|Fine-tuned BioGPT for relation extraction task on KD-DTI|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/RE-DTI-BioGPT.tgz?sp=r&st=2023-11-13T15:36:23Z&se=2099-12-30T23:36:23Z&spr=https&sv=2022-11-02&sr=b&sig=bRgUZyqGuwYdM%2FVFzIv6Xa0GThkXq6bVzszmTe9c%2BKM%3D)| |
|BioGPT-DC-HoC|Fine-tuned BioGPT for document classification task on HoC|[link](https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/DC-HoC-BioGPT.tgz?sp=r&st=2023-11-13T15:37:17Z&se=2099-12-30T23:37:17Z&spr=https&sv=2022-11-02&sr=b&sig=1DxroWPt%2FBppCTy7QHs842lLy8SQRcUeUwSfMzDFvl0%3D)| |
Download them and extract them to the `checkpoints` folder of this project.
For example:
``` bash
mkdir checkpoints
cd checkpoints
wget -O Pre-trained-BioGPT.tgz "https://msralaphilly2.blob.core.windows.net/release/BioGPT/checkpoints/Pre-trained-BioGPT.tgz?sp=r&st=2023-11-13T15:37:35Z&se=2099-12-30T23:37:35Z&spr=https&sv=2022-11-02&sr=b&sig=3CcG1TOhqJPBhkVutvVn3PtUq0vPyLBgwggUfojypfY%3D"
tar -zxvf Pre-trained-BioGPT.tgz
```
## Example Usage
Use pre-trained BioGPT model in your code:
```python
import torch
from fairseq.models.transformer_lm import TransformerLanguageModel
m = TransformerLanguageModel.from_pretrained(
"checkpoints/Pre-trained-BioGPT",
"checkpoint.pt",
"data",
tokenizer='moses',
bpe='fastbpe',
bpe_codes="data/bpecodes",
min_len=100,
max_len_b=1024)
m.cuda()
src_tokens = m.encode("COVID-19 is")
generate = m.generate([src_tokens], beam=5)[0]
output = m.decode(generate[0]["tokens"])
print(output)
```
Use fine-tuned BioGPT model on KD-DTI for drug-target-interaction in your code:
```python
import torch
from src.transformer_lm_prompt import TransformerLanguageModelPrompt
m = TransformerLanguageModelPrompt.from_pretrained(
"checkpoints/RE-DTI-BioGPT",
"checkpoint_avg.pt",
"data/KD-DTI/relis-bin",
tokenizer='moses',
bpe='fastbpe',
bpe_codes="data/bpecodes",
max_len_b=1024,
beam=1)
m.cuda()
src_text="" # input text, e.g., a PubMed abstract
src_tokens = m.encode(src_text)
generate = m.generate([src_tokens], beam=args.beam)[0]
output = m.decode(generate[0]["tokens"])
print(output)
```
For more downstream tasks, please see below.
## Downstream tasks
See corresponding folder in [examples](examples):
### [Relation Extraction on BC5CDR](examples/RE-BC5CDR)
### [Relation Extraction on KD-DTI](examples/RE-DTI/)
### [Relation Extraction on DDI](examples/RE-DDI)
### [Document Classification on HoC](examples/DC-HoC/)
### [Question Answering on PubMedQA](examples/QA-PubMedQA/)
### [Text Generation](examples/text-generation/)
## Hugging Face 🤗 Usage
BioGPT has also been integrated into the Hugging Face `transformers` library, and model checkpoints are available on the Hugging Face Hub.
You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility:
```python
from transformers import pipeline, set_seed
from transformers import BioGptTokenizer, BioGptForCausalLM
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
set_seed(42)
generator("COVID-19 is", max_length=20, num_return_sequences=5, do_sample=True)
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import BioGptTokenizer, BioGptForCausalLM
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```
Beam-search decoding:
```python
import torch
from transformers import BioGptTokenizer, BioGptForCausalLM, set_seed
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
sentence = "COVID-19 is"
inputs = tokenizer(sentence, return_tensors="pt")
set_seed(42)
with torch.no_grad():
beam_output = model.generate(**inputs,
min_length=100,
max_length=1024,
num_beams=5,
early_stopping=True
)
tokenizer.decode(beam_output[0], skip_special_tokens=True)
```
For more information, please see the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/biogpt) on the Hugging Face website.
## Demos
Check out these demos on Hugging Face Spaces:
* [Text Generation with BioGPT-Large](https://huggingface.co/spaces/katielink/biogpt-large-demo)
* [Question Answering with BioGPT-Large-PubMedQA](https://huggingface.co/spaces/katielink/biogpt-qa-demo)
# License
BioGPT is MIT-licensed.
The license applies to the pre-trained models as well.
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
# Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos is subject to those third parties' policies.
|
BioGPT/README.md/0
|
{
"file_path": "BioGPT/README.md",
"repo_id": "BioGPT",
"token_count": 3995
}
| 156 |
FORMAT=$1
GOLD_FILE=$2
PREDICTION_FILE=$3
java -cp bc5cdr_eval.jar ncbi.bc5cdr_eval.Evaluate id Disease $FORMAT $GOLD_FILE $PREDICTION_FILE | grep -v INFO
# java -cp bc5cdr_eval.jar ncbi.bc5cdr_eval.Evaluate id Disease $FORMAT $GOLD_FILE $PREDICTION_FILE
|
BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/eval_id.sh/0
|
{
"file_path": "BioGPT/data/BC5CDR/raw/BC5CDR_Evaluation-0.0.3/eval_id.sh",
"repo_id": "BioGPT",
"token_count": 113
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
SAVE_DIR=../../checkpoints/DC-HoC-BioGPT
mkdir -p ${SAVE_DIR}
fairseq-train \
../../data/HoC/ansis-bin --save-dir ${SAVE_DIR} \
--user-dir ../../src \
--finetune-from-model ../../checkpoints/Pre-trained-BioGPT/checkpoint.pt \
--task language_modeling_prompt \
--arch transformer_lm_prompt_biogpt \
--share-decoder-input-output-embed --decoder-learned-pos \
--optimizer adam --adam-betas '(0.9, 0.98)' \
--weight-decay 0.01 --clip-norm 0.0 \
--lr 1e-5 --lr-scheduler inverse_sqrt --warmup-updates 1000 --warmup-init-lr 1e-07 \
--tokens-per-sample 1024 --max-source-positions 900 --max-target-positions 1024 \
--max-tokens 1024 --update-freq 32 \
--skip-invalid-size-inputs-valid-test \
--max-update 20000 --save-interval-updates 1000 --no-epoch-checkpoints \
--learned-prompt 1
|
BioGPT/examples/DC-HoC/train.sh/0
|
{
"file_path": "BioGPT/examples/DC-HoC/train.sh",
"repo_id": "BioGPT",
"token_count": 370
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
import json
import sys
import os
pred_file = sys.argv[1]
gold_file = sys.argv[2]
pmids_file = sys.argv[3]
def normalize_name(s: str):
s = s.strip()
# normalize roman type id at end of string
num2roman = {"0": "0", "1": "I", "2": "II", "3": "III", "4": "IV", "5": "V", "6": "VI", "7": "VII", "8": "VIII", "9": "IX"}
if len(s) > 2 and s[-1].isnumeric() and not s[-2].isnumeric() and s[-1] in num2roman:
tmps = list(s)
s = ''.join(tmps[:-1]) + num2roman[tmps[-1]]
# remove useless end string
s = s.replace("-type", '')
return re.sub('[^a-zA-Z0-9]+', '', s)
def rm_abbr(tgt_set):
""" remove abbreviation in the brackets of entity, eg: aaa (bb) -> aaa """
def rm(s):
s = s.strip()
if "(" in s and s[-1] == ')': # entity end with a bracketed short cut
return normalize_name(s[:s.rfind("(")].strip())
else:
return normalize_name(s)
tgt_set = list(tgt_set)
if tgt_set and type(tgt_set[0]) in [tuple, list]: # process triples
return set([(rm(tp[0]), rm(tp[1]), rm(tp[2])) for tp in tgt_set])
else: # process entities
return set([rm(e) for e in tgt_set])
def get_abbr(tgt_set):
""" extract abbreviation in the brackets of entity, eg: aaa (bb) -> bb """
def rm(s):
s = s.strip()
if "(" in s and s[-1] == ')':
return normalize_name(s[s.rfind("(")+1:-1].strip())
else:
return normalize_name(s)
tgt_set = list(tgt_set)
if tgt_set and type(tgt_set[0]) in [tuple, list]: # process triples
return set([(rm(tp[0]), rm(tp[1]), rm(tp[2])) for tp in tgt_set])
else: # process entities
return set([rm(e) for e in tgt_set])
def acc(pred_set, gold_set):
""" Multi-label style acc """
tp_num = len(pred_set & gold_set)
return int(pred_set == gold_set) if len(gold_set) == 0 else 1.0 * tp_num / len(pred_set | gold_set)
def precision(pred_set, gold_set):
""" Multi-label style precision """
tp_num = len(pred_set & gold_set)
return int(pred_set == gold_set) if len(pred_set) == 0 else 1.0 * tp_num / len(pred_set)
def recall(pred_set, gold_set):
""" Multi-label style recall """
tp_num = len(pred_set & gold_set)
return int(pred_set == gold_set) if len(gold_set) == 0 else 1.0 * tp_num / len(gold_set)
def normed_eval(pred_set, gold_set, metric):
""" Both body and abbreviation match are considered correct """
abbr_pred_set, abbr_gold_set = get_abbr(pred_set), get_abbr(gold_set)
rm_pred_set, rm_gold_set = rm_abbr(pred_set), rm_abbr(gold_set)
return max(metric(abbr_pred_set, abbr_gold_set), metric(rm_pred_set, rm_gold_set))
def get_f1(p, r):
return 0 if (p + r) == 0 else (2.0 * p * r / (p + r))
def ave(scores):
return 1.0 * sum(scores) / len(scores)
def do_eval(preds, pmids, golden):
ret = []
num_pred, num_gold, num_missing = 0, 0, 0
all_f1, p, r, d_acc, t_acc, i_acc = [], [], [], [], [], []
all_pred_triple, all_pred_d, all_pred_t, all_pred_i, all_gold_triple, all_gold_d, all_gold_t, all_gold_i = [], [], [], [], [], [], [], [],
for pred, idx in zip(preds, pmids):
gold_d_set, gold_t_set, gold_i_set, gold_set = set(), set(), set(), set()
pred_d_set, pred_t_set, pred_i_set, pred_set = set(), set(), set(), set()
if pred["triple_list_pred"] and pred["triple_list_pred"][0]["subject"] != 'failed':
for tp in pred["triple_list_pred"]:
d = tp["subject"].strip().lower().replace(' ', '')
t = tp["object"].strip().lower().replace(' ', '')
i = tp["relation"].strip().lower().replace(' ', '')
pred_d_set.add(d)
pred_t_set.add(t)
pred_i_set.add(i)
pred_set.add((d, t, i))
if idx not in golden:
num_missing += 1
# print("----Missing:", idx)
continue
if golden[idx]["triples"]:
for tp in golden[idx]["triples"]:
d = tp["drug"].strip().lower().replace(' ', '')
t = tp["target"].strip().lower().replace(' ', '')
i = tp["interaction"].strip().lower().replace(' ', '')
gold_d_set.add(d)
gold_t_set.add(t)
gold_i_set.add(i)
gold_set.add((d, t, i))
# sample level eval
p.append(normed_eval(pred_set, gold_set, metric=precision))
r.append(normed_eval(pred_set, gold_set, metric=recall))
all_f1.append(get_f1(p[-1], r[-1]))
d_acc.append(normed_eval(pred_d_set, gold_d_set, metric=acc))
t_acc.append(normed_eval(pred_t_set, gold_t_set, metric=acc))
i_acc.append(normed_eval(pred_i_set, gold_i_set, metric=acc))
# onto level eval
all_pred_d.extend(pred_d_set)
all_pred_t.extend(pred_t_set)
all_pred_i.extend(pred_i_set)
all_pred_triple.extend(pred_set)
all_gold_d.extend(gold_d_set)
all_gold_t.extend(gold_t_set)
all_gold_i.extend(gold_i_set)
all_gold_triple.extend(gold_set)
# if len(gold_set) < len(golden[idx]["triples"]):
# print("Duplicate extists, ori", golden[idx]["triples"], gold_set)
num_pred += len(pred_set)
num_gold += len(gold_set)
ret.append({
"pmid": idx,
"title": golden[idx]["title"] if "title" in golden[idx] else None,
"abstract": golden[idx]["abstract"],
"d_pred_gold": [d_acc[-1], list(pred_d_set), list(gold_d_set)],
"t_pred_gold": [t_acc[-1], list(pred_t_set), list(gold_t_set)],
"i_pred_gold": [i_acc[-1], list(pred_i_set), list(gold_i_set)],
"all_pred_gold": [all_f1[-1], list(pred_set), list(gold_set)],
})
print("num sample", len(all_f1), "missing", len(preds) - len(all_f1), "num_gold tp", num_gold, "num_pred", num_pred)
# Note: we adopt multi-label metrics following: http://129.211.169.156/publication/tkde13rev.pdf
print("Sample: acc d: {:.4f}\tt:{:.4f}\ti: {:.4f}\ntp p: {:.4f}\ttp r: {:.4f}\ttp micro f1: {:.4f}\ttp macro f1: {:.4f} ".format(
ave(d_acc), ave(t_acc), ave(i_acc), ave(p), ave(r), ave(all_f1), get_f1(ave(p), ave(r))))
# Ontology evaluation_scripts
all_p, all_r = normed_eval(set(all_pred_triple), set(all_gold_triple), metric=precision), normed_eval(set(all_pred_triple), set(all_gold_triple), metric=recall)
d_p, d_r = normed_eval(set(all_pred_d), set(all_gold_d), metric=precision), normed_eval(set(all_pred_d), set(all_gold_d), metric=recall)
t_p, t_r = normed_eval(set(all_pred_t), set(all_gold_t), metric=precision), normed_eval(set(all_pred_t), set(all_gold_t), metric=recall)
i_p, i_r = normed_eval(set(all_pred_i), set(all_gold_i), metric=precision), normed_eval(set(all_pred_i), set(all_gold_i), metric=recall)
print("Ontology: f1 d: {:.4f}\tt:{:.4f}\ti: {:.4f}\t \nall p: {:.4f}\tall r: {:.4f}\tonto f1: {:.4f}".format(
get_f1(d_p, d_r), get_f1(t_p, t_r), get_f1(i_p, i_r), all_p, all_r, get_f1(all_p, all_r)
))
return ret
def main():
preds = []
with open(pred_file) as reader:
for line in reader:
preds.append(json.loads(line))
with open(gold_file) as reader:
golden = json.load(reader)
with open(pmids_file) as reader:
if '.json' in pmids_file:
pmids = json.load(reader)
else:
pmids = []
for line in reader:
pmids.append(line.strip())
print("\n====File: ", os.path.basename(pred_file))
result = do_eval(preds, pmids, golden)
last_pos = pred_file.rfind('.json')
res_file_name = pred_file[:last_pos] + '.eval_res.json'
with open(res_file_name, 'w') as writer:
json.dump(result, writer, indent=2)
if __name__ == "__main__":
main()
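# Illustrative invocation (a sketch; the file names below are assumptions, not
# shipped fixtures). The script expects three positional arguments, in order:
# the prediction JSONL file, the gold JSON file, and the PMIDs file, and it
# writes "<pred_file without .json>.eval_res.json" next to the prediction file.
#
#   python hard_match_evaluation.py predictions.json gold.json test_pmids.txt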
|
BioGPT/examples/RE-DDI/hard_match_evaluation.py/0
|
{
"file_path": "BioGPT/examples/RE-DDI/hard_match_evaluation.py",
"repo_id": "BioGPT",
"token_count": 3936
}
| 159 |
# Speedup Benchmark vs Vendor Libraries
This part presents a benchmark comparison between our custom library, BitBLAS, and various vendor libraries (cuBLAS, CUTLASS, bitsandbytes, faster-transformer, tensorrt-llm, vLLM, and Marlin) across different matrix operation types (GEMM, GEMV) and data formats (float16xfloat16, int8xint8, float16xint4/nf4). The benchmarks are conducted on NVIDIA GPUs - 24GB RTX 3090 and 80GB A100, with CUDA 12.1 installed.
## Benchmark Overview
### Tested Operations and Formats
- GEMM (General Matrix Multiply) and GEMV (General Matrix-Vector Multiply)
- Data formats: float16, int8, float16xint4/nf4
### Hardware
- NVIDIA RTX 3090 (24GB)
- NVIDIA A100 (80GB)
### Software
- CUDA 12.1
- Compared libraries: cuBLAS, CUTLASS, bitsandbytes, faster-transformer, tensorrt-llm, vLLM, Marlin
- Library versions / commit IDs:
- bitsandbytes == 0.43.0
- vLLM: 865732342b4e3b8a4ef38f28a2a5bdb87cf3f970
- FasterTransformer: 1afbf20129647a35d108152fc6789bc1d029cda5
- TensorRT-LLM: 2bf3a0a4287069ac55ee3304c285b08592d3d1bc
- CUTLASS: 629f4653c3ea3db3264030382956fabe715f3436
- Marlin: 512f1b1ba39ff708bcc95419f11cfd1285cd31b3
## Results Summary
### RTX 3090 Benchmarks
- **Float16 and Int8 GEMM with Tensorcore**: BitBLAS matches the performance of cuBLAS and CUTLASS.
- **Float16xnf4 GEMV and GEMM**: BitBLAS achieves 2x the speed of bitsandbytes and 4x the base float16 performance.
- **Optimal performance** in float16xint4 GEMM.
### A100 Benchmarks
- **Int4 Dequantize Performance**: BitBLAS outperforms bitsandbytes, faster-transformer, tensorrt-llm, vLLM, and Marlin.
## Benchmark Configuration
The benchmark configurations for each test scenario are detailed below:
<!-- center -->
<div align="center">
<table class="tableizer-table">
<thead><tr class="tableizer-firstrow"><th>config</th><th>Provider</th><th>M</th><th>N</th><th>K</th></tr></thead><tbody>
<tr><td>V0</td><td>None</td><td>1</td><td>16384</td><td>16384</td></tr>
<tr><td>V1</td><td>BLOOM</td><td>1</td><td>43008</td><td>14336</td></tr>
<tr><td>V2</td><td>BLOOM</td><td>1</td><td>14336</td><td>14336</td></tr>
<tr><td>V3</td><td>BLOOM</td><td>1</td><td>57344</td><td>14336</td></tr>
<tr><td>V4</td><td>BLOOM</td><td>1</td><td>14336</td><td>57344</td></tr>
<tr><td>V5</td><td>OPT</td><td>1</td><td>9216</td><td>9216</td></tr>
<tr><td>V6</td><td>OPT</td><td>1</td><td>36864</td><td>9216</td></tr>
<tr><td>V7</td><td>OPT</td><td>1</td><td>9216</td><td>36864</td></tr>
<tr><td>V8</td><td>LLAMA</td><td>1</td><td>22016</td><td>8192</td></tr>
<tr><td>V9</td><td>LLAMA</td><td>1</td><td>8192</td><td>22016</td></tr>
<tr><td>V10</td><td>LLAMA-2</td><td>1</td><td>8192</td><td>8192</td></tr>
<tr><td>V11</td><td>LLAMA-2</td><td>1</td><td>28672</td><td>8192</td></tr>
<tr><td>V12</td><td>LLAMA-2</td><td>1</td><td>8192</td><td>28672</td></tr>
<tr><td>M0</td><td>None</td><td>16384</td><td>16384</td><td>16384</td></tr>
<tr><td>M1</td><td>BLOOM</td><td>8192</td><td>43008</td><td>14336</td></tr>
<tr><td>M2</td><td>BLOOM</td><td>8192</td><td>14336</td><td>14336</td></tr>
<tr><td>M3</td><td>BLOOM</td><td>8192</td><td>57344</td><td>14336</td></tr>
<tr><td>M4</td><td>BLOOM</td><td>8192</td><td>14336</td><td>57344</td></tr>
<tr><td>M5</td><td>OPT</td><td>8192</td><td>9216</td><td>9216</td></tr>
<tr><td>M6</td><td>OPT</td><td>8192</td><td>36864</td><td>9216</td></tr>
<tr><td>M7</td><td>OPT</td><td>8192</td><td>9216</td><td>36864</td></tr>
<tr><td>M8</td><td>LLAMA</td><td>8192</td><td>22016</td><td>8192</td></tr>
<tr><td>M9</td><td>LLAMA</td><td>8192</td><td>8192</td><td>22016</td></tr>
<tr><td>M10</td><td>LLAMA-2</td><td>8192</td><td>8192</td><td>8192</td></tr>
<tr><td>M11</td><td>LLAMA-2</td><td>8192</td><td>28672</td><td>8192</td></tr>
<tr><td>M12</td><td>LLAMA-2</td><td>8192</td><td>8192</td><td>28672</td></tr>
</tbody></table>
</div>
**Note:** To reproduce the 3rdparty frameworks' benchmark results, please refer to [mlc-benchmark](https://github.com/LeiWang1999/mlc-benchmark).
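The vendor-library numbers come from each framework's own harness (see the mlc-benchmark link above). As a rough, unofficial illustration of how a single configuration can be timed with BitBLAS itself, the sketch below builds a float16 GEMV for config V10 (M=1, N=8192, K=8192) with the `MatmulConfig`/`Matmul` API that ships in this repository; the warm-up/timing loop, tensor shapes, and tuning call are assumptions for demonstration and are not the scripts used to produce the charts below.

```python
# Illustrative timing sketch (not the official benchmark harness).
import torch
from bitblas.ops.matmul import MatmulConfig, Matmul
from bitblas.utils import auto_detect_nvidia_target

M, N, K = 1, 8192, 8192  # config V10 (LLAMA-2, GEMV shape)
config = MatmulConfig(
    M=M, N=N, K=K,
    in_dtype="float16", out_dtype="float16", accum_dtype="float16",
    with_bias=False, propagate_a=False, propagate_b=False, layout="nt",
)
matmul = Matmul(config=config, target=auto_detect_nvidia_target())
matmul.hardware_aware_finetune(topk=20)  # optional tuning pass

A = torch.randn(M, K, dtype=torch.float16, device="cuda")
W = torch.randn(N, K, dtype=torch.float16, device="cuda")  # "nt" layout: weight stored as (N, K)
C = torch.empty(M, N, dtype=torch.float16, device="cuda")

for _ in range(10):  # warm-up
    matmul(A, W, C)
torch.cuda.synchronize()

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for _ in range(100):
    matmul(A, W, C)
end.record()
torch.cuda.synchronize()
print(f"V10 float16 GEMV: {start.elapsed_time(end) / 100:.3f} ms per call")
```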
## Benchmark Images
BitNET 1.58B INT8xINT2 Matmul BS Scaling on A100.

3090 Related benchmark numbers




A100 Related Benchmark Result


INT8xUINT1 Matmul BS Scaling on A100.

|
BitBLAS/benchmark/README.md/0
|
{
"file_path": "BitBLAS/benchmark/README.md",
"repo_id": "BitBLAS",
"token_count": 2163
}
| 160 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
from typing import List, Union, Literal, Optional
logger = getLogger(__name__)
try:
import bitblas # noqa: F401
except ImportError as e:
bitblas_import_exception = e
def error_raiser_bitblas(*args, **kwargs):
raise ValueError(
f"Trying to use the bitblas backend, but could not import dependencies with the following error: {bitblas_import_exception}"
)
autogptq_bitblas_cuda = bitblas_import_exception
from bitblas.utils import auto_detect_nvidia_target # noqa: E402
from bitblas.ops.matmul import MatmulConfig, Matmul # noqa: E402
class Linear(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
opt_M: Union[int, List[int]] = 1,
bias: bool = False,
dtype: torch.dtype = torch.float16,
propagate_a: bool = False,
propagate_b: bool = False,
enable_tuning: bool = False,
trainable: bool = False,
layout: Literal["nn", "nt"] = "nt",
target: Optional[str] = None,
):
"""
@opt_M: optimize range of the input shape for dynamic symbolic
if the input shape is a range, we will optimize the matmul with dynamic symbolic.
if the input shape is int, we will optimize the matmul with static symbolic.
"""
super().__init__()
if trainable:
raise NotImplementedError("Bitblas does not support train.")
self.in_features = in_features
self.out_features = out_features
self.opt_M = opt_M
self.dtype = dtype
self.propagate_a = propagate_a
self.propagate_b = propagate_b
self.enable_tuning = enable_tuning
self.weight = nn.Parameter(torch.empty((out_features, in_features), dtype=dtype))
if bias:
self.bias = nn.Parameter(torch.empty(out_features, dtype=dtype))
else:
self.register_parameter("bias", None)
BITBLAS_DTYPES = {
torch.float32: "float32",
torch.float16: "float16",
torch.int8: "int8",
}
assert dtype in BITBLAS_DTYPES, f"Unsupported dtype: {dtype}"
bitblas_dtype = BITBLAS_DTYPES[dtype]
self.target = target or auto_detect_nvidia_target()
matmul_config = MatmulConfig(
M=self.opt_M,
N=self.out_features,
K=self.in_features,
in_dtype=bitblas_dtype,
out_dtype=bitblas_dtype,
accum_dtype="int32" if bitblas_dtype == "int8" else bitblas_dtype,
with_bias=bias,
propagate_a=propagate_a,
propagate_b=propagate_b,
layout=layout,
)
self.bitblas_matmul = Matmul(
config=matmul_config,
target=self.target,
)
if enable_tuning:
self.bitblas_matmul.hardware_aware_finetune(topk=20)
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
stdv = 1.0 / np.sqrt(self.weight.shape[1])
self.weight.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.uniform_(-stdv, stdv)
def forward(self, A, output=None):
args = [
A,
self.weight,
]
if self.bias is not None:
args.append(self.bias)
if output is None:
output = torch.empty(
A.shape[:-1] + (self.out_features,), dtype=A.dtype, device=A.device)
args.append(output)
self.bitblas_matmul(*args)
return output
__all__ = ["Linear"]
|
BitBLAS/integration/pytorch/bitblas_linear.py/0
|
{
"file_path": "BitBLAS/integration/pytorch/bitblas_linear.py",
"repo_id": "BitBLAS",
"token_count": 1809
}
| 161 |
# Copyright 2018 The apache/tvm Authors. All Rights Reserved.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modifications Copyright (c) Microsoft.
# The code below is mostly copied from apache/tvm common_schedules.py in dlight.
"""Common schedule strategies for TIR."""
from typing import Callable, List
from tvm import tir
from .analysis import BlockInfo
def get_block(
sch: tir.Schedule,
blocks: List[BlockInfo],
name: str,
):
"""Get the target block from a schedule.
Parameters
----------
sch : tir.Schedule
The TIR schedule used to get target block.
name : str
The name of the target block.
Returns
-------
target_block : BlockRV
The target block.
"""
target_block: tir.BlockRV = None
for block_info in blocks:
block = block_info.block_rv
if sch.get(block).name_hint == name:
target_block = block
return target_block
def get_output_blocks(
sch: tir.Schedule,
blocks: List[BlockInfo],
):
"""Get the output blocks of a schedule.
Parameters
----------
sch : tir.Schedule
The TIR schedule used to get output blocks.
blocks : List[BlockInfo]
The blocks to be analyzed.
Returns
-------
output_blocks : List[BlockInfo]
The output blocks.
"""
# collect arguments buffer
func = sch.mod["main"]
args = list(func.buffer_map.values())
output_blocks = []
for block_info in blocks:
block = block_info.block_rv
for write in sch.get(block).writes:
if write.buffer in args:
output_blocks.append(block)
return output_blocks
def try_inline(
sch: tir.Schedule,
blocks: List[BlockInfo],
) -> List[BlockInfo]:
"""Try to inline as many blocks as possible, and return the remaining blocks.
Parameters
----------
sch : tir.Schedule
The TIR schedule used to inline blocks.
blocks : List[BlockInfo]
The blocks to be inlined.
Returns
-------
remaining : List[BlockInfo]
The remaining blocks that cannot be inlined.
"""
def _trial(func: Callable):
for i, block in enumerate(blocks):
try:
func(block.block_rv)
except Exception: # pylint: disable=bare-except
continue
return i
return None
while True:
i = _trial(sch.compute_inline)
if i is None:
i = _trial(sch.reverse_compute_inline)
if i is None:
break
blocks.pop(i)
return blocks
def try_inline_contiguous_spatial(
sch: tir.Schedule,
block_infos: List[BlockInfo],
) -> List[BlockInfo]:
"""Try to inline contiguous spatial blocks in a schedule
Parameters
----------
sch : tir.Schedule
The TIR schedule used to inline blocks.
block_infos : List[BlockInfo]
The blocks to be try.
Returns
-------
remaining : List[BlockInfo]
The remaining blocks that cannot be inlined.
"""
if block_infos is None:
return None
results = []
spatial_blocks = []
block: BlockInfo
for block in block_infos:
if block.is_injective():
spatial_blocks.append(block)
elif spatial_blocks:
results.extend(try_inline(sch, spatial_blocks))
results.append(block)
spatial_blocks = []
else:
results.append(block)
if spatial_blocks:
results.extend(try_inline(sch, spatial_blocks))
return results
|
BitBLAS/python/bitblas/base/common_schedules.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/common_schedules.py",
"repo_id": "BitBLAS",
"token_count": 1634
}
| 162 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Dict, List, Tuple, Set, Mapping
from tvm.tir.schedule.schedule import BlockRV
from tvm.ir import structural_equal
from tvm import arith, tir
class Statement:
def __init__(self, block_analyzer, block: BlockRV):
self.block_analyzer = block_analyzer
self.block = block
# assume one tir block only has one output buffer
self.dep_name = block_analyzer.get_output_buffers(block)[0].name
self.dependent_region = _extract_dependent_region(block_analyzer, block)
self.reverse_bound_inference = {}
def make_reverse(self, input_name: str, input_iter: List[tir.PrimExpr]):
if len(self.block_analyzer.get_reduce_axis(self.block)) > 0:
return None
if len(self.dependent_region[input_name]) != 1:
return None
indices = self.dependent_region[input_name][0]
iter_map_range = {
_iter.var: _iter.dom for _iter in self.block_analyzer.get_spatial_axis(self.block)
}
iter_map_result = arith.detect_iter_map(
indices,
iter_map_range,
check_level=arith.iter_affine_map.IterMapLevel.Surjective,
simplify_trivial_iterators=False,
)
if len(iter_map_result.errors) > 0:
return None
results = arith.iter_affine_map.inverse_affine_iter_map(iter_map_result.indices, input_iter)
output_indices = []
for _iter in self.block_analyzer.get_spatial_axis(self.block):
if _iter.var in results:
output_indices.append(results[_iter.var])
else:
# not Bijective mapping case
output_indices.append(tir.Var("undefined", dtype="int32") % int(_iter.dom.extent))
return output_indices
def _merge_two_bounds(x: arith.ConstIntBound, y: arith.ConstIntBound):
return arith.ConstIntBound(min(x.min_value, y.min_value), max(x.max_value, y.max_value))
class TensorDepNode(object):
"""
For tensor dependency analysis.
"""
def __init__(self, name):
self.name = name
self._next = []
self._prev = []
def add_next(self, node):
self._next.append(node)
self.deduplicate(self._next)
def add_prev(self, node):
self._prev.append(node)
self.deduplicate(self._prev)
def deduplicate(self, lst):
seen = set()
lst[:] = [n for n in lst if not (n in seen or seen.add(n))]
def __str__(self):
return self.name
def __repr__(self):
return self.name
class DependencyAnalysis(object):
def __init__(self, deps):
self.deps = deps
# issue: duplicate name when we have two same ops.
self.name2dep = self._construct_unique_name2dep(deps)
self.mapping = {} # name -> TensorDepNode
def _construct_unique_name2dep(self, deps):
"""
        This is a workaround for the case where two identical ops are fused, which would
        otherwise produce duplicate output-buffer names.
See https://github.com/apache/tvm/issues/16433
"""
        _names: Set = set()
        name2dep: Mapping = {}
for dep in deps:
output_buffer = dep.block_analyzer.get_output_buffers(dep.block)[0]
base_name = output_buffer.name
if base_name not in _names:
_names.add(base_name)
else:
i = 1
while f"{base_name}_{i}" in _names:
i += 1
base_name = f"{base_name}_{i}"
_names.add(base_name)
name2dep[base_name] = dep
return name2dep
def get_or_create_node(self, name):
if name not in self.mapping:
self.mapping[name] = TensorDepNode(name)
return self.mapping[name]
def traverse_dependencies(self, compute):
if isinstance(compute, Statement):
node = self.get_or_create_node(
compute.block_analyzer.get_output_buffers(compute.block)[0].name
)
# Loop through input tensors
for input_buffer in compute.block_analyzer.get_input_buffers(compute.block):
# Get the input node
input_node = self.traverse_dependencies(input_buffer)
input_node.add_next(node)
node.add_prev(input_node)
elif isinstance(compute, tir.Buffer):
node = self.get_or_create_node(compute.name)
return node
def analyze(self):
# Starting point for traversal
for _, compute in self.name2dep.items():
self.traverse_dependencies(compute)
def print_dependencies(self):
for name, node in self.mapping.items():
print(f"{name} depends on {', '.join([prev.name for prev in node._prev])}")
def find_path_from_source(self, start_name, target_name):
"""
Finds the path (if it exists) from a starting node (source) to a target node.
Returns the path as a list of nodes.
"""
visited = set()
path = []
if self._find_path_recursive(self.mapping[start_name], target_name, visited, path):
return path
return []
def _find_path_recursive(self, current_node, target_name, visited, path):
"""
Recursive helper function for find_path_from_source.
"""
if current_node.name == target_name:
path.append(current_node)
return True
if current_node.name in visited:
return False
visited.add(current_node.name)
path.append(current_node)
for next_node in current_node._next:
if self._find_path_recursive(next_node, target_name, visited, path):
return True
path.pop()
return False
class InputShapeInference:
def __init__(self, deps: List[Statement]):
self.deps = deps
self.target_mapping = {}
self.buffer_mapping = {}
self.reduce_axes = []
for dep in self.deps:
for ax in dep.block_analyzer.get_reduce_axis(dep.block):
self.reduce_axes.append(ax)
self.dep_analysis = DependencyAnalysis(self.deps)
self.dep_analysis.analyze()
def construct_dependency_target(self, targets: Tuple[str]):
if targets in self.target_mapping:
return self.target_mapping[targets]
# should be buffer name instead of block name
name2dep = {
dep.block_analyzer.get_output_buffers(dep.block)[0].name: dep for dep in self.deps
}
mapping = {}
input_vars = []
for target in targets:
vars = [
iter.var
for iter in name2dep[target].block_analyzer.get_spatial_axis(name2dep[target].block)
]
input_vars.append(vars)
mapping[target] = [vars]
ana = arith.Analyzer()
for dep in self.deps:
for name in dep.dependent_region:
if name not in mapping:
continue
dep_name = dep.dep_name
indices = mapping[name][0]
output_indices = dep.make_reverse(name, indices)
if dep_name in targets:
continue
if dep_name not in mapping:
mapping[dep_name] = [output_indices]
elif not region_exist_in_list(output_indices, mapping[dep_name]):
mapping[dep_name].append(output_indices)
for dep in reversed(self.deps):
indices_list = mapping[dep.dep_name]
ax_vars = [iter.var for iter in dep.block_analyzer.get_spatial_axis(dep.block)]
for input_name, regions in dep.dependent_region.items():
if input_name in targets:
continue
if input_name not in mapping:
mapping[input_name] = []
for indices in indices_list:
for region in regions:
vmap = {
k: (tir.Cast(k.dtype, v) if v.dtype != k.dtype else v)
for k, v in zip(ax_vars, indices)
}
region = [
ana.simplify(tir.stmt_functor.substitute(ax, vmap)) for ax in region
]
if not region_exist_in_list(region, mapping[input_name]):
mapping[input_name].append(region)
buffers = []
for dep in self.deps:
for buffer in dep.block_analyzer.get_buffers(dep.block):
buffers.append(buffer)
for buffer in buffers:
self.buffer_mapping[buffer.name] = buffer
self.target_mapping[targets] = input_vars, mapping
return input_vars, mapping
def infer(
self, shape: Dict[str, List[arith.ConstIntBound]], rstep: Dict[str, int] = {}, targets=None
):
compute_targets = tuple(shape.keys())
input_vars, mapping = self.construct_dependency_target(compute_targets)
ana = arith.Analyzer()
results = {}
intermediate_bind = {}
for vars, bounds in zip(input_vars, shape.values()):
for var, bound in zip(vars, bounds):
ana.update(var, bound, True)
for ax in self.reduce_axes:
# assume the dom.min is always 0, maybe we can extend the IterInfo to include the min value.
if ax.var.name in rstep:
bound = arith.ConstIntBound(
int(ax.dom.min), int(ax.dom.min + min(ax.dom.extent, rstep[ax.var.name]) - 1)
)
else:
bound = arith.ConstIntBound(int(ax.dom.min), int(ax.dom.min + ax.dom.extent - 1))
ana.update(ax.var, bound, True)
for name, regions in mapping.items():
if targets is not None and name not in targets:
continue
if compute_targets[0:1] == compute_targets:
(compute_target,) = compute_targets
path = self.dep_analysis.find_path_from_source(name, compute_target)
if len(path) > 2:
intermediate_nodes = path[1:-1]
for node in intermediate_nodes:
iters = mapping[node.name]
if len(iters) != len(regions) or len(iters) != 1:
continue
if len(*iters) != len(*regions):
break
regions = iters
intermediate_bind[name] = compute_target
for region in regions:
bound = [ana.const_int_bound(indice) for indice in region]
if name in results: # simply merge two bounds
bound = [_merge_two_bounds(x, y) for x, y in zip(results[name], bound)]
results[name] = bound
else:
for region in regions:
bound = [ana.const_int_bound(indice) for indice in region]
if name in results: # simply merge two bounds
bound = [_merge_two_bounds(x, y) for x, y in zip(results[name], bound)]
results[name] = bound
for name, bounds in results.items():
results[name] = [c.max_value - c.min_value + 1 for c in bounds]
return results, intermediate_bind
def get_input_exprs(self, output_exprs):
input_vars, mapping = self.construct_dependency_target(tuple(output_exprs.keys()))
ana = arith.Analyzer()
for ax in self.reduce_axes:
ana.bind(ax.var, 0)
vmap = {}
for vars, exprs in zip(input_vars, output_exprs.values()):
for var, expr in zip(vars, exprs):
if expr.dtype != var.dtype:
expr = tir.Cast(var.dtype, expr)
vmap[var] = expr
result = {}
for name, regions in mapping.items():
region = regions[0]
result[name] = [
ana.simplify(tir.stmt_functor.substitute(index, vmap)) for index in region
]
return result
def region_exist_in_list(a, list) -> bool:
def expr_is_same(a, b) -> bool:
if isinstance(a, tir.IntImm) and isinstance(b, tir.IntImm):
return a.value == b.value
return structural_equal(a, b)
def region_is_same(a, b) -> bool:
for indice_a, indice_b in zip(a, b):
if not expr_is_same(indice_a, indice_b):
return False
return True
return any([region_is_same(a, x) for x in list])
def walk_indice(expr):
if isinstance(expr, tir.expr.BinaryOpExpr):
a = walk_indice(expr.a)
b = walk_indice(expr.b)
if a is not None and b is not None:
return expr
else:
return None
elif isinstance(expr, tir.expr.ConstExpr):
return expr
elif isinstance(expr, tir.Var):
return expr
elif isinstance(expr, tir.ProducerLoad):
return None
elif isinstance(expr, tir.Cast):
a = walk_indice(expr.value)
if a is not None:
return expr
return None
elif isinstance(expr, tir.Call):
return None
else:
raise Exception("Unhandled node type in walk_indice(): %s" % expr)
def _extract_dependent_region(block_analyzer, block: BlockRV) -> Dict[str, List[tir.PrimExpr]]:
input_buffers = block_analyzer.get_input_buffers(block)
dependent_region = {buffer.name: [] for buffer in input_buffers}
def fvisit(x):
if not isinstance(x, tir.BufferLoad):
return
if x.buffer.name not in dependent_region:
return
index = []
for indice, shape_limit in zip(x.indices, x.buffer.shape):
expr = walk_indice(indice)
if expr is None:
expr = tir.Var("undefined", dtype="int8") % shape_limit
if isinstance(expr, tir.IntImm) and expr.value == 0:
"""for tensor ir zero dim smplification case.
for ax0, ax1, ax2 in T.grid(T.int64(1024), T.int64(1024), T.int64(1024)):
with T.block("T_dense"):
v0, v1, v2 = T.axis.remap("SSR", [ax0, ax1, ax2])
T.reads(A_reindex[T.int64(0), v0, v2], B_reindex[T.int64(0), v1, v2])
T.writes(T_dense_reindex[T.int64(0), v0, v1])
with T.init():
T_dense_reindex[T.int64(0), v0, v1] = T.float16(0)
T_dense_reindex[T.int64(0), v0, v1] = T_dense_reindex[T.int64(0), v0, v1] + A_reindex[T.int64(0), v0, v2] * B_reindex[T.int64(0), v1, v2]
                For example, T_dense_reindex has three dims, but there are only two spatial loops.
"""
continue
index.append(expr)
if not region_exist_in_list(index, dependent_region[x.buffer.name]):
dependent_region[x.buffer.name].append(index)
stmt = block_analyzer.sch.get(block)
tir.stmt_functor.post_order_visit(stmt, fvisit=fvisit)
return dependent_region
def get_analyzer_by_tir(block_analyzer, args) -> InputShapeInference:
deps = [Statement(block_analyzer, block) for block in args]
return InputShapeInference(deps)
|
BitBLAS/python/bitblas/base/roller/shape_inference/tir.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/shape_inference/tir.py",
"repo_id": "BitBLAS",
"token_count": 7689
}
| 163 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pylint: disable=missing-docstring, invalid-name
"""A GEMM schedule rule for GPU operators."""
from dataclasses import dataclass
from typing import Optional
from tvm import tir
from tvm.target import Target
from tvm.tir.stmt import ForKind
from ..base import analysis
from .base import GPUScheduleRule
from . import utils
from .matmul_analysis import (
auto_inline_consumer_chain,
auto_inline_producers,
get_in_out_dtypes,
get_index_map,
normalize_to_matmul,
get_reduction_blocks,
)
from .matmul_mma import MatmulTensorizationMMA
from .matmul_wmma import (
MatmulInt8Tensorization,
MatmulTensorizationWMMA,
)
from functools import reduce
import logging
logger = logging.getLogger(__name__)
class Matmul(GPUScheduleRule):
"""The schedule rule for matmul-like computation"""
@dataclass
class Config:
block_size_x: int = 8
block_size_y: int = 8
vthread_x: int = 1
vthread_y: int = 1
micro_size_x: int = 4
micro_size_y: int = 4
micro_size_k: int = 8
vector_size: int = 1
unroll: int = 256 # 0 means no unroll
use_shared: bool = True
storage_align: bool = False
inner_x: bool = False
def get_configs(self, target: Target) -> Config:
"""Get the schedule config for the target"""
if target.kind.name == "cuda" or target.kind.name == "rocm":
return Matmul.Config(
block_size_x=8,
block_size_y=16,
vthread_x=1,
vthread_y=1,
micro_size_x=4,
micro_size_y=4,
micro_size_k=16,
vector_size=2,
unroll=256,
use_shared=True,
storage_align=True,
inner_x=False,
)
elif target.kind.name == "opencl" and "android" in str(target.host):
return Matmul.Config(
block_size_x=8,
block_size_y=8,
vthread_x=1,
vthread_y=1,
micro_size_x=8,
micro_size_y=2,
micro_size_k=16,
vector_size=8,
unroll=64,
use_shared=False,
storage_align=False,
inner_x=True,
)
else:
return Matmul.Config()
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> Optional[tir.Schedule]:
if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
return None
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
main_block = reduction_blocks[0]
block_stmt = sch.get(main_block)
sch = normalize_to_matmul(sch, main_block)
if sch is None:
return None
# Step 1. Check Tensor Core support
# Tensorization config:
# If any value of I, J, K is fixed and less than this threshold,
# tensorization rule will not be applied.
minimal_tensorize_threshold = 64
block_stmt = sch.get(main_block)
if target.kind.name == "cuda" and utils.get_sm_version(target) >= 70:
apply_tensorization: bool = True
# the batch dimension is not taken into consideration.
# Analyze read/write buffers and choose correct tensorizer: int8 or fp16.
in_dtype, out_dtype = get_in_out_dtypes(block_stmt)
if in_dtype not in ["int8", "float16"]:
apply_tensorization = False
for item_var in block_stmt.iter_vars[1:]:
extent = item_var.dom.extent
if isinstance(extent,
tir.expr.IntImm) and extent.value <= minimal_tensorize_threshold:
apply_tensorization = False
if apply_tensorization:
if in_dtype == "int8" and out_dtype == "int32":
tensorize_sch = MatmulInt8Tensorization().apply(func, target, _)
elif utils.get_sm_version(target) >= 80:
# For A100(sm_80) or more advanced gpu, use MMA tensorization.
tensorize_sch = MatmulTensorizationMMA().apply(func, target, _)
else:
# For other GPUs, use WMMA tensorization.
tensorize_sch = MatmulTensorizationWMMA().apply(func, target, _)
if tensorize_sch is not None:
return tensorize_sch
# Step 2. Get schedule config.
config = self.get_configs(target)
# Step 3. Schedule matmul
y_kernel_size = config.vthread_y * config.block_size_y * config.micro_size_y
x_kernel_size = config.vthread_x * config.block_size_x * config.micro_size_x
if config.inner_x:
sch.pad_einsum(
main_block,
[1, y_kernel_size, x_kernel_size, config.micro_size_k],
)
batch, y, x, k = sch.get_loops(main_block)
else:
sch.pad_einsum(
main_block,
[1, x_kernel_size, y_kernel_size, config.micro_size_k],
)
batch, x, y, k = sch.get_loops(main_block)
by, vy, ty, yi = sch.split(
y, [None, config.vthread_y, config.block_size_y, config.micro_size_y])
bx, vx, tx, xi = sch.split(
x, [None, config.vthread_x, config.block_size_x, config.micro_size_x])
ko, ki = sch.split(k, factors=[None, config.micro_size_k])
sch.reorder(by, bx, vy, vx, ty, tx, ko, ki, yi, xi)
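# Resulting loop order (from the reorder above): block tiles (by, bx) -> virtual threads (vy, vx)
# -> thread tiles (ty, tx) -> reduction (ko, ki) -> per-thread micro tiles (yi, xi).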
by = sch.fuse(batch, by)
sch.bind(bx, "blockIdx.x")
sch.bind(by, "blockIdx.y")
sch.bind(vy, "vthread.y")
sch.bind(vx, "vthread.x")
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
inner_loop = config.micro_size_x if config.inner_x else config.micro_size_y
if inner_loop % config.vector_size == 0:
_, v = sch.split(xi, [None, config.vector_size])
sch.vectorize(v)
if config.unroll > 0:
sch.annotate(tx, ann_key="pragma_auto_unroll_max_step", ann_val=config.unroll)
sch.annotate(tx, ann_key="pragma_unroll_explicit", ann_val=1)
l2g = sch.cache_write(main_block, 0, "local")
sch.reverse_compute_at(l2g, tx, preserve_unit_loops=True)
if config.micro_size_x % config.vector_size == 0:
_, v = sch.split(sch.get_loops(l2g)[-1], [None, config.vector_size])
sch.vectorize(v)
if config.use_shared:
def _cooperative_fetch(index, vec_len):
block = sch.cache_read(main_block, index, "shared")
num_loops = len(sch.get_loops(block))
sch.compute_at(block, ko, preserve_unit_loops=True)
loops = sch.get_loops(block)[-num_loops:]
ty, tx, _, vec = sch.split(
sch.fuse(*loops),
factors=[config.block_size_y, config.block_size_x, None, vec_len],
)
sch.vectorize(vec)
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
if config.storage_align:
sch.storage_align(block, 0, axis=1, factor=8, offset=vec_len)
return block
a_g2s = _cooperative_fetch(0, vec_len=config.vector_size)
b_g2s = _cooperative_fetch(1, vec_len=config.vector_size)
auto_inline_producers(sch, a_g2s)
auto_inline_producers(sch, b_g2s)
else:
auto_inline_producers(sch, main_block)
auto_inline_consumer_chain(sch, l2g)
sch.decompose_reduction(main_block, ko)
# Step 4. Check if there are unbound blocks. Execute fallback scheduling to them.
def is_scheduled(block: tir.schedule.BlockRV) -> bool:
loops = sch.get_loops(block)
loop_kinds = {sch.get(loop).kind for loop in loops}
return loop_kinds != {ForKind.SERIAL}
blocks = sch.get_child_blocks(root_block)
max_threads_per_block = utils.max_threads_per_block(target) # noqa: F841
for block in blocks:
if is_scheduled(block):
continue
# no axis of the block is bound to thread or block
s_loops = sch.get_loops(block)
bx, tx = sch.split(
sch.fuse(*s_loops),
factors=[
None,
256,
],
)
sch.bind(bx, "blockIdx.x")
sch.bind(tx, "threadIdx.x")
return sch
def apply_config( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
config,
) -> tir.Schedule:
sch = tir.Schedule(func)
root_block = analysis.get_root_block(sch)
blocks = sch.get_child_blocks(root_block)
reduction_blocks = get_reduction_blocks(sch, blocks)
if reduction_blocks is None:
return None
# In some cases the conv template uses this rule, but then the tile config was not
# analyzed from a matmul expression.
if len(config.block) != 2:
logger.debug(f"Warning: block config {config.block} is not valid for matmul, skip.")
return None
main_block = reduction_blocks[0]
block_stmt = sch.get(main_block)
# CUDA cores prefer B in a [k, j] layout without swizzling.
index_maps = get_index_map(block_stmt, ["n", "n", "n"])
if index_maps is None:
return None
matmul_index_map, a_index_map, b_index_map, c_index_map = index_maps
# Step 0. Normalize generic matmul to C[S, I, J] += A[S, I, K] * B[S, J, K]
block = sch.reindex(main_block, ("read", 0))
sch.transform_layout(block, ("write", 0), a_index_map)
block = sch.reindex(main_block, ("read", 1))
sch.transform_layout(block, ("write", 0), b_index_map)
block = sch.reindex(main_block, ("write", 0))
sch.transform_layout(block, ("read", 0), c_index_map)
sch.transform_block_layout(main_block, matmul_index_map)
# Step 2. Get schedule config.
block_row_warps = config.block[0] // (config.thread[0] * config.step[0])
block_col_warps = config.block[1] // (config.thread[1] * config.step[1])
thread_row_tiles = config.thread[1] // (config.step[0] * 2)
thread_col_tiles = config.thread[1] // (config.step[1] * 2)
vthread_row_tiles = (config.step[0] * 2) # expand vthread to avoid bank conflicts on loads
vthread_col_tiles = (config.step[1] * 2) # expand vthread to avoid bank conflicts on loads
chunk = config.rstep[0]
# Step 3. Schedule matmul
BM = block_row_warps * vthread_row_tiles * thread_row_tiles
BN = block_col_warps * vthread_col_tiles * thread_col_tiles
BK = chunk
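# BM/BN are the per-threadblock tile extents along the spatial (M/N) axes; BK is the
# reduction chunk consumed per outer k-loop iteration.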
sch.pad_einsum(
main_block,
[1, BM, BN, BK],
)
batch, y, x, k = sch.get_loops(main_block)
by, vy, ty, yi = sch.split(y, [None, vthread_row_tiles, block_row_warps, thread_row_tiles])
bx, vx, tx, xi = sch.split(x, [None, vthread_col_tiles, block_col_warps, thread_col_tiles])
ko, ki = sch.split(k, factors=[None, BK])
sch.reorder(by, bx, vy, vx, ty, tx, ko, ki, yi, xi)
by = sch.fuse(batch, by)
sch.bind(bx, "blockIdx.x")
sch.bind(by, "blockIdx.y")
sch.bind(vy, "vthread.y")
sch.bind(vx, "vthread.x")
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
def prod(iterable):
return reduce(lambda x, y: x * y, iterable, 1)
l2g = sch.cache_write(main_block, 0, "local")
sch.reverse_compute_at(l2g, tx, preserve_unit_loops=True)
def _cooperative_fetch(index, vec_len):
block = sch.cache_read(main_block, index, "shared")
num_loops = len(sch.get_loops(block))
block_local = sch.cache_read(main_block, index, "local")
sch.compute_at(block_local, ki, preserve_unit_loops=True)
sch.compute_at(block, ko, preserve_unit_loops=True)
loops = sch.get_loops(block)[-num_loops:]
_, ty, tx, vec = sch.split(
sch.fuse(*loops),
factors=[None, block_row_warps, block_col_warps, vec_len],
)
auto_inline_producers(sch, block)
def is_trivial_load(block):
# avoid vectorizing the transposed global[v2, v1] -> shared[v1, v2] case
reads = sch.get(block).reads
writes = sch.get(block).writes
if len(reads) != 1 or len(writes) != 1:
return False
return all(
read.region[-1] == write.region[-1] for read, write in zip(reads, writes))
if is_trivial_load(block):
sch.vectorize(vec)
sch.bind(ty, "threadIdx.y")
sch.bind(tx, "threadIdx.x")
_, vec = sch.split(
sch.fuse(*sch.get_loops(block_local)[-2:]),
[None, vec_len // prod(config.step)],
)
sch.vectorize(vec)
return block
for i, input_region in enumerate(sch.get(main_block).reads):
_buffer_name = input_region.buffer.name.replace("_reindex", "").replace("_pad", "")
if _buffer_name not in config.cached_tensors:
logger.warning(
f"Warning: {_buffer_name} is not in cached_tensors {config.cached_tensors}, skip."
)
continue
# otherwise cooperative fetch in shared memory.
vectorize = config.vectorize.get(_buffer_name, 1)
_cooperative_fetch(i, vec_len=vectorize)
auto_inline_consumer_chain(sch, l2g)
_, vec = sch.split(
sch.fuse(*sch.get_loops(l2g)[-2:]), [None, vectorize // prod(config.step)])
sch.vectorize(vec)
sch.decompose_reduction(main_block, ko)
return sch
|
BitBLAS/python/bitblas/gpu/matmul.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/matmul.py",
"repo_id": "BitBLAS",
"token_count": 7385
}
| 164 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# pre-transformed tir expression of matmul
import tvm
from tvm import te, DataType
from tvm.tir import IndexMap
from bitblas.ops.operator import TransformKind
from bitblas.gpu.matmul_analysis import get_propagate_map
from bitblas.quantization import (
_tir_packed_int_to_int_convert,
_tir_packed_to_signed_convert,
_tir_packed_to_unsigned_convert,
_tir_u32_to_f4_to_f16,
_tir_packed_to_unsigned_convert_with_zeros,
)
def matmul_nt_dequantize_b(
M,
N,
K,
in_dtype="float16",
out_dtype="float16",
accum_dtype="float16",
bit=4,
storage_dtype="int8",
source_format="uint",
with_scaling=False,
with_zeros=False,
group_size=-1,
fast_decoding=False,
with_bias=False,
zeros_mode="original",
):
if not isinstance(M, int):
M = tvm.te.var("m")
storage_nbit = int("".join(c for c in storage_dtype if c.isdigit()))
storage_type = str("".join(c for c in storage_dtype if not c.isdigit()))
n_float_per_elem = storage_nbit // bit
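# e.g. storage_dtype="int8" with bit=4 gives storage_nbit=8, storage_type="int",
# and n_float_per_elem=2 packed weights per storage element.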
if group_size == -1:
group_size = K
A = te.placeholder((M, K), name="A", dtype=in_dtype)
B = te.placeholder((N, K // storage_nbit * bit), name="B", dtype=storage_dtype)
LUT = te.placeholder((1 << bit,), name="LUT", dtype=in_dtype)
Scale = te.placeholder((N, K // group_size), name="Scale", dtype=in_dtype)
Zeros = te.placeholder((N, K // group_size), name="Zeros", dtype=in_dtype)
QZeros = te.placeholder(((K // group_size), N // storage_nbit * bit),
name="QZeros",
dtype=storage_dtype)
Bias = te.placeholder((N,), name="Bias", dtype=in_dtype)
def qzeros_dequantize(k, n):
return _tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
QZeros[k, n // n_float_per_elem],
n % n_float_per_elem,
dtype=storage_dtype,
)
Dequantize_qzeros = te.compute(
(K // group_size, N),
qzeros_dequantize,
name="Dequantize_zeros",
)
def decode_func(n, k):
if with_zeros and zeros_mode == "quantized":
w = _tir_packed_to_unsigned_convert_with_zeros(storage_type, storage_nbit)(
bit,
B[n, k // n_float_per_elem],
k % n_float_per_elem,
Dequantize_qzeros[k // group_size, n],
dtype=in_dtype,
)
elif source_format == "uint":
w = _tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit, B[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
elif source_format == "int":
if bit == 1:
# Dequantize int1 to -1 and 1. Without this step, the values would be 0 and 1, identical to uint1.
w = _tir_packed_int_to_int_convert(storage_type, storage_nbit)(
bit, B[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
else:
w = _tir_packed_to_signed_convert(storage_type, storage_nbit)(
bit, B[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
elif source_format == "fp":
w = _tir_u32_to_f4_to_f16(
bit, B[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
elif source_format == "nf":
w = LUT[_tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
B[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype="int32", # assume the index data type is int32
)]
else:
raise ValueError("Unsupported source_format: {}".format(source_format))
if not with_scaling:
return w
if not with_zeros:
return w * Scale[n, k // group_size]
if zeros_mode == "original":
w = (w - Zeros[n, k // group_size]) * Scale[n, k // group_size]
elif zeros_mode == "rescale":
w = w * Scale[n, k // group_size] - Zeros[n, k // group_size]
elif zeros_mode == "quantized":
w = w * Scale[n, k // group_size]
else:
raise ValueError("Unsupported zeros_mode: {}".format(zeros_mode))
return w
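# Scaling/zero-point semantics applied above (per group of `group_size` elements along K):
#   original : w = (q - zeros) * scale
#   rescale  : w = q * scale - zeros
#   quantized: zeros are themselves bit-packed; they are unpacked by Dequantize_qzeros and
#              folded into the packed-weight decode, so only the scale is applied here.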
B_decode = te.compute((N, K), decode_func, name="B_decode")
# Describe the matrix multiplication in TE
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(M, N),
lambda i, j: te.sum(
A[i, k].astype(accum_dtype) * B_decode[j, k].astype(accum_dtype), axis=k),
name="C",
)
D = te.compute((M, N), lambda i, j: C[i, j].astype(out_dtype), name="D")
args = [A, B]
last_output = D
if source_format == "nf":
args.append(LUT)
if with_scaling:
args.append(Scale)
if with_zeros:
if zeros_mode == "quantized":
args.append(QZeros)
else:
args.append(Zeros)
if with_bias:
E = te.compute((M, N), lambda i, j: D[i, j] + Bias[j], name="E")
last_output = E
args.append(Bias)
args.append(last_output)
func = te.create_prim_func(args).with_attr(
"dequantize_info",
{
"B_decode": {
"decode_block": "B_decode",
"fast_decoding": fast_decoding,
"source_format": {
"bits": bit,
"format": source_format,
},
"storage_dtype": storage_dtype,
"target_format": in_dtype,
"with_scaling": with_scaling,
"with_zeros": with_zeros,
"zeros_mode": zeros_mode,
"group_size": group_size,
}
},
)
return tvm.IRModule.from_expr(func)
def matmul_nt_dequantize_b_propagate_b(
M,
N,
K,
in_dtype="float16",
out_dtype="float16",
accum_dtype="float16",
bit=4,
storage_dtype="int8",
source_format="uint",
with_scaling=False,
with_zeros=False,
group_size=-1,
fast_decoding=False,
with_bias=False,
zeros_mode="original",
transform_kind: TransformKind = TransformKind.IntraWarpTransform,
):
if not isinstance(M, int):
M = tvm.te.var("m")
l = r = 16 # noqa: E741
if in_dtype in ["int8", "e4m3_float8", "e5m2_float8"]:
l, r = 16, 32 # noqa: E741
_, inverse_indexmap = get_propagate_map(trans=True, dtype=in_dtype, matrix_name="B")
target_dtype = DataType(in_dtype)
scaling_factor = 1
if bit > 0 and bit < target_dtype.bits:
scaling_factor = ((target_dtype.bits // bit) * DataType(storage_dtype).bits //
target_dtype.bits)
initial_indices = inverse_indexmap.initial_indices
scaling_final_indices = inverse_indexmap.map_indices(initial_indices[:-1] +
[initial_indices[-1] * scaling_factor])
scaling_final_indices = scaling_final_indices[:-1] + [
scaling_final_indices[-1] // scaling_factor
]
inverse_indexmap = IndexMap(
initial_indices,
scaling_final_indices,
None,
)
storage_nbit = int("".join(c for c in storage_dtype if c.isdigit()))
storage_type = str("".join(c for c in storage_dtype if not c.isdigit()))
n_float_per_elem = storage_nbit // bit
if group_size == -1:
group_size = K
qr = r * bit // storage_nbit
A = te.placeholder((M, K), name="A", dtype=in_dtype)
B = te.placeholder((N // l, (K // scaling_factor) // qr, l, qr), name="B", dtype=storage_dtype)
LUT = te.placeholder((1 << bit,), name="LUT", dtype=in_dtype)
Scale = te.placeholder((N, K // group_size), name="Scale", dtype=in_dtype)
Zeros = te.placeholder((N, K // group_size), name="Zeros", dtype=in_dtype)
Bias = te.placeholder((N,), name="Bias", dtype=in_dtype)
def fcompute(i, j):
warp_i, warp_j = i % l, j % qr
spatial_args = i // l, j // qr
if transform_kind >= TransformKind.IntraWarpTransform:
warp_i, warp_j = inverse_indexmap.map_indices([warp_i, warp_j])
new_index = (*spatial_args, warp_i, warp_j)
return B[new_index]
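# B is stored in a warp-tiled (N // l, K' // qr, l, qr) layout; fcompute applies the inverse
# index map to undo the intra-warp permutation so B_reindex can be addressed as a flat
# (N, K // storage_nbit * bit) packed matrix.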
B_reindex = te.compute(
(N, K // storage_nbit * bit),
fcompute,
name="B_reindex",
)
def decode_func(n, k):
if source_format == "uint":
w = _tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "int":
if bit == 1:
# Dequantize int1 to -1 and 1. Without this step, the values would be 0 and 1, identical to uint1.
w = _tir_packed_int_to_int_convert(storage_type, storage_nbit)(
bit, B_reindex[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
else:
w = _tir_packed_to_signed_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "fp":
w = _tir_u32_to_f4_to_f16(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "nf":
w = LUT[_tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype="int32", # assume the index data type is int32
)]
else:
raise ValueError("Unsupported source_format: {}".format(source_format))
if not with_scaling:
return w
if not with_zeros:
return w * Scale[n, k // group_size]
if zeros_mode == "original":
w = (w - Zeros[n, k // group_size]) * Scale[n, k // group_size]
elif zeros_mode == "rescale":
w = w * Scale[n, k // group_size] - Zeros[n, k // group_size]
else:
raise ValueError("Unsupported zeros_mode: {}".format(zeros_mode))
return w
B_decode = te.compute((N, K), decode_func, name="B_decode")
# Describe the matrix multiplication in TE
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(M, N),
lambda i, j: te.sum(
A[i, k].astype(accum_dtype) * B_decode[j, k].astype(accum_dtype), axis=k),
name="C",
)
D = te.compute((M, N), lambda i, j: C[i, j].astype(out_dtype), name="D")
args = [A, B]
last_output = D
if source_format == "nf":
args.append(LUT)
if with_scaling:
args.append(Scale)
if with_zeros:
args.append(Zeros)
if with_bias:
E = te.compute((M, N), lambda i, j: D[i, j] + Bias[j], name="E")
last_output = E
args.append(Bias)
args.append(last_output)
func = te.create_prim_func(args).with_attr(
"dequantize_info",
{
"B_decode": {
"decode_block": "B_decode",
"fast_decoding": fast_decoding,
"source_format": {
"bits": bit,
"format": source_format,
},
"storage_dtype": storage_dtype,
"target_format": in_dtype,
"with_zeros": with_zeros,
"zeros_mode": zeros_mode,
"with_scaling": with_scaling,
"group_size": group_size,
}
},
)
func = func.with_attr("weight_transform_kind", transform_kind.value)
return tvm.IRModule.from_expr(func)
def matmul_nt_dequantize_b_propagate_a_propagate_b(
M,
N,
K,
in_dtype="float16",
out_dtype="float16",
accum_dtype="float16",
bit=4,
storage_dtype="int8",
source_format="uint",
with_scaling=False,
with_zeros=False,
group_size=-1,
fast_decoding=False,
with_bias=False,
zeros_mode="original",
transform_kind_input: TransformKind = TransformKind.IntraWarpTransform,
transform_kind_weight: TransformKind = TransformKind.IntraWarpTransform,
):
if not isinstance(M, int):
M = tvm.te.var("m")
l = r = 16 # noqa: E741
if in_dtype in ["int8", "e4m3_float8", "e5m2_float8"]:
l, r = 16, 32 # noqa: E741
_, inversed_index_map = get_propagate_map(trans=False, dtype=in_dtype, matrix_name="A")
A = te.placeholder((M // l, K // r, l, r), name="A", dtype=in_dtype)
def fcompute(i, j):
warp_i, warp_j = i % l, j % r
spatial_args = i // l, j // r
if transform_kind_input >= TransformKind.IntraWarpTransform:
warp_i, warp_j = inversed_index_map.map_indices([warp_i, warp_j])
new_index = (*spatial_args, warp_i, warp_j)
return A[new_index]
A_reindex = te.compute(
(M, K),
fcompute,
name="A_reindex",
)
_, inversed_index_map = get_propagate_map(trans=True, dtype=in_dtype, matrix_name="B")
target_dtype = DataType(in_dtype)
scaling_factor = 1
if bit > 0 and bit < target_dtype.bits:
scaling_factor = ((target_dtype.bits // bit) * DataType(storage_dtype).bits //
target_dtype.bits)
initial_indices = inversed_index_map.initial_indices
scaling_final_indices = inversed_index_map.map_indices(
initial_indices[:-1] + [initial_indices[-1] * scaling_factor])
scaling_final_indices = scaling_final_indices[:-1] + [
scaling_final_indices[-1] // scaling_factor
]
inversed_index_map = IndexMap(
initial_indices,
scaling_final_indices,
None,
)
storage_nbit = int("".join(c for c in storage_dtype if c.isdigit()))
storage_type = str("".join(c for c in storage_dtype if not c.isdigit()))
n_float_per_elem = storage_nbit // bit
if group_size == -1:
group_size = K
qr = r * bit // storage_nbit
B = te.placeholder((N // l, (K // scaling_factor) // qr, l, qr), name="B", dtype=storage_dtype)
LUT = te.placeholder((1 << bit,), name="LUT", dtype=in_dtype)
Scale = te.placeholder((N, K // group_size), name="Scale", dtype=in_dtype)
Zeros = te.placeholder((N, K // group_size), name="Zeros", dtype=in_dtype)
Bias = te.placeholder((N,), name="Bias", dtype=in_dtype)
def fcompute(i, j):
warp_i, warp_j = i % l, j % qr
spatial_args = i // l, j // qr
if transform_kind_weight >= TransformKind.IntraWarpTransform:
warp_i, warp_j = inversed_index_map.map_indices([warp_i, warp_j])
new_index = (*spatial_args, warp_i, warp_j)
return B[new_index]
B_reindex = te.compute(
(N, K // storage_nbit * bit),
fcompute,
name="B_reindex",
)
def decode_func(n, k):
if source_format == "uint":
w = _tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "int":
# Dequantize int1 to -1 and 1. Without this step, the values would be 0 and 1, identical to uint1.
if bit == 1:
w = _tir_packed_int_to_int_convert(storage_type, storage_nbit)(
bit, B_reindex[n, k // n_float_per_elem], k % n_float_per_elem, dtype=in_dtype)
else:
w = _tir_packed_to_signed_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "fp":
w = _tir_u32_to_f4_to_f16(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype=in_dtype,
)
elif source_format == "nf":
w = LUT[_tir_packed_to_unsigned_convert(storage_type, storage_nbit)(
bit,
B_reindex[n, k // n_float_per_elem],
k % n_float_per_elem,
dtype="int32", # assume the index data type is int32
)]
else:
raise ValueError("Unsupported source_format: {}".format(source_format))
if not with_scaling:
return w
if not with_zeros:
return w * Scale[n, k // group_size]
if zeros_mode == "original":
w = (w - Zeros[n, k // group_size]) * Scale[n, k // group_size]
elif zeros_mode == "rescale":
w = w * Scale[n, k // group_size] - Zeros[n, k // group_size]
else:
raise ValueError("Unsupported zeros_mode: {}".format(zeros_mode))
return w
B_decode = te.compute((N, K), decode_func, name="B_decode")
# Describe the matrix multiplication in TE
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(M, N),
lambda i, j: te.sum(
A_reindex[i, k].astype(accum_dtype) * B_decode[j, k].astype(accum_dtype),
axis=k,
),
name="C",
)
D = te.compute((M, N), lambda i, j: C[i, j].astype(out_dtype), name="D")
args = [A, B]
last_output = D
if source_format == "nf":
args.append(LUT)
if with_scaling:
args.append(Scale)
if with_zeros:
args.append(Zeros)
if with_bias:
E = te.compute((M, N), lambda i, j: D[i, j] + Bias[j], name="E")
last_output = E
args.append(Bias)
args.append(last_output)
func = te.create_prim_func(args).with_attr(
"dequantize_info",
{
"B_decode": {
"decode_block": "B_decode",
"fast_decoding": fast_decoding,
"source_format": {
"bits": bit,
"format": source_format,
},
"storage_dtype": storage_dtype,
"target_format": in_dtype,
"with_zeros": with_zeros,
"zeros_mode": zeros_mode,
"with_scaling": with_scaling,
"group_size": group_size,
}
},
)
func = func.with_attr("input_transform_kind", transform_kind_input.value)
func = func.with_attr("weight_transform_kind", transform_kind_weight.value)
return tvm.IRModule.from_expr(func)
def select_implementation(
M=None,
N=1024,
K=1024,
in_dtype="float16",
out_dtype="float16",
accum_dtype="float16",
bit=4,
storage_dtype="int8",
source_format="uint",
with_scaling=False,
with_zeros=False,
group_size=-1,
fast_decoding=False,
with_bias=False,
layout="nt",
zeros_mode="original",
propagate_a=False,
propagate_b=False,
):
if layout == "nn":
raise ValueError(
"Currently only support propagate_a=False and propagate_b=False for layout=nn in Dequantize Implementation"
)
elif layout == "nt":
if propagate_a and propagate_b:
return matmul_nt_dequantize_b_propagate_a_propagate_b(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
bit,
storage_dtype,
source_format,
with_scaling,
with_zeros,
group_size,
fast_decoding,
with_bias,
zeros_mode,
transform_kind_input=propagate_a,
transform_kind_weight=propagate_b,
)
elif propagate_a:
raise NotImplementedError
elif propagate_b:
return matmul_nt_dequantize_b_propagate_b(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
bit,
storage_dtype,
source_format,
with_scaling,
with_zeros,
group_size,
fast_decoding,
with_bias,
zeros_mode,
transform_kind=propagate_b,
)
else:
return matmul_nt_dequantize_b(
M,
N,
K,
in_dtype,
out_dtype,
accum_dtype,
bit,
storage_dtype,
source_format,
with_scaling,
with_zeros,
group_size,
fast_decoding,
with_bias,
zeros_mode,
)
else:
raise ValueError(f"Unsupported layout: {layout}")
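# Illustrative usage (not part of the original module): build a 4-bit weight-only GEMM with
# grouped scaling and a dynamic M dimension.
#   ir_mod = select_implementation(M=None, N=1024, K=1024, bit=4, source_format="uint",
#                                  with_scaling=True, group_size=128, layout="nt")
#   # -> tvm.IRModule whose PrimFunc carries the "dequantize_info" attribute used by the
#   #    BitBLAS schedulers.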
|
BitBLAS/python/bitblas/ops/impl/matmul_dequantize_impl.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/impl/matmul_dequantize_impl.py",
"repo_id": "BitBLAS",
"token_count": 11350
}
| 165 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import inspect
import pytest
from bitblas.base import DefaultPolicy, TensorCorePolicy
from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags
# pytest.main() wrapper to allow running single test file
def main():
test_file = inspect.getsourcefile(sys._getframe(1))
sys.exit(pytest.main([test_file] + sys.argv[1:]))
def debug_with_schedule(func, arch, sch_rule):
policy = DefaultPolicy(func=func, arch=arch)
try:
tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target)
except Exception:
tags = None
if tags:
policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags)
configs = policy.emit_config(1)
return sch_rule.apply_config(func, configs[0])
|
BitBLAS/python/bitblas/testing/__init__.py/0
|
{
"file_path": "BitBLAS/python/bitblas/testing/__init__.py",
"repo_id": "BitBLAS",
"token_count": 300
}
| 166 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <gtest/gtest.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "fast_decoding.hpp"
#define cudaCheckLastError(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#define REGISTER_GLOBAL_DEVICE_INVOKER(kernel, function) \
template <typename... Args> \
__global__ void kernel(Args... args) \
{ \
function(args...); \
}
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4s_to_i8s, decode_i4s_to_i8s)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i4u_to_i8s, decode_i4u_to_i8s)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2s_to_i8s, decode_i2s_to_i8s)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i2u_to_i8s, decode_i2u_to_i8s)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1s_to_i8s, decode_i1s_to_i8s)
REGISTER_GLOBAL_DEVICE_INVOKER(kernelWrapper_i1u_to_i8s, decode_i1u_to_i8s)
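// Shared test pipeline for all cases below (as exercised by the host code, not new logic):
// 1) pack N nbits-wide values into QN bytes with general_compress,
// 2) permute the packed bytes with general_interleave_int8 to the layout the LOP3 decoders expect,
// 3) copy to the GPU and run the corresponding decode_* kernel via the global wrapper,
// 4) copy back and compare element-wise against the original inputs.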
TEST(DecodeTest, DecodeInt4ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4s_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
TEST(DecodeTest, DecodeUInt4ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 4;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i4u_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
TEST(DecodeTest, DecodeInt2ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2s_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
TEST(DecodeTest, DecodeUInt2ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 2;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i2u_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
TEST(DecodeTest, DecodeInt1ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = true;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1s_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i1s_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(2 * in_data[i] - 1, int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
TEST(DecodeTest, DecodeUInt1ToINT8)
{
using target_dtype = int8_t;
constexpr int nbits = 1;
constexpr int N = 32 / nbits;
constexpr int QN = N / 8 * nbits;
constexpr bool isSigned = false;
constexpr int zero_point = isSigned ? ((1 << (nbits - 1)) - 1) : 0;
// host-side input buffer of N values
int8_t in_data[N] = {
0,
};
// seed the random number generator
srand(0);
// random initialization within the nbits range
for (int i = 0; i < N; i++)
{
in_data[i] = (rand() % (1 << nbits)) - zero_point;
}
int8_t *ins = new int8_t[QN];
general_compress(in_data, ins, nbits, N, isSigned);
int8_t *interleaved = new int8_t[QN];
general_interleave_int8(ins, interleaved, nbits, QN * sizeof(int8_t), false);
target_dtype *decoded = new target_dtype[N];
int8_t *ins_gpu;
target_dtype *decoded_gpu;
cudaCheckLastError(cudaMalloc((void **)&ins_gpu, QN * sizeof(int8_t)));
cudaCheckLastError(cudaMalloc((void **)&decoded_gpu, N * sizeof(target_dtype)));
cudaCheckLastError(cudaMemcpy(ins_gpu, interleaved, QN * sizeof(int8_t), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaMemcpy(decoded_gpu, decoded, N * sizeof(target_dtype), cudaMemcpyHostToDevice));
cudaCheckLastError(cudaDeviceSynchronize());
kernelWrapper_i1u_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu, decoded_gpu);
kernelWrapper_i1u_to_i8s<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>(ins_gpu + QN / 2, decoded_gpu + N / 2);
cudaCheckLastError(cudaDeviceSynchronize());
cudaCheckLastError(cudaMemcpy(decoded, decoded_gpu, N * sizeof(target_dtype), cudaMemcpyDeviceToHost));
cudaCheckLastError(cudaFree(ins_gpu));
cudaCheckLastError(cudaFree(decoded_gpu));
for (int i = 0; i < N; i++)
{
EXPECT_EQ(in_data[i], int(decoded[i]));
}
delete[] ins;
delete[] interleaved;
delete[] decoded;
}
|
BitBLAS/testing/cpp/lop3_type_conversion/lowprecision_to_int8.cu/0
|
{
"file_path": "BitBLAS/testing/cpp/lop3_type_conversion/lowprecision_to_int8.cu",
"repo_id": "BitBLAS",
"token_count": 5625
}
| 167 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.runtime import const
from tvm.tir import IndexMap, IntImm, floordiv, floormod
from tvm import tir
index_map = IndexMap.from_func(lambda i: [i // 4, i % 4], index_dtype="int32")
initial_i = index_map.initial_indices[0]
# but what we have is i <=> i // 4,
# so we should take the inverse
block_iter_map = IndexMap.from_func(lambda i: [i // 4], index_dtype="int32")
inverse_block_iter_map = index_map.inverse([32,])
new_final_indices = index_map.map_indices([initial_i * 4])
# # tir.IndexMap([initial_i // 4], final_indices, None)
# print(new_final_indices)
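# Worked example (illustrative): index_map sends i=7 -> [1, 3] (i // 4, i % 4); its inverse over a
# domain of 32 elements maps [1, 3] back to 7. map_indices([initial_i * 4]) composes the map with
# i -> 4*i, yielding final indices [i, 0], which is the deduced relation this scratch test probes.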
|
BitBLAS/testing/python/weight_only/index_map_deduce.py/0
|
{
"file_path": "BitBLAS/testing/python/weight_only/index_map_deduce.py",
"repo_id": "BitBLAS",
"token_count": 261
}
| 168 |
date ; hostname ; pwd
EXP_NODES=1
EXP_IS=384
EXP_PGB=8
EXP_PGEB=64
EXP_LR=3e-6
EXP_BS=64
EXP_ME=5
EXP_WS=0.06
EXP_WD=0.01
EXP_LMH=10
EXP_LMC=5
EXP_THL=2
EXP_HHS=2
EXP_LP=BridgeTower_pt_base.ckpt
EXP_RGM=blip_randaug_wc
export MASTER_ADDR=$HOSTNAME
export MASTER_PORT=19800
export NODE_RANK=0
PREFIX_NAME="ftfpt"
echo $MASTER_ADDR, $MASTER_PORT, $NODE_RANK, $EXP_NODES, $EXP_IS, $EXP_PGB, $EXP_PGEB, $EXP_LR, $EXP_BS, $EXP_ME, $EXP_WS, $EXP_WD, $EXP_LMH, $EXP_LMC, $EXP_THL, $EXP_HHS, $EXP_RGM
TIME=$(date "+%Y%m%d%H%M")
RUN_NAME=""$PREFIX_NAME"_"$EXP_IS"_"$EXP_PGB"_"$EXP_PGEB"_"$EXP_LR"_"$EXP_BS"_"$EXP_ME"_"$EXP_WS"_"$EXP_WD"_"$EXP_LMH"_"$EXP_LMC"_"$EXP_THL"_"$EXP_HHS"_"$EXP_RGM"_"$TIME""
echo $RUN_NAME
python run.py with run_name=$RUN_NAME task_finetune_snli_clip_bert bt clip16 text_roberta $EXP_RGM num_gpus=8 num_nodes=$EXP_NODES load_path=~/BT/best_checkpoints/$EXP_LP image_size=$EXP_IS per_gpu_batchsize=$EXP_PGB per_gpu_eval_batchsize=$EXP_PGEB learning_rate=$EXP_LR batch_size=$EXP_BS max_epoch=$EXP_ME warmup_steps=$EXP_WS weight_decay=$EXP_WD lr_mult_head=$EXP_LMH lr_mult_cross_modal=$EXP_LMC task_head_layers=$EXP_THL head_hidden_scale=$EXP_HHS
date
|
BridgeTower/scripts/ftfpt_base_snlive.sh/0
|
{
"file_path": "BridgeTower/scripts/ftfpt_base_snlive.sh",
"repo_id": "BridgeTower",
"token_count": 603
}
| 169 |
from ..datasets import ConceptualCaptionDataset
from .datamodule_base import BaseDataModule
class ConceptualCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ConceptualCaptionDataset
@property
def dataset_name(self):
return "gcc"
|
BridgeTower/src/datamodules/conceptual_caption_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/conceptual_caption_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 143
}
| 170 |
from .base_dataset import BaseDataset
class SNLIDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["snli_train"]
elif split == "val":
names = ["snli_dev", "snli_test"] # ViLT, METER
elif split == "test":
names = ["snli_dev", "snli_test"] # ViLT, METER
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="sentences",
remove_duplicate=False,
)
def __getitem__(self, index):
image_tensor = self.get_image(index)["image"]
text = self.get_text(index)["text"]
index, question_index = self.index_mapper[index]
labels = self.table["labels"][index][question_index].as_py()
return {
"image": image_tensor,
"text": text,
"labels": labels,
"table_name": self.table_names[index],
}
|
BridgeTower/src/datasets/snli_dataset.py/0
|
{
"file_path": "BridgeTower/src/datasets/snli_dataset.py",
"repo_id": "BridgeTower",
"token_count": 532
}
| 171 |
# Modified from the following code repositories:
# https://github.com/salesforce/ALBEF/blob/HEAD/models/vit.py
# https://github.com/dandelin/ViLT/blob/HEAD/vilt/modules/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
import math
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.vision_transformer import PatchEmbed
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
"vit_small_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth",
),
# patch models (weights ported from official Google JAX impl)
"vit_base_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_base_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
# patch models, imagenet21k (weights ported from official Google JAX impl)
"vit_base_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_huge_patch14_224_in21k": _cfg(
url="", # FIXME I have weights for this but > 2GB limit for github release binaries
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
# hybrid models (weights ported from official Google JAX impl)
"vit_base_resnet50_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=0.9,
first_conv="patch_embed.backbone.stem.conv",
),
"vit_base_resnet50_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
first_conv="patch_embed.backbone.stem.conv",
),
# hybrid models (my experiments)
"vit_small_resnet26d_224": _cfg(),
"vit_small_resnet50d_s3_224": _cfg(),
"vit_base_resnet26d_224": _cfg(),
"vit_base_resnet50d_224": _cfg(),
# deit models (FB weights)
"vit_deit_tiny_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth"
),
"vit_deit_small_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth"
),
"vit_deit_base_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
),
"vit_deit_base_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
"vit_deit_tiny_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth"
),
"vit_deit_small_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth"
),
"vit_deit_base_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
),
"vit_deit_base_distilled_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
}
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_gradients = None
self.attention_map = None
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def forward(self, x, register_hook=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
if register_hook:
self.save_attention_map(attn)
attn.register_hook(self.save_attn_gradients)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, register_hook=False):
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, **kwargs):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.model_type = kwargs['model_type']
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward(self, x, register_blk=-1):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:,:x.size(1),:]
x = self.pos_drop(x)
xs = []
for i,blk in enumerate(self.blocks):
x = blk(x, register_blk==i)
if self.model_type == 'BT':
xs.append(x)
x = self.norm(x)
# print(x.shape)
if self.model_type == 'BT':
return self.norm(torch.stack(xs, dim=0))
else:
return x
def forward_pre(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:,:x.size(1),:]
x = self.pos_drop(x)
return x
def forward_post(self, x, register_blk=-1):
x = self.norm(x)
return x
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
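# Illustrative example: loading a 224px checkpoint (14x14 grid + 1 cls token = 197 positions) into a
# 384px model (24x24 grid + 1 = 577 positions) splits off the cls token, reshapes the 196 grid
# embeddings to (1, 14, 14, D), bicubically interpolates them to (1, 24, 24, D), flattens back to
# (1, 576, D), and re-concatenates the cls token.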
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
print("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3."""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3.0,
qkv_bias=False,
norm_layer=nn.LayerNorm,
**kwargs,
)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
model_kwargs.setdefault("qk_scale", 768 ** -0.5)
model = _create_vision_transformer(
"vit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: converted weights not currently available, too large for github release hosting.
"""
model_kwargs = dict(
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
representation_size=1280,
**kwargs,
)
model = _create_vision_transformer(
"vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_224_in21k(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768,
depth=12,
num_heads=12,
hybrid_backbone=backbone,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_384(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet50d_s3_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[3],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
|
BridgeTower/src/modules/vit_model.py/0
|
{
"file_path": "BridgeTower/src/modules/vit_model.py",
"repo_id": "BridgeTower",
"token_count": 14567
}
| 172 |
import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict, Counter
from glossary import normalize_word
def path2rest(path, split, annotations, label2ans):
iid = int(path.split("/")[-1][:-4])
with open(path, "rb") as fp:
binary = fp.read()
_annot = annotations[split][iid]
_annot = list(_annot.items())
qids, qas = [a[0] for a in _annot], [a[1] for a in _annot]
questions = [qa[0] for qa in qas]
answers = [qa[1] for qa in qas] if "test" not in split else list(list())
answer_labels = (
[a["labels"] for a in answers] if "test" not in split else list(list())
)
answer_scores = (
[a["scores"] for a in answers] if "test" not in split else list(list())
)
answers = (
[[label2ans[l] for l in al] for al in answer_labels]
if "test" not in split
else list(list())
)
return [binary, questions, answers, answer_labels, answer_scores, iid, qids, split]
def make_arrow(root, vg_root, dataset_root, use_coco_images_only=False):
with open(f"{root}/vqav2/v2_mscoco_train2014_annotations.json", "r") as fp:
annotations_train2014 = json.load(fp)["annotations"]
with open(f"{root}/vqav2/v2_mscoco_val2014_annotations.json", "r") as fp:
annotations_val2014 = json.load(fp)["annotations"]
all_major_answers = list()
for split, annots in zip(
["train", "val"], [annotations_train2014, annotations_val2014],
):
for q in tqdm(annots):
all_major_answers.append(q["multiple_choice_answer"])
all_major_answers = [normalize_word(word) for word in tqdm(all_major_answers)]
counter = {k: v for k, v in Counter(all_major_answers).items() if v >= 9}
ans2label = {k: i for i, k in enumerate(counter.keys())}
label2ans = list(counter.keys())
if use_coco_images_only:
id_file_name = 'vgqa/coco_ids.json'
output_file_name = 'vgqa_coco'
else:
id_file_name = 'vgqa/ids.json'
output_file_name = 'vgqa'
with open(os.path.join(vg_root, id_file_name)) as f:
ids = json.load(f)
train_image_ids = ids['train']
val_image_ids = ids['val'] + ids['test']
with open(f"{vg_root}/annotations/question_answers.json", "r") as fp:
qa_annotations = json.load(fp)
annotations = dict()
annotations['train'] = defaultdict(dict)
annotations['val'] = defaultdict(dict)
qa_images, qa_valid_images, qa_pairs, qa_valid_pairs = 0, 0, 0, 0
scores_counter = []
for annots in tqdm(qa_annotations):
qas = annots['qas']
split = None
if len(qas) == 0:
continue
if qas[0]['image_id'] in train_image_ids:
split = 'train'
elif qas[0]['image_id'] in val_image_ids:
split = 'val'
if split is not None:
qa_images += 1
qa_pairs += len(qas)
qa_valid_image_flag = 0
question_answer = defaultdict(dict)
for qa in qas:
answer = normalize_word(qa['answer'])
question = qa['question']
if answer in ans2label.keys():
question_answer[question][answer] = question_answer[question].get(answer, 0) + 1
# calculate distribution of question_answers
for q in question_answer:
# if any(count != 1 for count in question_answer[q].values()):
scores = sorted(list(question_answer[q].values()))
scores_counter.append(str(scores))
# only keep questions where all annotators gave the same answer (99% of the samples)
for qa in qas:
answer = normalize_word(qa['answer'])
question = qa['question']
if answer in ans2label.keys() and len(question_answer[question]) == 1:
annotations[split][qa['image_id']][qa['qa_id']] = [question, {"labels": [ans2label[answer]], "scores": [1.0],}]
question_answer[question] = []
qa_valid_image_flag = 1
qa_valid_pairs += 1
qa_valid_images += qa_valid_image_flag
print(f"qa_images: {qa_images}, qa_valid_images: {qa_valid_images}, qa_pairs: {qa_pairs}, qa_valid_pairs: {qa_valid_pairs}, (train: {sum([len(image) for image in annotations['train'].values()])}, val: {sum([len(image) for image in annotations['val'].values()])})")
# coco_ids 49663 48645 727063 491809 (450987, 40822) (the same with mcan-vqa, BUTD use 485000 qa pairs)
# coco_ids_rq 49663 48640 727063 467916 (429061, 38855) (remove question with multiple answers)
# ids 99280 97217 1445322 978121 (887964, 90157)
# ids_rq 99280 97207 1445322 931866 (846015, 85851) (remove question with multiple answers)
distribution = {k: v for k, v in Counter(scores_counter).items()}
distribution = sorted(distribution.items(), key = lambda kv:(kv[1], kv[0]), reverse=True)
print(distribution)
print(sum([a[1] for a in distribution if len(eval(a[0])) != 1]))
# 4463 in 472379 / 8576 in 940442
id2filepath = {}
with open(os.path.join(vg_root, 'image_data.json')) as f:
metadata = json.load(f)
for item in metadata:
directory = item['url'].split("/")[-2]
name = item['url'].split("/")[-1]
filepath = f"{directory}/{name}"
id2filepath[item['image_id']] = filepath
for split in [
"train",
"val",
]:
annot = annotations[split]
paths = list(glob(f"{vg_root}/images/VG_100K/*.jpg")) + list(
glob(f"{vg_root}/images/VG_100K_2/*.jpg")
)
random.shuffle(paths)
annot_paths = [
path
for path in paths
if int(path.split("/")[-1][:-4]) in annot
]
if len(paths) == len(annot_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(annot_paths), len(annot),
)
bs = [
path2rest(path, split, annotations, label2ans) for path in tqdm(annot_paths)
]
dataframe = pd.DataFrame(
bs,
columns=[
"image",
"questions",
"answers",
"answer_labels",
"answer_scores",
"image_id",
"question_id",
"split",
],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/{output_file_name}_{split}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
make_arrow('~/BT/dataset/mscoco_flickr30k_vqav2_snli_ve', '~/BT/dataset/vg', '~/BT/dataset/fine-tune', True)
make_arrow('~/BT/dataset/mscoco_flickr30k_vqav2_snli_ve', '~/BT/dataset/vg', '~/BT/dataset/fine-tune', False)
|
BridgeTower/src/utils/write_vgqa.py/0
|
{
"file_path": "BridgeTower/src/utils/write_vgqa.py",
"repo_id": "BridgeTower",
"token_count": 3444
}
| 173 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import numpy as np
import skimage.io as io
# from face_sdk import FaceDetection
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from skimage.transform import SimilarityTransform
from skimage.transform import warp
from PIL import Image, ImageFilter
import torch.nn.functional as F
import torchvision as tv
import torchvision.utils as vutils
import time
import cv2
import os
from skimage import img_as_ubyte
import json
import argparse
import dlib
def calculate_cdf(histogram):
"""
This method calculates the cumulative distribution function
:param array histogram: The values of the histogram
:return: normalized_cdf: The normalized cumulative distribution function
:rtype: array
"""
# Get the cumulative sum of the elements
cdf = histogram.cumsum()
# Normalize the cdf
normalized_cdf = cdf / float(cdf.max())
return normalized_cdf
def calculate_lookup(src_cdf, ref_cdf):
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup_table = np.zeros(256)
lookup_val = 0
for src_pixel_val in range(len(src_cdf)):
for ref_pixel_val in range(len(ref_cdf)):
if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
lookup_val = ref_pixel_val
break
lookup_table[src_pixel_val] = lookup_val
return lookup_table
def match_histograms(src_image, ref_image):
"""
This method matches the source image histogram to the
reference signal
:param image src_image: The original source image
:param image ref_image: The reference image
:return: image_after_matching
:rtype: image (array)
"""
# Split the images into the different color channels
# b means blue, g means green and r means red
src_b, src_g, src_r = cv2.split(src_image)
ref_b, ref_g, ref_r = cv2.split(ref_image)
# Compute the b, g, and r histograms separately
# The flatten() Numpy method returns a copy of the array c
# collapsed into one dimension.
src_hist_blue, bin_0 = np.histogram(src_b.flatten(), 256, [0, 256])
src_hist_green, bin_1 = np.histogram(src_g.flatten(), 256, [0, 256])
src_hist_red, bin_2 = np.histogram(src_r.flatten(), 256, [0, 256])
ref_hist_blue, bin_3 = np.histogram(ref_b.flatten(), 256, [0, 256])
ref_hist_green, bin_4 = np.histogram(ref_g.flatten(), 256, [0, 256])
ref_hist_red, bin_5 = np.histogram(ref_r.flatten(), 256, [0, 256])
# Compute the normalized cdf for the source and reference image
src_cdf_blue = calculate_cdf(src_hist_blue)
src_cdf_green = calculate_cdf(src_hist_green)
src_cdf_red = calculate_cdf(src_hist_red)
ref_cdf_blue = calculate_cdf(ref_hist_blue)
ref_cdf_green = calculate_cdf(ref_hist_green)
ref_cdf_red = calculate_cdf(ref_hist_red)
# Make a separate lookup table for each color
blue_lookup_table = calculate_lookup(src_cdf_blue, ref_cdf_blue)
green_lookup_table = calculate_lookup(src_cdf_green, ref_cdf_green)
red_lookup_table = calculate_lookup(src_cdf_red, ref_cdf_red)
# Use the lookup function to transform the colors of the original
# source image
blue_after_transform = cv2.LUT(src_b, blue_lookup_table)
green_after_transform = cv2.LUT(src_g, green_lookup_table)
red_after_transform = cv2.LUT(src_r, red_lookup_table)
# Put the image back together
image_after_matching = cv2.merge([blue_after_transform, green_after_transform, red_after_transform])
image_after_matching = cv2.convertScaleAbs(image_after_matching)
return image_after_matching
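# Illustrative usage sketch (file names are placeholders, not part of this script):
# src = cv2.imread("restored_face.png")    # BGR uint8 source image to be recoloured
# ref = cv2.imread("aligned_face.png")     # BGR uint8 reference image
# matched = match_histograms(src, ref)     # src with per-channel histograms matched to ref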
def _standard_face_pts():
pts = (
np.array([196.0, 226.0, 316.0, 226.0, 256.0, 286.0, 220.0, 360.4, 292.0, 360.4], np.float32) / 256.0
- 1.0
)
return np.reshape(pts, (5, 2))
def _origin_face_pts():
pts = np.array([196.0, 226.0, 316.0, 226.0, 256.0, 286.0, 220.0, 360.4, 292.0, 360.4], np.float32)
return np.reshape(pts, (5, 2))
def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):
std_pts = _standard_face_pts() # [-1,1]
target_pts = (std_pts * target_face_scale + 1) / 2 * 512.0
# print(target_pts)
h, w, c = img.shape
if normalize == True:
landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0
# print(landmark)
affine = SimilarityTransform()
affine.estimate(target_pts, landmark)
return affine
def compute_inverse_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):
std_pts = _standard_face_pts() # [-1,1]
target_pts = (std_pts * target_face_scale + 1) / 2 * 512.0
# print(target_pts)
h, w, c = img.shape
if normalize == True:
landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0
# print(landmark)
affine = SimilarityTransform()
affine.estimate(landmark, target_pts)
return affine
def show_detection(image, box, landmark):
plt.imshow(image)
print(box[2] - box[0])
plt.gca().add_patch(
Rectangle(
(box[1], box[0]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor="r", facecolor="none"
)
)
plt.scatter(landmark[0][0], landmark[0][1])
plt.scatter(landmark[1][0], landmark[1][1])
plt.scatter(landmark[2][0], landmark[2][1])
plt.scatter(landmark[3][0], landmark[3][1])
plt.scatter(landmark[4][0], landmark[4][1])
plt.show()
def affine2theta(affine, input_w, input_h, target_w, target_h):
# param = np.linalg.inv(affine)
param = affine
theta = np.zeros([2, 3])
theta[0, 0] = param[0, 0] * input_h / target_h
theta[0, 1] = param[0, 1] * input_w / target_h
theta[0, 2] = (2 * param[0, 2] + param[0, 0] * input_h + param[0, 1] * input_w) / target_h - 1
theta[1, 0] = param[1, 0] * input_h / target_w
theta[1, 1] = param[1, 1] * input_w / target_w
theta[1, 2] = (2 * param[1, 2] + param[1, 0] * input_h + param[1, 1] * input_w) / target_w - 1
return theta
def blur_blending(im1, im2, mask):
mask *= 255.0
kernel = np.ones((10, 10), np.uint8)
mask = cv2.erode(mask, kernel, iterations=1)
mask = Image.fromarray(mask.astype("uint8")).convert("L")
im1 = Image.fromarray(im1.astype("uint8"))
im2 = Image.fromarray(im2.astype("uint8"))
mask_blur = mask.filter(ImageFilter.GaussianBlur(20))
im = Image.composite(im1, im2, mask)
im = Image.composite(im, im2, mask_blur)
return np.array(im) / 255.0
def blur_blending_cv2(im1, im2, mask):
mask *= 255.0
kernel = np.ones((9, 9), np.uint8)
mask = cv2.erode(mask, kernel, iterations=3)
mask_blur = cv2.GaussianBlur(mask, (25, 25), 0)
mask_blur /= 255.0
im = im1 * mask_blur + (1 - mask_blur) * im2
im /= 255.0
im = np.clip(im, 0.0, 1.0)
return im
# def Poisson_blending(im1,im2,mask):
# Image.composite(
def Poisson_blending(im1, im2, mask):
# mask=1-mask
mask *= 255
kernel = np.ones((10, 10), np.uint8)
mask = cv2.erode(mask, kernel, iterations=1)
mask /= 255
mask = 1 - mask
mask *= 255
mask = mask[:, :, 0]
width, height, channels = im1.shape
center = (int(height / 2), int(width / 2))
result = cv2.seamlessClone(
im2.astype("uint8"), im1.astype("uint8"), mask.astype("uint8"), center, cv2.MIXED_CLONE
)
return result / 255.0
def Poisson_B(im1, im2, mask, center):
mask *= 255
result = cv2.seamlessClone(
im2.astype("uint8"), im1.astype("uint8"), mask.astype("uint8"), center, cv2.NORMAL_CLONE
)
return result / 255
def seamless_clone(old_face, new_face, raw_mask):
height, width, _ = old_face.shape
height = height // 2
width = width // 2
y_indices, x_indices, _ = np.nonzero(raw_mask)
y_crop = slice(np.min(y_indices), np.max(y_indices))
x_crop = slice(np.min(x_indices), np.max(x_indices))
y_center = int(np.rint((np.max(y_indices) + np.min(y_indices)) / 2 + height))
x_center = int(np.rint((np.max(x_indices) + np.min(x_indices)) / 2 + width))
insertion = np.rint(new_face[y_crop, x_crop] * 255.0).astype("uint8")
insertion_mask = np.rint(raw_mask[y_crop, x_crop] * 255.0).astype("uint8")
insertion_mask[insertion_mask != 0] = 255
prior = np.rint(np.pad(old_face * 255.0, ((height, height), (width, width), (0, 0)), "constant")).astype(
"uint8"
)
# if np.sum(insertion_mask) == 0:
n_mask = insertion_mask[1:-1, 1:-1, :]
n_mask = cv2.copyMakeBorder(n_mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)
print(n_mask.shape)
x, y, w, h = cv2.boundingRect(n_mask[:, :, 0])
if w < 4 or h < 4:
blended = prior
else:
blended = cv2.seamlessClone(
insertion, # pylint: disable=no-member
prior,
insertion_mask,
(x_center, y_center),
cv2.NORMAL_CLONE,
) # pylint: disable=no-member
blended = blended[height:-height, width:-width]
return blended.astype("float32") / 255.0
def get_landmark(face_landmarks, id):
part = face_landmarks.part(id)
x = part.x
y = part.y
return (x, y)
def search(face_landmarks):
x1, y1 = get_landmark(face_landmarks, 36)
x2, y2 = get_landmark(face_landmarks, 39)
x3, y3 = get_landmark(face_landmarks, 42)
x4, y4 = get_landmark(face_landmarks, 45)
x_nose, y_nose = get_landmark(face_landmarks, 30)
x_left_mouth, y_left_mouth = get_landmark(face_landmarks, 48)
x_right_mouth, y_right_mouth = get_landmark(face_landmarks, 54)
x_left_eye = int((x1 + x2) / 2)
y_left_eye = int((y1 + y2) / 2)
x_right_eye = int((x3 + x4) / 2)
y_right_eye = int((y3 + y4) / 2)
results = np.array(
[
[x_left_eye, y_left_eye],
[x_right_eye, y_right_eye],
[x_nose, y_nose],
[x_left_mouth, y_left_mouth],
[x_right_mouth, y_right_mouth],
]
)
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--origin_url", type=str, default="./", help="origin images")
parser.add_argument("--replace_url", type=str, default="./", help="restored faces")
parser.add_argument("--save_url", type=str, default="./save")
opts = parser.parse_args()
origin_url = opts.origin_url
replace_url = opts.replace_url
save_url = opts.save_url
if not os.path.exists(save_url):
os.makedirs(save_url)
face_detector = dlib.get_frontal_face_detector()
landmark_locator = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
count = 0
for x in os.listdir(origin_url):
img_url = os.path.join(origin_url, x)
pil_img = Image.open(img_url).convert("RGB")
origin_width, origin_height = pil_img.size
image = np.array(pil_img)
start = time.time()
faces = face_detector(image)
done = time.time()
if len(faces) == 0:
print("Warning: There is no face in %s" % (x))
continue
blended = image
for face_id in range(len(faces)):
current_face = faces[face_id]
face_landmarks = landmark_locator(image, current_face)
current_fl = search(face_landmarks)
forward_mask = np.ones_like(image).astype("uint8")
affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
aligned_face = warp(image, affine, output_shape=(512, 512, 3), preserve_range=True)
forward_mask = warp(
forward_mask, affine, output_shape=(512, 512, 3), order=0, preserve_range=True
)
affine_inverse = affine.inverse
cur_face = aligned_face
if replace_url != "":
face_name = x[:-4] + "_" + str(face_id + 1) + ".png"
cur_url = os.path.join(replace_url, face_name)
restored_face = Image.open(cur_url).convert("RGB")
restored_face = np.array(restored_face)
cur_face = restored_face
## Histogram Color matching
A = cv2.cvtColor(aligned_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = cv2.cvtColor(cur_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = match_histograms(B, A)
cur_face = cv2.cvtColor(B.astype("uint8"), cv2.COLOR_BGR2RGB)
warped_back = warp(
cur_face,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=3,
preserve_range=True,
)
backward_mask = warp(
forward_mask,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=0,
preserve_range=True,
) ## Nearest neighbour
blended = blur_blending_cv2(warped_back, blended, backward_mask)
blended *= 255.0
io.imsave(os.path.join(save_url, x), img_as_ubyte(blended / 255.0))
count += 1
if count % 1000 == 0:
print("%d have finished ..." % (count))
|
Bringing-Old-Photos-Back-to-Life/Face_Detection/align_warp_back_multiple_dlib_HR.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Detection/align_warp_back_multiple_dlib_HR.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 6102
}
| 174 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import models.networks as networks
import util.util as util
class Pix2PixModel(torch.nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train):
networks.modify_commandline_options(parser, is_train)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() else torch.FloatTensor
self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() else torch.ByteTensor
self.netG, self.netD, self.netE = self.initialize_networks(opt)
# set loss functions
if opt.isTrain:
self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg_loss:
self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids)
if opt.use_vae:
self.KLDLoss = networks.KLDLoss()
# Entry point for all calls involving forward pass
# of deep networks. We used this approach since DataParallel module
# can't parallelize custom functions, we branch to different
# routines based on |mode|.
def forward(self, data, mode):
input_semantics, real_image, degraded_image = self.preprocess_input(data)
if mode == "generator":
g_loss, generated = self.compute_generator_loss(input_semantics, degraded_image, real_image)
return g_loss, generated
elif mode == "discriminator":
d_loss = self.compute_discriminator_loss(input_semantics, degraded_image, real_image)
return d_loss
elif mode == "encode_only":
z, mu, logvar = self.encode_z(real_image)
return mu, logvar
elif mode == "inference":
with torch.no_grad():
fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)
return fake_image
else:
raise ValueError("|mode| is invalid")
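# Illustrative call pattern for the mode-based forward API above (variable names are placeholders):
# g_losses, generated = model(data, mode="generator")
# d_losses = model(data, mode="discriminator")
# fake = model(data, mode="inference")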
def create_optimizers(self, opt):
G_params = list(self.netG.parameters())
if opt.use_vae:
G_params += list(self.netE.parameters())
if opt.isTrain:
D_params = list(self.netD.parameters())
beta1, beta2 = opt.beta1, opt.beta2
if opt.no_TTUR:
G_lr, D_lr = opt.lr, opt.lr
else:
G_lr, D_lr = opt.lr / 2, opt.lr * 2
optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))
optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2))
return optimizer_G, optimizer_D
def save(self, epoch):
util.save_network(self.netG, "G", epoch, self.opt)
util.save_network(self.netD, "D", epoch, self.opt)
if self.opt.use_vae:
util.save_network(self.netE, "E", epoch, self.opt)
############################################################################
# Private helper methods
############################################################################
def initialize_networks(self, opt):
netG = networks.define_G(opt)
netD = networks.define_D(opt) if opt.isTrain else None
netE = networks.define_E(opt) if opt.use_vae else None
if not opt.isTrain or opt.continue_train:
netG = util.load_network(netG, "G", opt.which_epoch, opt)
if opt.isTrain:
netD = util.load_network(netD, "D", opt.which_epoch, opt)
if opt.use_vae:
netE = util.load_network(netE, "E", opt.which_epoch, opt)
return netG, netD, netE
# preprocess the input, such as moving the tensors to GPUs and
# transforming the label map to one-hot encoding
# |data|: dictionary of the input data
def preprocess_input(self, data):
# move to GPU and change data types
# data['label'] = data['label'].long()
if not self.opt.isTrain:
if self.use_gpu():
data["label"] = data["label"].cuda()
data["image"] = data["image"].cuda()
return data["label"], data["image"], data["image"]
## While testing, the input image is the degraded face
if self.use_gpu():
data["label"] = data["label"].cuda()
data["degraded_image"] = data["degraded_image"].cuda()
data["image"] = data["image"].cuda()
# # create one-hot label map
# label_map = data['label']
# bs, _, h, w = label_map.size()
# nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \
# else self.opt.label_nc
# input_label = self.FloatTensor(bs, nc, h, w).zero_()
# input_semantics = input_label.scatter_(1, label_map, 1.0)
return data["label"], data["image"], data["degraded_image"]
def compute_generator_loss(self, input_semantics, degraded_image, real_image):
G_losses = {}
fake_image, KLD_loss = self.generate_fake(
input_semantics, degraded_image, real_image, compute_kld_loss=self.opt.use_vae
)
if self.opt.use_vae:
G_losses["KLD"] = KLD_loss
pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)
G_losses["GAN"] = self.criterionGAN(pred_fake, True, for_discriminator=False)
if not self.opt.no_ganFeat_loss:
num_D = len(pred_fake)
GAN_Feat_loss = self.FloatTensor(1).fill_(0)
for i in range(num_D): # for each discriminator
# last output is the final prediction, so we exclude it
num_intermediate_outputs = len(pred_fake[i]) - 1
for j in range(num_intermediate_outputs): # for each layer output
unweighted_loss = self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach())
GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D
G_losses["GAN_Feat"] = GAN_Feat_loss
if not self.opt.no_vgg_loss:
G_losses["VGG"] = self.criterionVGG(fake_image, real_image) * self.opt.lambda_vgg
return G_losses, fake_image
def compute_discriminator_loss(self, input_semantics, degraded_image, real_image):
D_losses = {}
with torch.no_grad():
fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)
fake_image = fake_image.detach()
fake_image.requires_grad_()
pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)
D_losses["D_Fake"] = self.criterionGAN(pred_fake, False, for_discriminator=True)
D_losses["D_real"] = self.criterionGAN(pred_real, True, for_discriminator=True)
return D_losses
def encode_z(self, real_image):
mu, logvar = self.netE(real_image)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
def generate_fake(self, input_semantics, degraded_image, real_image, compute_kld_loss=False):
z = None
KLD_loss = None
if self.opt.use_vae:
z, mu, logvar = self.encode_z(real_image)
if compute_kld_loss:
KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld
fake_image = self.netG(input_semantics, degraded_image, z=z)
assert (
not compute_kld_loss
) or self.opt.use_vae, "You cannot compute KLD loss if opt.use_vae == False"
return fake_image, KLD_loss
# Given fake and real image, return the prediction of discriminator
# for each fake and real image.
def discriminate(self, input_semantics, fake_image, real_image):
if self.opt.no_parsing_map:
fake_concat = fake_image
real_concat = real_image
else:
fake_concat = torch.cat([input_semantics, fake_image], dim=1)
real_concat = torch.cat([input_semantics, real_image], dim=1)
# In Batch Normalization, the fake and real images are
# recommended to be in the same batch to avoid disparate
# statistics in fake and real images.
# So both fake and real images are fed to D all at once.
fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
discriminator_out = self.netD(fake_and_real)
pred_fake, pred_real = self.divide_pred(discriminator_out)
return pred_fake, pred_real
# Take the prediction of fake and real images from the combined batch
def divide_pred(self, pred):
# the prediction contains the intermediate outputs of multiscale GAN,
# so it's usually a list
if type(pred) == list:
fake = []
real = []
for p in pred:
fake.append([tensor[: tensor.size(0) // 2] for tensor in p])
real.append([tensor[tensor.size(0) // 2 :] for tensor in p])
else:
fake = pred[: pred.size(0) // 2]
real = pred[pred.size(0) // 2 :]
return fake, real
def get_edges(self, t):
edge = self.ByteTensor(t.size()).zero_()
edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
return edge.float()
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std) + mu
def use_gpu(self):
return len(self.opt.gpu_ids) > 0
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/pix2pix_model.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/pix2pix_model.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 4446
}
| 175 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data
import random
from data.base_data_loader import BaseDataLoader
from data import online_dataset_for_old_photos as dts_ray_bigfile
def CreateDataset(opt):
dataset = None
if opt.training_dataset=='domain_A' or opt.training_dataset=='domain_B':
dataset = dts_ray_bigfile.UnPairOldPhotos_SR()
if opt.training_dataset=='mapping':
if opt.random_hole:
dataset = dts_ray_bigfile.PairOldPhotos_with_hole()
else:
dataset = dts_ray_bigfile.PairOldPhotos()
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
drop_last=True)
def load_data(self):
return self.dataloader
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
|
Bringing-Old-Photos-Back-to-Life/Global/data/custom_dataset_data_loader.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/data/custom_dataset_data_loader.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 567
}
| 176 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import torch
import os
from torch.autograd import Variable
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class Pix2PixHDModel(BaseModel):
def name(self):
return 'Pix2PixHDModel'
def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss):
flags = (True, use_gan_feat_loss, use_vgg_loss, True, True, True, True, True, True)
def loss_filter(g_gan, g_gan_feat, g_vgg, g_kl, d_real, d_fake, g_featd, featd_real, featd_fake):
return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_kl, d_real, d_fake, g_featd, featd_real, featd_fake), flags) if f]
return loss_filter
def initialize(self, opt):
BaseModel.initialize(self, opt)
if opt.resize_or_crop != 'none' or not opt.isTrain: # when training at full res this causes OOM
torch.backends.cudnn.benchmark = True
self.isTrain = opt.isTrain
self.use_features = opt.instance_feat or opt.label_feat ## Clearly it is false
self.gen_features = self.use_features and not self.opt.load_features ## it is also false
input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc ## Just is the origin input channel #
##### define networks
# Generator network
netG_input_nc = input_nc
if not opt.no_instance:
netG_input_nc += 1
if self.use_features:
netG_input_nc += opt.feat_num
self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, opt.k_size,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids, opt=opt)
# Discriminator network
if self.isTrain:
use_sigmoid = opt.no_lsgan
netD_input_nc = opt.output_nc if opt.no_cgan else input_nc + opt.output_nc
if not opt.no_instance:
netD_input_nc += 1
self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt,opt.norm, use_sigmoid,
opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
self.feat_D=networks.define_D(64, opt.ndf, opt.n_layers_D, opt, opt.norm, use_sigmoid,
1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
if self.opt.verbose:
print('---------- Networks initialized -------------')
# load networks
if not self.isTrain or opt.continue_train or opt.load_pretrain:
pretrained_path = '' if not self.isTrain else opt.load_pretrain
self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
print("---------- G Networks reloaded -------------")
if self.isTrain:
self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path)
self.load_network(self.feat_D, 'feat_D', opt.which_epoch, pretrained_path)
print("---------- D Networks reloaded -------------")
# set loss functions and optimizers
if self.isTrain:
if opt.pool_size > 0 and (len(self.gpu_ids)) > 1: ## The pool_size is 0!
raise NotImplementedError("Fake Pool Not Implemented for MultiGPU")
self.fake_pool = ImagePool(opt.pool_size)
self.old_lr = opt.lr
# define loss functions
self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss)
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg_loss:
self.criterionVGG = networks.VGGLoss_torch(self.gpu_ids)
# Names so we can breakout loss
self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_KL', 'D_real', 'D_fake', 'G_featD', 'featD_real','featD_fake')
# initialize optimizers
# optimizer G
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
# optimizer D
params = list(self.netD.parameters())
self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
params = list(self.feat_D.parameters())
self.optimizer_featD = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
print("---------- Optimizers initialized -------------")
if opt.continue_train:
self.load_optimizer(self.optimizer_D, 'D', opt.which_epoch)
self.load_optimizer(self.optimizer_G, "G", opt.which_epoch)
self.load_optimizer(self.optimizer_featD,'featD',opt.which_epoch)
for param_groups in self.optimizer_D.param_groups:
self.old_lr = param_groups['lr']
print("---------- Optimizers reloaded -------------")
print("---------- Current LR is %.8f -------------" % (self.old_lr))
## We also want to re-load the parameters of optimizer.
def encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if self.opt.label_nc == 0:
input_label = label_map.data.cuda()
else:
# create one-hot vector for label map
size = label_map.size()
oneHot_size = (size[0], self.opt.label_nc, size[2], size[3])
input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0)
if self.opt.data_type == 16:
input_label = input_label.half()
# get edges from instance map
if not self.opt.no_instance:
inst_map = inst_map.data.cuda()
edge_map = self.get_edges(inst_map)
input_label = torch.cat((input_label, edge_map), dim=1)
input_label = Variable(input_label, volatile=infer)
# real images for training
if real_image is not None:
real_image = Variable(real_image.data.cuda())
# instance map for feature encoding
if self.use_features:
# get precomputed feature maps
if self.opt.load_features:
feat_map = Variable(feat_map.data.cuda())
if self.opt.label_feat:
inst_map = label_map.cuda()
return input_label, inst_map, real_image, feat_map
def discriminate(self, input_label, test_image, use_pool=False):
if input_label is None:
input_concat = test_image.detach()
else:
input_concat = torch.cat((input_label, test_image.detach()), dim=1)
if use_pool:
fake_query = self.fake_pool.query(input_concat)
return self.netD.forward(fake_query)
else:
return self.netD.forward(input_concat)
def feat_discriminate(self,input):
return self.feat_D.forward(input.detach())
def forward(self, label, inst, image, feat, infer=False):
# Encode Inputs
input_label, inst_map, real_image, feat_map = self.encode_input(label, inst, image, feat)
# Fake Generation
if self.use_features:
if not self.opt.load_features:
feat_map = self.netE.forward(real_image, inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
hiddens = self.netG.forward(input_concat, 'enc')
noise = Variable(torch.randn(hiddens.size()).cuda(hiddens.data.get_device()))
# This is a reduced VAE implementation where we assume the outputs follow a multivariate Gaussian distribution with mean = hiddens and std_dev = all ones.
# We follow the VAE of MUNIT (https://github.com/NVlabs/MUNIT/blob/master/networks.py)
fake_image = self.netG.forward(hiddens + noise, 'dec')
####################
##### GAN for the intermediate feature
real_old_feat =[]
syn_feat = []
for index,x in enumerate(inst):
if x==1:
real_old_feat.append(hiddens[index].unsqueeze(0))
else:
syn_feat.append(hiddens[index].unsqueeze(0))
L=min(len(real_old_feat),len(syn_feat))
real_old_feat=real_old_feat[:L]
syn_feat=syn_feat[:L]
real_old_feat=torch.cat(real_old_feat,0)
syn_feat=torch.cat(syn_feat,0)
pred_fake_feat=self.feat_discriminate(real_old_feat)
loss_featD_fake = self.criterionGAN(pred_fake_feat, False)
pred_real_feat=self.feat_discriminate(syn_feat)
loss_featD_real = self.criterionGAN(pred_real_feat, True)
pred_fake_feat_G=self.feat_D.forward(real_old_feat)
loss_G_featD=self.criterionGAN(pred_fake_feat_G,True)
#####################################
if self.opt.no_cgan:
# Fake Detection and Loss
pred_fake_pool = self.discriminate(None, fake_image, use_pool=True)
loss_D_fake = self.criterionGAN(pred_fake_pool, False)
# Real Detection and Loss
pred_real = self.discriminate(None, real_image)
loss_D_real = self.criterionGAN(pred_real, True)
# GAN loss (Fake Passability Loss)
pred_fake = self.netD.forward(fake_image)
loss_G_GAN = self.criterionGAN(pred_fake, True)
else:
# Fake Detection and Loss
pred_fake_pool = self.discriminate(input_label, fake_image, use_pool=True)
loss_D_fake = self.criterionGAN(pred_fake_pool, False)
# Real Detection and Loss
pred_real = self.discriminate(input_label, real_image)
loss_D_real = self.criterionGAN(pred_real, True)
# GAN loss (Fake Passability Loss)
pred_fake = self.netD.forward(torch.cat((input_label, fake_image), dim=1))
loss_G_GAN = self.criterionGAN(pred_fake, True)
loss_G_kl = torch.mean(torch.pow(hiddens, 2)) * self.opt.kl
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not self.opt.no_ganFeat_loss:
feat_weights = 4.0 / (self.opt.n_layers_D + 1)
D_weights = 1.0 / self.opt.num_D
for i in range(self.opt.num_D):
for j in range(len(pred_fake[i]) - 1):
loss_G_GAN_Feat += D_weights * feat_weights * \
self.criterionFeat(pred_fake[i][j],
pred_real[i][j].detach()) * self.opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not self.opt.no_vgg_loss:
loss_G_VGG = self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat
# Only return the fake_B image if necessary to save BW
return [self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_kl, loss_D_real, loss_D_fake,loss_G_featD, loss_featD_real, loss_featD_fake),
None if not infer else fake_image]
def inference(self, label, inst, image=None, feat=None):
# Encode Inputs
image = Variable(image) if image is not None else None
input_label, inst_map, real_image, _ = self.encode_input(Variable(label), Variable(inst), image, infer=True)
# Fake Generation
if self.use_features:
if self.opt.use_encoded_image:
# encode the real image to get feature map
feat_map = self.netE.forward(real_image, inst_map)
else:
# sample clusters from precomputed features
feat_map = self.sample_features(inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
if torch.__version__.startswith('0.4'):
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
else:
fake_image = self.netG.forward(input_concat)
return fake_image
def sample_features(self, inst):
# read precomputed feature clusters
cluster_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, self.opt.cluster_path)
features_clustered = np.load(cluster_path, encoding='latin1').item()
# randomly sample from the feature clusters
inst_np = inst.cpu().numpy().astype(int)
feat_map = self.Tensor(inst.size()[0], self.opt.feat_num, inst.size()[2], inst.size()[3])
for i in np.unique(inst_np):
label = i if i < 1000 else i // 1000
if label in features_clustered:
feat = features_clustered[label]
cluster_idx = np.random.randint(0, feat.shape[0])
idx = (inst == int(i)).nonzero()
for k in range(self.opt.feat_num):
feat_map[idx[:, 0], idx[:, 1] + k, idx[:, 2], idx[:, 3]] = feat[cluster_idx, k]
if self.opt.data_type == 16:
feat_map = feat_map.half()
return feat_map
def encode_features(self, image, inst):
image = Variable(image.cuda(), volatile=True)
feat_num = self.opt.feat_num
h, w = inst.size()[2], inst.size()[3]
block_num = 32
feat_map = self.netE.forward(image, inst.cuda())
inst_np = inst.cpu().numpy().astype(int)
feature = {}
for i in range(self.opt.label_nc):
feature[i] = np.zeros((0, feat_num + 1))
for i in np.unique(inst_np):
label = i if i < 1000 else i // 1000
idx = (inst == int(i)).nonzero()
num = idx.size()[0]
idx = idx[num // 2, :]
val = np.zeros((1, feat_num + 1))
for k in range(feat_num):
val[0, k] = feat_map[idx[0], idx[1] + k, idx[2], idx[3]].data[0]
val[0, feat_num] = float(num) / (h * w // block_num)
feature[label] = np.append(feature[label], val, axis=0)
return feature
def get_edges(self, t):
edge = torch.cuda.ByteTensor(t.size()).zero_()
edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
if self.opt.data_type == 16:
return edge.half()
else:
return edge.float()
def save(self, which_epoch):
self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)
self.save_network(self.netD, 'D', which_epoch, self.gpu_ids)
self.save_network(self.feat_D,'featD',which_epoch,self.gpu_ids)
self.save_optimizer(self.optimizer_G, "G", which_epoch)
self.save_optimizer(self.optimizer_D, "D", which_epoch)
self.save_optimizer(self.optimizer_featD,'featD',which_epoch)
if self.gen_features:
self.save_network(self.netE, 'E', which_epoch, self.gpu_ids)
def update_fixed_params(self):
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
if self.opt.verbose:
print('------------ Now also finetuning global generator -----------')
def update_learning_rate(self):
lrd = self.opt.lr / self.opt.niter_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_D.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_featD.param_groups:
param_group['lr'] = lr
if self.opt.verbose:
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
class InferenceModel(Pix2PixHDModel):
def forward(self, inp):
label, inst = inp
return self.inference(label, inst)
|
Bringing-Old-Photos-Back-to-Life/Global/models/pix2pixHD_model_DA.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/models/pix2pixHD_model_DA.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 7965
}
| 177 |
---
- name: Bringing-Old-Photos-Back-to-Life
hosts: all
gather_facts: no
# Successfully tested on Ubuntu 18.04/20.04 and Debian 10
pre_tasks:
- name: install packages
package:
name:
- python3
- python3-pip
- python3-venv
- git
- unzip
- tar
- lbzip2
- build-essential
- cmake
- ffmpeg
- libsm6
- libxext6
- libgl1-mesa-glx
state: latest
become: yes
tasks:
- name: git clone repo
git:
repo: 'https://github.com/microsoft/Bringing-Old-Photos-Back-to-Life.git'
dest: Bringing-Old-Photos-Back-to-Life
clone: yes
- name: requirements setup
pip:
requirements: "~/Bringing-Old-Photos-Back-to-Life/requirements.txt"
virtualenv: "~/Bringing-Old-Photos-Back-to-Life/.venv"
virtualenv_command: /usr/bin/python3 -m venv .venv
- name: additional pip packages # requirements.txt lacks some packages
pip:
name:
- setuptools
- wheel
- scikit-build
virtualenv: "~/Bringing-Old-Photos-Back-to-Life/.venv"
virtualenv_command: /usr/bin/python3 -m venv .venv
- name: git clone batchnorm-pytorch
git:
repo: 'https://github.com/vacancy/Synchronized-BatchNorm-PyTorch'
dest: Synchronized-BatchNorm-PyTorch
clone: yes
- name: copy sync_batchnorm to face_enhancement
copy:
src: Synchronized-BatchNorm-PyTorch/sync_batchnorm
dest: Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/
remote_src: yes
- name: copy sync_batchnorm to global
copy:
src: Synchronized-BatchNorm-PyTorch/sync_batchnorm
dest: Bringing-Old-Photos-Back-to-Life/Global/detection_models
remote_src: yes
- name: check if shape_predictor_68_face_landmarks.dat
stat:
path: Bringing-Old-Photos-Back-to-Life/Face_Detection/shape_predictor_68_face_landmarks.dat
register: p
- name: get shape_predictor_68_face_landmarks.dat.bz2
get_url:
url: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
dest: Bringing-Old-Photos-Back-to-Life/Face_Detection/
when: p.stat.exists == False
- name: unarchive shape_predictor_68_face_landmarks.dat.bz2
shell: 'bzip2 -d Bringing-Old-Photos-Back-to-Life/Face_Detection/shape_predictor_68_face_landmarks.dat.bz2'
when: p.stat.exists == False
- name: check if face_enhancement
stat:
path: Bringing-Old-Photos-Back-to-Life/Face_Enhancement/checkpoints/Setting_9_epoch_100/latest_net_G.pth
register: fc
- name: unarchive Face_Enhancement/checkpoints.zip
unarchive:
src: https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Face_Enhancement/checkpoints.zip
dest: Bringing-Old-Photos-Back-to-Life/Face_Enhancement/
remote_src: yes
when: fc.stat.exists == False
- name: check if global
stat:
path: Bringing-Old-Photos-Back-to-Life/Global/checkpoints/detection/FT_Epoch_latest.pt
register: gc
- name: unarchive Global/checkpoints.zip
unarchive:
src: https://facevc.blob.core.windows.net/zhanbo/old_photo/pretrain/Global/checkpoints.zip
dest: Bringing-Old-Photos-Back-to-Life/Global/
remote_src: yes
when: gc.stat.exists == False
# Do not forget to execute 'source .venv/bin/activate' inside Bringing-Old-Photos-Back-to-Life before starting run.py
|
Bringing-Old-Photos-Back-to-Life/ansible.yaml/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/ansible.yaml",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1453
}
| 178 |
"""
This is an example using CLAPCAP for audio captioning.
"""
from msclap import CLAP
# Load and initialize CLAP
clap_model = CLAP(version = 'clapcap', use_cuda=False)
#Load audio files
audio_files = ['audio_file']
# Generate captions for the recording
captions = clap_model.generate_caption(audio_files, resample=True, beam_size=5, entry_length=67, temperature=0.01)
# Print the result
for i in range(len(audio_files)):
print(f"Audio file: {audio_files[i]} \n")
print(f"Generated caption: {captions[i]} \n")
"""
The output (the exact caption may vary):
The birds are singing in the trees.
"""
|
CLAP/examples/audio_captioning.py/0
|
{
"file_path": "CLAP/examples/audio_captioning.py",
"repo_id": "CLAP",
"token_count": 211
}
| 179 |
import argparse
import yaml
import sys
def read_config_as_args(config_path,args=None,is_config_str=False):
return_dict = {}
if config_path is not None:
if is_config_str:
yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
else:
with open(config_path, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
if args != None:
for k, v in yml_config.items():
if k in args.__dict__:
args.__dict__[k] = v
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
else:
for k, v in yml_config.items():
return_dict[k] = v
args = args if args != None else return_dict
return argparse.Namespace(**args)
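# Illustrative usage sketch (the YAML path below is a placeholder):
# args = read_config_as_args("configs/config.yml")
# print(vars(args))  # every top-level YAML key becomes an attribute on the returned Namespace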
|
CLAP/msclap/models/utils.py/0
|
{
"file_path": "CLAP/msclap/models/utils.py",
"repo_id": "CLAP",
"token_count": 422
}
| 180 |
# COCO-LM
This repository contains the scripts for fine-tuning COCO-LM pretrained models on GLUE and SQuAD 2.0 benchmarks.
Paper: [COCO-LM: Correcting and Contrasting Text Sequences for Language Model Pretraining](https://arxiv.org/abs/2102.08473)
<img src="./coco-lm.png" width="1000px"></img>
## Overview
We provide the scripts in two versions, based on two widely-used open-source codebases, the [Fairseq Library](https://github.com/pytorch/fairseq) and the [Huggingface Transformers Library](https://github.com/huggingface/transformers). The two code versions are mostly equivalent in functionality, and you are free to use either of them. However, we note that the [fairseq](fairseq) version is what we used in our experiments, and it will best reproduce the results in the paper; the [huggingface](huggingface) version is implemented later to provide compatibility with the Huggingface Transformers Library, and may yield slightly different results.
Please follow the README files under the two directories for running the code.
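As a quick orientation before diving into either directory, the sketch below shows one hypothetical way a fine-tuning script in the [huggingface](huggingface) version could load a pretrained checkpoint. The import paths, class names (`COCOLMModel`, `COCOLMTokenizer`), and the `microsoft/cocolm-base` checkpoint identifier are assumptions for illustration only; the README under [huggingface](huggingface) documents the actual entry points and fine-tuning commands.
```python
# Hypothetical loading sketch: import paths, class names and the checkpoint id are
# assumptions for illustration -- see huggingface/README.md for the real interface.
import torch
from cocolm.modeling_cocolm import COCOLMModel
from cocolm.tokenization_cocolm import COCOLMTokenizer

tokenizer = COCOLMTokenizer.from_pretrained("microsoft/cocolm-base")
model = COCOLMModel.from_pretrained("microsoft/cocolm-base")
model.eval()

input_ids = torch.tensor([tokenizer.encode("COCO-LM corrects and contrasts text sequences.")])
with torch.no_grad():
    outputs = model(input_ids)  # contextual representations for a downstream task head
```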
## GLUE Fine-Tuning Results
The [General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) benchmark is a collection of sentence- or sentence-pair language understanding tasks for evaluating and analyzing natural language understanding systems.
GLUE dev set results of COCO-LM base++ and large++ models are as follows (median of 5 different random seeds):
| Model | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B | AVG |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| COCO-LM base++ | 90.2/90.0 | 92.2 | 94.2 | 94.6 | 67.3 | 87.4 | 91.2 | 91.8 | 88.6 |
| COCO-LM large++ | 91.4/91.6 | 92.8 | 95.7 | 96.9 | 73.9 | 91.0 | 92.2 | 92.7 | 90.8 |
GLUE test set results of COCO-LM base++ and large++ models are as follows (no ensemble, task-specific tricks, etc.):
| Model | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B | AVG |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| COCO-LM base++ | 89.8/89.3 | 89.8 | 94.2 | 95.6 | 68.6 | 82.3 | 88.5 | 90.3 | 87.4 |
| COCO-LM large++ | 91.6/91.1 | 90.5 | 95.8 | 96.7 | 70.5 | 89.2 | 88.4 | 91.8 | 89.3 |
## SQuAD 2.0 Fine-Tuning Results
[Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
SQuAD 2.0 dev set results of COCO-LM base++ and large++ models are as follows (median of 5 different random seeds):
| Model | EM | F1 |
| ------ | ------ | ------ |
| COCO-LM base++ | 85.4 | 88.1 |
| COCO-LM large++ | 88.2 | 91.0 |
## Citation
If you find the code and models useful for your research, please cite the following paper:
```
@inproceedings{meng2021cocolm,
title={{COCO-LM}: Correcting and contrasting text sequences for language model pretraining},
author={Meng, Yu and Xiong, Chenyan and Bajaj, Payal and Tiwary, Saurabh and Bennett, Paul and Han, Jiawei and Song, Xia},
booktitle={Conference on Neural Information Processing Systems},
year={2021}
}
```
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
|
COCO-LM/README.md/0
|
{
"file_path": "COCO-LM/README.md",
"repo_id": "COCO-LM",
"token_count": 1265
}
| 181 |
.. role:: hidden
:class: hidden-section
.. _Learning Rate Schedulers:
Learning Rate Schedulers
========================
Learning Rate Schedulers update the learning rate over the course of training.
Learning rates can be updated after each update via :func:`step_update` or at
epoch boundaries via :func:`step`.
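A minimal sketch of how a training loop might drive a scheduler; the names in
it (``scheduler``, ``optimizer``, ``train_step``, ``validate``, ``max_epoch``,
``epoch_batches``) are assumed helpers and variables shown only for
illustration, not part of the fairseq API:

.. code-block:: python

    num_updates = 0
    for epoch in range(1, max_epoch + 1):
        for batch in epoch_batches:
            train_step(batch)                   # forward/backward (assumed helper)
            optimizer.step()
            num_updates += 1
            scheduler.step_update(num_updates)  # per-update LR adjustment
        val_loss = validate()                   # assumed helper
        scheduler.step(epoch, val_loss)         # epoch-boundary LR adjustment
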
.. automodule:: fairseq.optim.lr_scheduler
:members:
.. autoclass:: fairseq.optim.lr_scheduler.FairseqLRScheduler
:members:
:undoc-members:
.. autoclass:: fairseq.optim.lr_scheduler.cosine_lr_scheduler.CosineSchedule
:members:
:undoc-members:
.. autoclass:: fairseq.optim.lr_scheduler.fixed_schedule.FixedSchedule
:members:
:undoc-members:
.. autoclass:: fairseq.optim.lr_scheduler.inverse_square_root_schedule.InverseSquareRootSchedule
:members:
:undoc-members:
.. autoclass:: fairseq.optim.lr_scheduler.reduce_lr_on_plateau.ReduceLROnPlateau
:members:
:undoc-members:
.. autoclass:: fairseq.optim.lr_scheduler.triangular_lr_scheduler.TriangularSchedule
:members:
:undoc-members:
|
COCO-LM/fairseq/docs/lr_scheduler.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/lr_scheduler.rst",
"repo_id": "COCO-LM",
"token_count": 386
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class AdaptiveSpanCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig)
class AdaptiveSpanCriterion(CrossEntropyCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task, sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss here is summed, different from the adaptive span code
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, aux_loss, avg_span, max_span = self.compute_loss(
model, net_output, sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
loss /= sample_size
total_loss = loss + aux_loss
sample_size = 1
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"total_loss": total_loss.data,
"avg_span": avg_span * sample_size,
"max_span": max_span * sample_size,
}
return total_loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
loss, _ = super().compute_loss(model, net_output, sample, reduce)
aux_loss = model.get_aux_loss()
avg_span = model.get_current_avg_span()
max_span = model.get_current_max_span()
return loss, aux_loss, avg_span, max_span
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs)
avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs)
max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3)
metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3)
# total loss contains the L1 norm on adaptive-span
metrics.log_scalar(
"total_loss",
total_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/examples/adaptive_span/adaptive_span_loss.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/adaptive_span/adaptive_span_loss.py",
"repo_id": "COCO-LM",
"token_count": 1801
}
| 183 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
PY_BIN_ROOT=
# PyPI dependency
${PY_BIN_ROOT}pip install sentencepiece sacremoses
# Get data
if [ ! -d "data" ]; then
mkdir data
fi
if [ ! -f "data/fr-en.tgz" ]; then
wget https://wit3.fbk.eu/archive/2017-01-trnted/texts/fr/en/fr-en.tgz -P data
tar xvf data/fr-en.tgz -C data
fi
${PY_BIN_ROOT}python get_bitext.py --bpe-vocab 16384 --byte-vocab --char-vocab
for VOCAB_SIZE in 2048 4096; do
${PY_BIN_ROOT}python get_bitext.py --bpe-vocab ${VOCAB_SIZE} --bbpe-vocab ${VOCAB_SIZE}
done
rm -r data/fr-en data/fr-en.tgz
# Generate binary dataset
${PY_BIN_ROOT}fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bpe16384 --joined-dictionary \
--workers "$(nproc)" --trainpref data/train.moses.bpe16384 --validpref data/valid.moses.bpe16384 \
--testpref data/test.moses.bpe16384
${PY_BIN_ROOT}fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bytes --joined-dictionary \
--workers "$(nproc)" --trainpref data/train.moses.bytes --validpref data/valid.moses.bytes \
--testpref data/test.moses.bytes
${PY_BIN_ROOT}fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_chars --joined-dictionary \
--workers "$(nproc)" --trainpref data/train.moses.chars --validpref data/valid.moses.chars \
--testpref data/test.moses.chars
for VOCAB_SIZE in 2048 4096; do
for TYPE in bbpe bpe; do
${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir "data/bin_${TYPE}${VOCAB_SIZE}" \
--joined-dictionary --workers "$(nproc)" --trainpref "data/train.moses.${TYPE}${VOCAB_SIZE}" \
--validpref "data/valid.moses.${TYPE}${VOCAB_SIZE}" --testpref "data/test.moses.${TYPE}${VOCAB_SIZE}"
done
done
|
COCO-LM/fairseq/examples/byte_level_bpe/get_data.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/byte_level_bpe/get_data.sh",
"repo_id": "COCO-LM",
"token_count": 762
}
| 184 |
# Cross-Lingual Language Model Pre-training
Below are some details for training Cross-Lingual Language Models (XLM) - similar to the ones presented in [Lample & Conneau, 2019](https://arxiv.org/pdf/1901.07291.pdf) - in Fairseq. The current implementation only supports the Masked Language Model (MLM) from the paper above.
## Downloading and Tokenizing Monolingual Data
Pointers to the monolingual data from wikipedia, used for training the XLM-style MLM model as well as details on processing (tokenization and BPE) it can be found in the [XLM Github Repository](https://github.com/facebookresearch/XLM#download--preprocess-monolingual-data).
Let's assume the following for the code snippets in later sections to work
- Processed data is in the folder: monolingual_data/processed
- Each language has 3 files, for train, validation and test. For example, we have the following files for English:
train.en, valid.en, test.en
- We are training a model for 5 languages: Arabic (ar), German (de), English (en), Hindi (hi) and French (fr)
- The vocabulary file is monolingual_data/processed/vocab_mlm
## Fairseq Pre-processing and Binarization
Pre-process and binarize the data with the MaskedLMDictionary and cross_lingual_lm task
```bash
# Ensure the output directory exists
DATA_DIR=monolingual_data/fairseq_processed
mkdir -p "$DATA_DIR"
for lg in ar de en hi fr
do
fairseq-preprocess \
--task cross_lingual_lm \
--srcdict monolingual_data/processed/vocab_mlm \
--only-source \
--trainpref monolingual_data/processed/train \
--validpref monolingual_data/processed/valid \
--testpref monolingual_data/processed/test \
--destdir monolingual_data/fairseq_processed \
--workers 20 \
--source-lang $lg
# Since we only have a source language, the output files have "None" as the
# target language. Rename them to remove it.
for stage in train test valid; do
  sudo mv "$DATA_DIR/$stage.$lg-None.$lg.bin" "$DATA_DIR/$stage.$lg.bin"
  sudo mv "$DATA_DIR/$stage.$lg-None.$lg.idx" "$DATA_DIR/$stage.$lg.idx"
done
done
```
## Train a Cross-lingual Language Model similar to the XLM MLM model
Use the following command to train the model on 5 languages.
```bash
fairseq-train \
--task cross_lingual_lm monolingual_data/fairseq_processed \
--save-dir checkpoints/mlm \
--max-update 2400000 --save-interval 1 --no-epoch-checkpoints \
--arch xlm_base \
--optimizer adam --lr-scheduler reduce_lr_on_plateau \
--lr-shrink 0.5 --lr 0.0001 --stop-min-lr 1e-09 \
--dropout 0.1 \
--criterion legacy_masked_lm_loss \
--max-tokens 2048 --tokens-per-sample 256 --attention-dropout 0.1 \
--dataset-impl lazy --seed 0 \
--masked-lm-only \
--monolingual-langs 'ar,de,en,hi,fr' --num-segment 5 \
--ddp-backend=legacy_ddp
```
Some notes:
- Using tokens_per_sample greater than 256 can cause OOM (out-of-memory) issues. Since MLM packs streams of text together, this parameter usually does not need much tuning.
- The evaluation workflow for computing MLM perplexity on test data is in progress.
- Fine-tuning this model on a downstream task is not currently supported.
|
COCO-LM/fairseq/examples/cross_lingual_language_model/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/cross_lingual_language_model/README.md",
"repo_id": "COCO-LM",
"token_count": 975
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
@register_model("laser_lstm")
class LSTMModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens=None,
tgt_tokens=None,
tgt_lengths=None,
target_language_id=None,
dataset_name="",
):
assert target_language_id is not None
src_encoder_out = self.encoder(src_tokens, src_lengths, dataset_name)
return self.decoder(
prev_output_tokens, src_encoder_out, lang_id=target_language_id
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout",
default=0.1,
type=float,
metavar="D",
help="dropout probability",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-embed-path",
default=None,
type=str,
metavar="STR",
help="path to pre-trained encoder embedding",
)
parser.add_argument(
"--encoder-hidden-size", type=int, metavar="N", help="encoder hidden size"
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="number of encoder layers"
)
parser.add_argument(
"--encoder-bidirectional",
action="store_true",
help="make all layers of encoder bidirectional",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-embed-path",
default=None,
type=str,
metavar="STR",
help="path to pre-trained decoder embedding",
)
parser.add_argument(
"--decoder-hidden-size", type=int, metavar="N", help="decoder hidden size"
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="number of decoder layers"
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--decoder-zero-init",
type=str,
metavar="BOOL",
help="initialize the decoder hidden/cell state to zero",
)
parser.add_argument(
"--decoder-lang-embed-dim",
type=int,
metavar="N",
help="decoder language embedding dimension",
)
parser.add_argument(
"--fixed-embeddings",
action="store_true",
help="keep embeddings fixed (ENCODER ONLY)",
) # TODO Also apply to decoder embeddings?
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument(
"--encoder-dropout-in",
type=float,
metavar="D",
help="dropout probability for encoder input embedding",
)
parser.add_argument(
"--encoder-dropout-out",
type=float,
metavar="D",
help="dropout probability for encoder output",
)
parser.add_argument(
"--decoder-dropout-in",
type=float,
metavar="D",
help="dropout probability for decoder input embedding",
)
parser.add_argument(
"--decoder-dropout-out",
type=float,
metavar="D",
help="dropout probability for decoder output",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
pretrained_encoder_embed = None
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
)
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim
)
num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
fixed_embeddings=args.fixed_embeddings,
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
zero_init=options.eval_bool(args.decoder_zero_init),
encoder_embed_dim=args.encoder_embed_dim,
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
num_langs=num_langs,
lang_embed_dim=args.decoder_lang_embed_dim,
)
return cls(encoder, decoder)
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
bidirectional=False,
left_pad=True,
pretrained_embed=None,
padding_value=0.0,
fixed_embeddings=False,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.bidirectional = bidirectional
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
if fixed_embeddings:
self.embed_tokens.weight.requires_grad = False
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out if num_layers > 1 else 0.0,
bidirectional=bidirectional,
)
self.left_pad = left_pad
self.padding_value = padding_value
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(self, src_tokens, src_lengths, dataset_name):
if self.left_pad:
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
self.padding_idx,
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
try:
packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())
except BaseException:
raise Exception(f"Packing failed in dataset {dataset_name}")
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = x.data.new(*state_size).zero_()
c0 = x.data.new(*state_size).zero_()
packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_value
)
x = F.dropout(x, p=self.dropout_out, training=self.training)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
def combine_bidir(outs):
return torch.cat(
[
torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view(
1, bsz, self.output_units
)
for i in range(self.num_layers)
],
dim=0,
)
final_hiddens = combine_bidir(final_hiddens)
final_cells = combine_bidir(final_cells)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
# Set padded outputs to -inf so they are not selected by max-pooling
padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
if padding_mask.any():
x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)
# Build the sentence embedding by max-pooling over the encoder outputs
sentemb = x.max(dim=0)[0]
return {
"sentemb": sentemb,
"encoder_out": (x, final_hiddens, final_cells),
"encoder_padding_mask": encoder_padding_mask
if encoder_padding_mask.any()
else None,
}
def reorder_encoder_out(self, encoder_out_dict, new_order):
encoder_out_dict["sentemb"] = encoder_out_dict["sentemb"].index_select(
0, new_order
)
encoder_out_dict["encoder_out"] = tuple(
eo.index_select(1, new_order) for eo in encoder_out_dict["encoder_out"]
)
if encoder_out_dict["encoder_padding_mask"] is not None:
encoder_out_dict["encoder_padding_mask"] = encoder_out_dict[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out_dict
def max_positions(self):
"""Maximum input length supported by the encoder."""
return int(1e5) # an arbitrary large number
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
out_embed_dim=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
zero_init=False,
encoder_embed_dim=512,
encoder_output_units=512,
pretrained_embed=None,
num_langs=1,
lang_embed_dim=0,
):
super().__init__(dictionary)
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.layers = nn.ModuleList(
[
LSTMCell(
input_size=encoder_output_units + embed_dim + lang_embed_dim
if layer == 0
else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
]
)
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
if zero_init:
self.sentemb2init = None
else:
self.sentemb2init = Linear(
encoder_output_units, 2 * num_layers * hidden_size
)
if lang_embed_dim == 0:
self.embed_lang = None
else:
self.embed_lang = nn.Embedding(num_langs, lang_embed_dim)
nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1)
def forward(
self, prev_output_tokens, encoder_out_dict, incremental_state=None, lang_id=0
):
sentemb = encoder_out_dict["sentemb"]
encoder_out = encoder_out_dict["encoder_out"]
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# get outputs from encoder
encoder_outs, _, _ = encoder_out[:3]
srclen = encoder_outs.size(0)
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# embed language identifier
if self.embed_lang is not None:
lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id)
langemb = self.embed_lang(lang_ids)
# TODO Should we dropout here???
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is not None:
prev_hiddens, prev_cells, input_feed = cached_state
else:
num_layers = len(self.layers)
if self.sentemb2init is None:
prev_hiddens = [
x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers)
]
prev_cells = [
x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers)
]
else:
init = self.sentemb2init(sentemb)
prev_hiddens = [
init[:, (2 * i) * self.hidden_size : (2 * i + 1) * self.hidden_size]
for i in range(num_layers)
]
prev_cells = [
init[
:,
(2 * i + 1) * self.hidden_size : (2 * i + 2) * self.hidden_size,
]
for i in range(num_layers)
]
input_feed = x.data.new(bsz, self.hidden_size).zero_()
attn_scores = x.data.new(srclen, seqlen, bsz).zero_()
outs = []
for j in range(seqlen):
if self.embed_lang is None:
input = torch.cat((x[j, :, :], sentemb), dim=1)
else:
input = torch.cat((x[j, :, :], sentemb, langemb), dim=1)
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = F.dropout(hidden, p=self.dropout_out, training=self.training)
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
out = hidden
out = F.dropout(out, p=self.dropout_out, training=self.training)
# input feeding
input_feed = out
# save final output
outs.append(out)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self,
incremental_state,
"cached_state",
(prev_hiddens, prev_cells, input_feed),
)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
attn_scores = attn_scores.transpose(0, 2)
# project back to size of vocabulary
if hasattr(self, "additional_fc"):
x = self.additional_fc(x)
x = F.dropout(x, p=self.dropout_out, training=self.training)
x = self.fc_out(x)
return x, attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, "cached_state", new_state)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return int(1e5) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture("laser_lstm", "laser_lstm")
def base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_hidden_size = getattr(
args, "encoder_hidden_size", args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 1)
args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_hidden_size = getattr(
args, "decoder_hidden_size", args.decoder_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 1)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
args.decoder_zero_init = getattr(args, "decoder_zero_init", "0")
args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
args.fixed_embeddings = getattr(args, "fixed_embeddings", False)
|
COCO-LM/fairseq/examples/laser/laser_src/laser_lstm.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/laser/laser_src/laser_lstm.py",
"repo_id": "COCO-LM",
"token_count": 10327
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .models import linformer_roberta # noqa
|
COCO-LM/fairseq/examples/linformer/linformer_src/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/linformer/linformer_src/__init__.py",
"repo_id": "COCO-LM",
"token_count": 60
}
| 187 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
if [ -z "$WORKDIR_ROOT" ]; then
  echo "Please specify your working directory root in the environment variable WORKDIR_ROOT. Exiting..."
  exit 1
fi
# first run download_wmt20.sh; it will install a few useful tools for other scripts
# TODO: need to print out instructions on downloading a few files which require manual authentication on the websites
bash ./download_wmt20.sh
python ./download_wmt19_and_before.py
bash ./download_wat19_my.sh
python ./download_ted_and_extract.py
bash ./download_lotus.sh
bash ./download_iitb.sh
bash ./download_af_xh.sh
# IWSLT download URLs have changed in the meantime; TODO: fix them:
bash ./download_iwslt_and_extract.sh
# TODO: globalvoices URLs have changed; they need to be fixed
bash ./download_flores_data.sh
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_ML50_v1.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_ML50_v1.sh",
"repo_id": "COCO-LM",
"token_count": 296
}
| 188 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
path_2_data=$1      # <path to data> which contains binarized data for each direction
lang_list=$2        # <path to a file which contains a list of languages separated by new lines>
lang_pairs=$3       # a comma-separated list of language pairs to train multilingual models, e.g. "en-fr,en-cs,fr-en,cs-en"
# the pretrained model can be an mBART pretrained model as well
pretrained_model=$4 # <path to a pretrained model>
fairseq-train "$path_2_data" \
--encoder-normalize-before --decoder-normalize-before \
--arch transformer --layernorm-embedding \
--task translation_multi_simple_epoch \
--finetune-from-model "$pretrained_model" \
--sampling-method "temperature" \
--sampling-temperature "1.5" \
--encoder-langtok "src" \
--decoder-langtok \
--lang-dict "$lang_list" \
--lang-pairs "$lang_pairs" \
--criterion label_smoothed_cross_entropy --label-smoothing 0.2 \
--optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \
--lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \
--dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \
--max-tokens 1024 --update-freq 2 \
--save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \
--seed 222 --log-format simple --log-interval 2
|
COCO-LM/fairseq/examples/multilingual/finetune_multilingual_model.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/finetune_multilingual_model.sh",
"repo_id": "COCO-LM",
"token_count": 509
}
| 189 |
# Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019)
This page contains pointers to pre-trained models as well as instructions on how to train new models for [our paper](https://arxiv.org/abs/1901.10430).
## Citation:
```bibtex
@inproceedings{wu2018pay,
title = {Pay Less Attention with Lightweight and Dynamic Convolutions},
author = {Felix Wu and Angela Fan and Alexei Baevski and Yann Dauphin and Michael Auli},
booktitle = {International Conference on Learning Representations},
year = {2019},
url = {https://arxiv.org/abs/1901.10430},
}
```
## Translation
### Pre-trained models
For some datasets we release models without GLUs, which are faster at inference.
Model | Description | Dataset | Download
---|---|---|---
`lightconv.no_glu.iwslt14.de-en` | LightConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz) <br> IWSLT14 test: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2)
`dynamicconv.no_glu.iwslt14.de-en` | DynamicConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz) <br> IWSLT14 test: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2)
`lightconv.no_glu.wmt16.en-de` | LightConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
`dynamicconv.no_glu.wmt16.en-de` | DynamicConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
`lightconv.glu.wmt16.en-de` | LightConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
`dynamicconv.glu.wmt16.en-de` | DynamicConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz) <br> newstest2014 (shared vocab): <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
`lightconv.glu.wmt14.en-fr` | LightConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
`dynamicconv.glu.wmt14.en-fr` | DynamicConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
`lightconv.glu.wmt17.zh-en` | LightConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz) <br> newstest2017: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2)
`dynamicconv.glu.wmt17.zh-en` | DynamicConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model: <br> [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz) <br> newstest2017: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2)
### Memory-Efficient CUDA Kernels
Since the PyTorch implementations of Light/Dynamic conv are quite memory intensive, we have developed CUDA kernels that implement the light and dynamic convolution operator in a memory-efficient and performant manner. For large sequence lengths, these kernels save about 50% memory compared to the PyTorch equivalent.
To install the kernels, use the commands below. Once installed, they will automatically be used in place of the PyTorch implementations whenever a light or dynamic convolution is used.
```sh
# to install lightconv
cd fairseq/modules/lightconv_layer
python cuda_function_gen.py
python setup.py install
# to install dynamicconv
cd fairseq/modules/dynamicconv_layer
python cuda_function_gen.py
python setup.py install
```
### Example usage (torch.hub)
We require a few additional Python dependencies for preprocessing:
```bash
pip install sacremoses subword_nmt
```
Interactive translation via PyTorch Hub:
```python
import torch
import fairseq  # needed for the isinstance check below
# List available models
torch.hub.list('pytorch/fairseq')  # [..., 'lightconv.glu.wmt17.zh-en', ... ]
# Load a LightConv model trained on WMT'17 Zh-En
zh2en = torch.hub.load('pytorch/fairseq', 'lightconv.glu.wmt17.zh-en', tokenizer='moses', bpe='subword_nmt')
# The underlying model is available under the *models* attribute
assert isinstance(zh2en.models[0], fairseq.models.lightconv.LightConvModel)
# Translate a sentence
zh2en.translate('你好 世界')
# 'Hello World'
```
Loading custom models:
```python
from fairseq.models.lightconv import LightConvModel
en2fr = LightConvModel.from_pretrained(
'/path/to/checkpoints',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='data-bin/wmt14_en_fr',
bpe='subword_nmt',
bpe_codes='data-bin/wmt14_en_fr/en.code'
)
en2fr.translate('Hello world!')
# 'Bonjour le monde'
```
### Preprocessing the training datasets
Please follow the instructions in [`examples/translation/README.md`](../translation/README.md) to preprocess the data.
### Training and evaluation options:
To use the model without GLU, please set `--encoder-glu 0 --decoder-glu 0`.
For LightConv, please use `--encoder-conv-type lightweight --decoder-conv-type lightweight`, otherwise the default is DynamicConv.
For best BLEU results, the length penalty (`--lenpen`) may need to be tuned manually.
To use the CUDA kernels, first install the PyTorch modules using the commands
above. Once the CUDA modules are installed, they will automatically be used
instead of the PyTorch modules.
### IWSLT14 De-En
Training and evaluating DynamicConv (without GLU) on a GPU:
```sh
# Training
SAVE="save/dynamic_conv_iwslt"
mkdir -p $SAVE
CUDA_VISIBLE_DEVICES=0 $(which fairseq-train) data-bin/iwslt14.tokenized.de-en \
--clip-norm 0 --optimizer adam --lr 0.0005 \
--source-lang de --target-lang en --max-tokens 4000 --no-progress-bar \
--log-interval 100 --stop-min-lr '1e-09' --weight-decay 0.0001 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--lr-scheduler inverse_sqrt \
--ddp-backend=legacy_ddp \
--max-update 50000 --warmup-updates 4000 --warmup-init-lr '1e-07' \
--adam-betas '(0.9, 0.98)' --keep-last-epochs 10 \
-a lightconv_iwslt_de_en --save-dir $SAVE \
--dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \
--encoder-glu 0 --decoder-glu 0
python scripts/average_checkpoints.py --inputs $SAVE \
--num-epoch-checkpoints 10 --output "${SAVE}/checkpoint_last10_avg.pt"
# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/iwslt14.tokenized.de-en --path "${SAVE}/checkpoint_last10_avg.pt" --batch-size 128 --beam 4 --remove-bpe --lenpen 1 --gen-subset test --quiet
```
### WMT16 En-De
Training and evaluating DynamicConv (with GLU) on WMT16 En-De using cosine scheduler on one machine with 8 V100 GPUs:
```sh
# Training
SAVE="save/dynamic_conv_wmt16en2de"
mkdir -p $SAVE
python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \
data-bin/wmt16_en_de_bpe32k --fp16 --log-interval 100 --no-progress-bar \
--max-update 30000 --share-all-embeddings --optimizer adam \
--adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \
--ddp-backend=legacy_ddp --max-tokens 3584 \
--lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \
--lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \
--t-mult 1 --lr-period-updates 20000 \
--arch lightconv_wmt_en_de_big --save-dir $SAVE \
--dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \
--encoder-glu 1 --decoder-glu 1
# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/wmt16.en-de.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.5 --gen-subset test > wmt16_gen.txt
bash scripts/compound_split_bleu.sh wmt16_gen.txt
```
### WMT14 En-Fr
Training DynamicConv (with GLU) on WMT14 En-Fr using cosine scheduler on one machine with 8 V100 GPUs:
```sh
# Training
SAVE="save/dynamic_conv_wmt14en2fr"
mkdir -p $SAVE
python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \
data-bin/wmt14_en_fr --fp16 --log-interval 100 --no-progress-bar \
--max-update 30000 --share-all-embeddings --optimizer adam \
--adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \
--ddp-backend=legacy_ddp --max-tokens 3584 \
--lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \
--lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \
--t-mult 1 --lr-period-updates 70000 \
--arch lightconv_wmt_en_fr_big --save-dir $SAVE \
--dropout 0.1 --attention-dropout 0.1 --weight-dropout 0.1 \
--encoder-glu 1 --decoder-glu 1
# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/wmt14.en-fr.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.9 --gen-subset test
```
|
COCO-LM/fairseq/examples/pay_less_attention_paper/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/pay_less_attention_paper/README.md",
"repo_id": "COCO-LM",
"token_count": 4319
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
@register_task("commonsense_qa")
class CommonsenseQATask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument("--num-classes", type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=True,
add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if "answerKey" in example:
label = ord(example["answerKey"]) - ord("A")
labels.append(label)
question = example["question"]["stem"]
assert len(example["question"]["choices"]) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = "Q: " + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example["question"]["choices"]):
src = "A: " + choice["text"]
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(
len(src_tokens[0]) == len(src_tokens[i])
for i in range(self.args.num_classes)
)
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update(
{
"net_input{}".format(i + 1): {
"src_tokens": RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths[i],
}
}
)
if len(labels) > 0:
dataset.update({"target": RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print("| Loaded {} with {} samples".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
"sentence_classification_head",
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
|
COCO-LM/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py",
"repo_id": "COCO-LM",
"token_count": 3041
}
| 191 |
# Scaling Neural Machine Translation (Ott et al., 2018)
This page includes instructions for reproducing results from the paper [Scaling Neural Machine Translation (Ott et al., 2018)](https://arxiv.org/abs/1806.00187).
## Pre-trained models
Model | Description | Dataset | Download
---|---|---|---
`transformer.wmt14.en-fr` | Transformer <br> ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2)
`transformer.wmt16.en-de` | Transformer <br> ([Ott et al., 2018](https://arxiv.org/abs/1806.00187)) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2) <br> newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2)
## Training a new model on WMT'16 En-De
First download the [preprocessed WMT'16 En-De data provided by Google](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8).
Then:
##### 1. Extract the WMT'16 En-De data
```bash
TEXT=wmt16_en_de_bpe32k
mkdir -p $TEXT
tar -xzvf wmt16_en_de.tar.gz -C $TEXT
```
##### 2. Preprocess the dataset with a joined dictionary
```bash
fairseq-preprocess \
--source-lang en --target-lang de \
--trainpref $TEXT/train.tok.clean.bpe.32000 \
--validpref $TEXT/newstest2013.tok.bpe.32000 \
--testpref $TEXT/newstest2014.tok.bpe.32000 \
--destdir data-bin/wmt16_en_de_bpe32k \
--nwordssrc 32768 --nwordstgt 32768 \
--joined-dictionary \
--workers 20
```
##### 3. Train a model
```bash
fairseq-train \
data-bin/wmt16_en_de_bpe32k \
--arch transformer_vaswani_wmt_en_de_big --share-all-embeddings \
--optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
--lr 0.0005 --lr-scheduler inverse_sqrt --warmup-updates 4000 --warmup-init-lr 1e-07 \
--dropout 0.3 --weight-decay 0.0 \
--criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
--max-tokens 3584 \
--fp16
```
Note that the `--fp16` flag requires that you have CUDA 9.1 or greater and a Volta GPU or newer.
***IMPORTANT:*** You will get better performance by training with big batches and
increasing the learning rate. If you want to train the above model with big batches
(assuming your machine has 8 GPUs):
- add `--update-freq 16` to simulate training on 8x16=128 GPUs
- increase the learning rate; 0.001 works well for big batches
##### 4. Evaluate
Now we can evaluate our trained model.
Note that the original [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
paper used a couple of tricks to achieve better BLEU scores. We use these same tricks in
the Scaling NMT paper, so it's important to apply them when reproducing our results.
First, use the [average_checkpoints.py](/scripts/average_checkpoints.py) script to
average the last few checkpoints. Averaging the last 5-10 checkpoints is usually
good, but you may need to adjust this depending on how long you've trained:
```bash
python scripts/average_checkpoints.py \
--inputs /path/to/checkpoints \
--num-epoch-checkpoints 10 \
--output checkpoint.avg10.pt
```
Next, generate translations using a beam width of 4 and length penalty of 0.6:
```bash
fairseq-generate \
data-bin/wmt16_en_de_bpe32k \
--path checkpoint.avg10.pt \
--beam 4 --lenpen 0.6 --remove-bpe > gen.out
```
Finally, we apply the ["compound splitting" script](/scripts/compound_split_bleu.sh) to
add spaces around dashes. For example "Café-Liebhaber" would become three tokens:
"Café - Liebhaber". This typically results in larger BLEU scores, but it is not
appropriate to compare these inflated scores to work which does not include this trick.
This trick was used in the [original AIAYN code](https://github.com/tensorflow/tensor2tensor/blob/fc9335c0203685cbbfe2b30c92db4352d8f60779/tensor2tensor/utils/get_ende_bleu.sh),
so we used it in the Scaling NMT paper as well. That said, it's strongly advised to
report [sacrebleu](https://github.com/mjpost/sacrebleu) scores instead.
To compute "compound split" tokenized BLEU (not recommended!):
```bash
bash scripts/compound_split_bleu.sh gen.out
# BLEU4 = 29.29, 60.3/35.0/22.8/15.3 (BP=1.000, ratio=1.004, syslen=64763, reflen=64496)
```
To compute detokenized BLEU with sacrebleu (preferred):
```bash
bash scripts/sacrebleu.sh wmt14/full en de gen.out
# BLEU+case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt14/full+tok.13a+version.1.4.3 = 28.6 59.3/34.3/22.1/14.9 (BP = 1.000 ratio = 1.016 hyp_len = 63666 ref_len = 62688)
```
## Citation
```bibtex
@inproceedings{ott2018scaling,
title = {Scaling Neural Machine Translation},
author = {Ott, Myle and Edunov, Sergey and Grangier, David and Auli, Michael},
booktitle = {Proceedings of the Third Conference on Machine Translation (WMT)},
year = 2018,
}
```
|
COCO-LM/fairseq/examples/scaling_nmt/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/scaling_nmt/README.md",
"repo_id": "COCO-LM",
"token_count": 1960
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module(
"examples.simultaneous_translation.models." + model_name
)
|
COCO-LM/fairseq/examples/simultaneous_translation/models/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/models/__init__.py",
"repo_id": "COCO-LM",
"token_count": 174
}
| 193 |
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
task_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.tasks." + task_name)
|
COCO-LM/fairseq/examples/speech_recognition/tasks/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/tasks/__init__.py",
"repo_id": "COCO-LM",
"token_count": 108
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from fairseq import checkpoint_utils, utils, tasks
from . import DEFAULT_EOS, GET, SEND
from .agent import Agent
class SimulTransAgent(Agent):
def __init__(self, args):
# Load Model
self.load_model(args)
        # Build the word splitter
self.build_word_splitter(args)
self.max_len = args.max_len
self.eos = DEFAULT_EOS
@staticmethod
def add_args(parser):
parser.add_argument(
"--model-path",
type=str,
required=True,
help="path to your pretrained model.",
)
parser.add_argument(
"--data-bin", type=str, required=True, help="Path of data binary"
)
parser.add_argument(
"--user-dir",
type=str,
default="example/simultaneous_translation",
help="User directory for simultaneous translation",
)
parser.add_argument(
"--src-splitter-type",
type=str,
default=None,
help="Subword splitter type for source text",
)
parser.add_argument(
"--tgt-splitter-type",
type=str,
default=None,
help="Subword splitter type for target text",
)
parser.add_argument(
"--src-splitter-path",
type=str,
default=None,
help="Subword splitter model path for source text",
)
parser.add_argument(
"--tgt-splitter-path",
type=str,
default=None,
help="Subword splitter model path for target text",
)
parser.add_argument(
"--max-len",
type=int,
default=150,
help="Maximum length difference between source and target prediction",
)
parser.add_argument(
"--model-overrides",
default="{}",
type=str,
metavar="DICT",
help="A dictionary used to override model args at generation "
"that were used during model training",
)
# fmt: on
return parser
def load_dictionary(self, task):
raise NotImplementedError
def load_model(self, args):
args.user_dir = os.path.join(os.path.dirname(__file__), "..", "..")
utils.import_user_module(args)
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(
filename, json.loads(args.model_overrides)
)
saved_args = state["args"]
saved_args.data = args.data_bin
task = tasks.setup_task(saved_args)
# build model for ensemble
self.model = task.build_model(saved_args)
self.model.load_state_dict(state["model"], strict=True)
# Set dictionary
self.load_dictionary(task)
def init_states(self):
return {
"indices": {"src": [], "tgt": []},
"tokens": {"src": [], "tgt": []},
"segments": {"src": [], "tgt": []},
"steps": {"src": 0, "tgt": 0},
"finished": False,
"finish_read": False,
"model_states": {},
}
def update_states(self, states, new_state):
raise NotImplementedError
def policy(self, states):
# Read and Write policy
action = None
while action is None:
if states["finished"]:
# Finish the hypo by sending eos to server
return self.finish_action()
# The model makes a decision given the current states
decision = self.model.decision_from_states(states)
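# By convention here, decision == 0 means READ (request more source); anything else means WRITE.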
if decision == 0 and not self.finish_read(states):
# READ
action = self.read_action(states)
else:
# WRITE
action = self.write_action(states)
# None means we make a decision again without sending the server anything.
# This happens when reading a buffered token or predicting a subword.
return action
def finish_read(self, states):
raise NotImplementedError
def write_action(self, states):
token, index = self.model.predict_from_states(states)
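# *token* is the predicted target subword; *index* is its id in the target dictionary.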
if (
index == self.dict["tgt"].eos()
or len(states["tokens"]["tgt"]) > self.max_len
):
# Finish this sentence if the model predicted EOS
states["finished"] = True
end_idx_last_full_word = self._target_length(states)
else:
states["tokens"]["tgt"] += [token]
end_idx_last_full_word = self.word_splitter["tgt"].end_idx_last_full_word(
states["tokens"]["tgt"]
)
self._append_indices(states, [index], "tgt")
if end_idx_last_full_word > states["steps"]["tgt"]:
# Only send detokenized full words to the server
word = self.word_splitter["tgt"].merge(
states["tokens"]["tgt"][states["steps"]["tgt"] : end_idx_last_full_word]
)
states["steps"]["tgt"] = end_idx_last_full_word
states["segments"]["tgt"] += [word]
return {"key": SEND, "value": word}
else:
return None
def read_action(self, states):
return {"key": GET, "value": None}
def finish_action(self):
return {"key": SEND, "value": DEFAULT_EOS}
def reset(self):
pass
def finish_eval(self, states, new_state):
if len(new_state) == 0 and len(states["indices"]["src"]) == 0:
return True
return False
def _append_indices(self, states, new_indices, key):
states["indices"][key] += new_indices
def _target_length(self, states):
return len(states["tokens"]["tgt"])
|
COCO-LM/fairseq/examples/speech_to_text/simultaneous_translation/agents/simul_trans_agent.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/simultaneous_translation/agents/simul_trans_agent.py",
"repo_id": "COCO-LM",
"token_count": 2930
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
# we don't use fairseq's batching functionality, so we expect a single
# item: a tuple of (id, List[torch.Tensor])
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id]*item.size(0)),
"net_input": {
"src_tokens": item,
},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
bsz_per_shard, # number of items processed per GPUs per forward
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
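# Return the i-th BPTT chunk from each of this shard's bsz_per_shard parallel streams.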
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
|
COCO-LM/fairseq/examples/truncated_bptt/truncated_bptt_lm_task.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/truncated_bptt/truncated_bptt_lm_task.py",
"repo_id": "COCO-LM",
"token_count": 4667
}
| 196 |
# @package _group_
common:
fp16: true
log_format: json
log_interval: 200
checkpoint:
save_interval_updates: 25000
keep_interval_updates: 1
no_epoch_checkpoints: true
task:
_name: audio_pretraining
data: ???
max_sample_size: 250000
min_sample_size: 32000
normalize: false
dataset:
num_workers: 6
max_tokens: 1400000
skip_invalid_size_inputs_valid_test: true
distributed_training:
distributed_world_size: 64
ddp_backend: legacy_ddp
criterion:
_name: wav2vec
infonce: true
log_keys: ["prob_perplexity","code_perplexity","temp"]
loss_weights: [0.1, 10]
optimization:
max_update: 400000
lr: [0.0005]
optimizer:
_name: adam
adam_betas: (0.9,0.98)
adam_eps: 1e-06
weight_decay: 0.01
lr_scheduler:
_name: polynomial_decay
warmup_updates: 32000
model:
_name: wav2vec2
quantize_targets: true
final_dim: 256
encoder_layerdrop: 0.05
dropout_input: 0.1
dropout_features: 0.1
feature_grad_mult: 0.1
encoder_embed_dim: 768
|
COCO-LM/fairseq/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml/0
|
{
"file_path": "COCO-LM/fairseq/examples/wav2vec/config/pretraining/wav2vec2_base_librispeech.yaml",
"repo_id": "COCO-LM",
"token_count": 422
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("dummy_mt")
class DummyMTTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("--dict-size", default=49996, type=int)
parser.add_argument("--dataset-size", default=100000, type=int)
parser.add_argument("--src-len", default=30, type=int)
parser.add_argument("--tgt-len", default=30, type=int)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1
self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol("word{}".format(i))
logger.info("dictionary: {} types".format(len(dictionary)))
args.max_source_positions = args.src_len + dictionary.pad() + 2
args.max_target_positions = args.tgt_len + dictionary.pad() + 2
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
item_size = max(self.args.src_len, self.args.tgt_len)
if self.args.batch_size is not None:
bsz = self.args.batch_size
else:
bsz = max(1, self.args.max_tokens // item_size)
tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])
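# Every mini-batch replays this single synthetic batch (see DummyDataset.collater below).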
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.args.src_len, dtype=torch.long
),
"prev_output_tokens": tgt.clone(),
},
"target": tgt,
"nsentences": bsz,
"ntokens": bsz * self.args.tgt_len,
},
num_items=self.args.dataset_size,
item_size=item_size,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
|
COCO-LM/fairseq/fairseq/benchmark/dummy_mt.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/benchmark/dummy_mt.py",
"repo_id": "COCO-LM",
"token_count": 1684
}
| 198 |
# @package _group_
activation_fn: "relu"
dropout: 0.1
attention_dropout: 0.1
activation_dropout: 0.0
relu_dropout: 0.0
decoder_embed_dim: 512
decoder_output_dim: 512
decoder_input_dim: 512
decoder_ffn_embed_dim: 4096
decoder_layers: 12
decoder_attention_heads: 16
decoder_normalize_before: true
no_decoder_final_norm: true
adaptive_softmax_cutoff: null
adaptive_softmax_dropout: 0
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: false
adaptive_input_factor: 4
adaptive_input_cutoff: null
tie_adaptive_weights: false
tie_adaptive_proj: false
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
|
COCO-LM/fairseq/fairseq/config/model/transformer_lm/transformer_lm_gbw.yaml/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/config/model/transformer_lm/transformer_lm_gbw.yaml",
"repo_id": "COCO-LM",
"token_count": 389
}
| 199 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from examples.simultaneous_translation.utils.latency import LatencyTraining
from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
)
@register_criterion("latency_augmented_label_smoothed_cross_entropy")
class LatencyAugmentedLabelSmoothedCrossEntropyCriterion(
LabelSmoothedCrossEntropyCriterion
):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size,
report_accuracy,
latency_weight_avg,
latency_weight_avg_type,
latency_weight_var,
latency_weight_var_type,
mass_preservation,
average_method,
):
super().__init__(
task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy
)
self.eps = label_smoothing
self.latency_weight_avg = latency_weight_avg
self.latency_weight_avg_type = latency_weight_avg_type
self.latency_weight_var = latency_weight_var
self.latency_weight_var_type = latency_weight_var_type
self.mass_preservation = mass_preservation
self.average_method = average_method
self.latency_train = LatencyTraining(
self.latency_weight_avg,
self.latency_weight_var,
self.latency_weight_avg_type,
self.latency_weight_var_type,
self.mass_preservation,
self.average_method,
)
@staticmethod
def add_args(parser):
super(
LatencyAugmentedLabelSmoothedCrossEntropyCriterion,
LatencyAugmentedLabelSmoothedCrossEntropyCriterion,
).add_args(parser)
# fmt: off
"""Add criterion-specific arguments to the parser."""
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
metavar="D",
help="epsilon for label smoothing, 0 means no label smoothing",
)
parser.add_argument(
"--ignore_prefix_size",
default=0,
type=int,
help="ignore first N tokens",
)
parser.add_argument(
"--report-accuracy",
action="store_true",
help="report accuracy metric",
)
parser.add_argument("--latency-weight-avg", default=0., type=float, metavar='D',
help="Average loss weight")
parser.add_argument("--latency-weight-var", default=0., type=float, metavar='D',
help="Variance loss weight")
parser.add_argument("--latency-weight-avg-type", default="differentiable_average_lagging",
help="Statistics for Average loss type")
parser.add_argument("--latency-weight-var-type", default="variance_delay",
help="Statistics for variance loss type")
parser.add_argument("--average-method", default="weighted_average",
help="Average loss type")
# fmt: on
def compute_loss(self, model, net_output, sample, reduce=True):
# Compute cross entropy loss first
loss, nll_loss = super().compute_loss(model, net_output, sample, reduce)
# Obtain the expected alignment
attn_list = [item["alpha"] for item in net_output[-1]["attn_list"]]
target_padding_mask = model.get_targets(sample, net_output).eq(self.padding_idx)
source_padding_mask = net_output[-1].get("encoder_padding_mask", None)
# Get latency loss
latency_loss = self.latency_train.loss(
attn_list, source_padding_mask, target_padding_mask
)
loss += latency_loss
return loss, nll_loss
|
COCO-LM/fairseq/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py",
"repo_id": "COCO-LM",
"token_count": 1785
}
| 200 |
import numpy as np
from fairseq.data.audio.feature_transforms import (
AudioFeatureTransform,
register_audio_feature_transform,
)
@register_audio_feature_transform("global_cmvn")
class GlobalCMVN(AudioFeatureTransform):
"""Global CMVN (cepstral mean and variance normalization). The global mean
and variance need to be pre-computed and stored in NumPy format (.npz)."""
@classmethod
def from_config_dict(cls, config=None):
_config = {} if config is None else config
return GlobalCMVN(_config.get("stats_npz_path"))
def __init__(self, stats_npz_path):
self.stats_npz_path = stats_npz_path
stats = np.load(stats_npz_path)
self.mean, self.std = stats["mean"], stats["std"]
def __repr__(self):
return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")'
def __call__(self, x):
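# Apply global normalization: subtract the pre-computed mean and divide by the std.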
x = np.subtract(x, self.mean)
x = np.divide(x, self.std)
return x
|
COCO-LM/fairseq/fairseq/data/audio/feature_transforms/global_cmvn.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/audio/feature_transforms/global_cmvn.py",
"repo_id": "COCO-LM",
"token_count": 391
}
| 201 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
"--tokenizer",
default=None,
)
build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry(
"--bpe",
default=None,
)
# automatically import any Python files in the encoders/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.data.encoders." + module)
|
COCO-LM/fairseq/fairseq/data/encoders/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/__init__.py",
"repo_id": "COCO-LM",
"token_count": 264
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch.utils.data
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
class EpochListening:
"""Mixin for receiving updates whenever the epoch increments."""
@property
def can_reuse_epoch_itr_across_epochs(self):
"""
Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for
this dataset across epochs.
This needs to return ``False`` if the sample sizes can change across
epochs, in which case we may need to regenerate batches at each epoch.
If your dataset relies on ``set_epoch``, then you should consider setting
this to ``False``.
"""
return True
def set_epoch(self, epoch):
"""Will receive the updated epoch number at the beginning of the epoch."""
pass
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def attr(self, attr: str, index: int):
return getattr(self, attr, None)
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
def get_batch_shapes(self):
"""
Return a list of valid batch shapes, for example::
[(8, 512), (16, 256), (32, 128)]
The first dimension of each tuple is the batch size and can be ``None``
to automatically infer the max batch size based on ``--max-tokens``.
The second dimension of each tuple is the max supported length as given
by :func:`fairseq.data.FairseqDataset.num_tokens`.
This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size`
to restrict batch shapes. This is useful on TPUs to avoid too many
dynamic shapes (and recompilations).
"""
return None
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from fairseq.data import data_utils
fixed_shapes = self.get_batch_shapes()
if fixed_shapes is not None:
def adjust_bsz(bsz, num_tokens):
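# If bsz is None, infer it from --max-tokens (capped by --max-sentences);
# otherwise round it down to a multiple of required_batch_size_multiple.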
if bsz is None:
assert max_tokens is not None, "Must specify --max-tokens"
bsz = max_tokens // num_tokens
if max_sentences is not None:
bsz = min(bsz, max_sentences)
elif (
bsz >= required_batch_size_multiple
and bsz % required_batch_size_multiple != 0
):
bsz -= bsz % required_batch_size_multiple
return bsz
fixed_shapes = np.array(
[
[adjust_bsz(bsz, num_tokens), num_tokens]
for (bsz, num_tokens) in fixed_shapes
]
)
try:
num_tokens_vec = self.num_tokens_vec(indices).astype('int64')
except NotImplementedError:
num_tokens_vec = None
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
num_tokens_vec=num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
fixed_shapes=fixed_shapes,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't modify this method; override it in child classes instead
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
@property
def supports_fetch_outside_dataloader(self):
"""Whether this dataset supports fetching outside the workers of the dataloader."""
return True
class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening):
"""
For datasets that need to be read sequentially, usually because the data is
being streamed or otherwise can't be manipulated on a single machine.
"""
def __iter__(self):
raise NotImplementedError
|
COCO-LM/fairseq/fairseq/data/fairseq_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/fairseq_dataset.py",
"repo_id": "COCO-LM",
"token_count": 3132
}
| 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Callable, Dict, List
import numpy as np
from . import FairseqDataset
def uniform_sampler(x):
# Sample from uniform distribution
return np.random.choice(x, 1).item()
class MultiCorpusSampledDataset(FairseqDataset):
"""
Stores multiple instances of FairseqDataset together and in every iteration
creates a batch by first sampling a dataset according to a specified
probability distribution and then getting instances from that dataset.
Args:
datasets: an OrderedDict of FairseqDataset instances.
sampling_func: A function for sampling over list of dataset keys.
The default strategy is to sample uniformly.
"""
def __init__(
self,
datasets: Dict[str, FairseqDataset],
sampling_func: Callable[[List], int] = None,
):
super().__init__()
assert isinstance(datasets, OrderedDict)
self.datasets = datasets
if sampling_func is None:
sampling_func = uniform_sampler
self.sampling_func = sampling_func
self.total_num_instances = 0
for _, dataset in datasets.items():
assert isinstance(dataset, FairseqDataset)
self.total_num_instances += len(dataset)
self._ordered_indices = None
def __len__(self):
"""
Length of this dataset is the sum of individual datasets
"""
return self.total_num_instances
def ordered_indices(self):
"""
Ordered indices for batching. Here we call the underlying
dataset's ordered_indices() so that we get the same random ordering
as we would have from using the underlying dataset directly.
"""
if self._ordered_indices is None:
self._ordered_indices = OrderedDict(
[
(key, dataset.ordered_indices())
for key, dataset in self.datasets.items()
]
)
return np.arange(len(self))
def _map_index_to_dataset(self, key: int, index: int):
"""
Different underlying datasets have different lengths. In order to ensure
we are not accessing an index outside the range of the current dataset
size, we wrap around. This function should be called after we have
created an ordering for this and all underlying datasets.
"""
assert (
self._ordered_indices is not None
), "Must call MultiCorpusSampledDataset.ordered_indices() first"
mapped_index = index % len(self.datasets[key])
return self._ordered_indices[key][mapped_index]
def __getitem__(self, index: int):
"""
Get the item associated with index from each underlying dataset.
Since index is in the range of [0, TotalNumInstances], we need to
map the index to the dataset before retrieving the item.
"""
return OrderedDict(
[
(key, dataset[self._map_index_to_dataset(key, index)])
for key, dataset in self.datasets.items()
]
)
def collater(self, samples: List[Dict]):
"""
Generate a mini-batch for this dataset.
To convert this into a regular mini-batch we use the following
logic:
1. Select a dataset using the specified probability distribution.
2. Call the collater function of the selected dataset.
"""
if len(samples) == 0:
return None
selected_key = self.sampling_func(list(self.datasets.keys()))
selected_samples = [sample[selected_key] for sample in samples]
return self.datasets[selected_key].collater(selected_samples)
def num_tokens(self, index: int):
"""
Return an example's length (number of tokens), used for batching. Here
we return the max across all examples at index across all underlying
datasets.
"""
return max(
dataset.num_tokens(self._map_index_to_dataset(key, index))
for key, dataset in self.datasets.items()
)
def size(self, index: int):
"""
Return an example's size as a float or tuple. Here we return the max
across all underlying datasets. This value is used when filtering a
dataset with max-positions.
"""
return max(
dataset.size(self._map_index_to_dataset(key, index))
for key, dataset in self.datasets.items()
)
@property
def supports_prefetch(self):
return all(
getattr(dataset, "supports_prefetch", False)
for dataset in self.datasets.values()
)
def prefetch(self, indices):
for key, dataset in self.datasets.items():
dataset.prefetch(
[self._map_index_to_dataset(key, index) for index in indices]
)
@property
def supports_fetch_outside_dataloader(self):
return all(
self.datasets[key].supports_fetch_outside_dataloader
for key in self.datasets
)
|
COCO-LM/fairseq/fairseq/data/multi_corpus_sampled_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multi_corpus_sampled_dataset.py",
"repo_id": "COCO-LM",
"token_count": 2181
}
| 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class RawLabelDataset(FairseqDataset):
def __init__(self, labels):
super().__init__()
self.labels = labels
def __getitem__(self, index):
return self.labels[index]
def __len__(self):
return len(self.labels)
def collater(self, samples):
return torch.tensor(samples)
class RawArrayDataset(FairseqDataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
if hasattr(dataset, 'sizes'):
self._sizes = dataset.sizes
else:
try:
self._sizes = np.array([len(x) for x in self.dataset])
except Exception:
self._sizes = np.array([1 for x in self.dataset])
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if hasattr(self.dataset, 'collater'):
return self.dataset.collater(samples)
else:
return default_collate(samples)
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
|
COCO-LM/fairseq/fairseq/data/raw_label_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/raw_label_dataset.py",
"repo_id": "COCO-LM",
"token_count": 662
}
| 205 |
# cython: language_level=3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from itertools import chain
from libc.math cimport ceil
cimport cython
cimport numpy as np
from libc.stdint cimport int32_t, int64_t
DTYPE = np.int64
ctypedef int64_t DTYPE_t
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cdef np.ndarray[DTYPE_t, ndim=2] _get_slice_indices_none_mode(np.ndarray[DTYPE_t, ndim=1] sizes, int block_size):
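# Split the concatenated token stream into contiguous blocks of block_size tokens;
# the final block may be shorter.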
cdef DTYPE_t total_size = sizes.sum()
cdef DTYPE_t length = <DTYPE_t> ceil(total_size / <double> block_size)
cdef np.ndarray[DTYPE_t, ndim=2] slice_indices = np.zeros([length, 2], dtype=DTYPE)
cdef DTYPE_t[:, :] slice_indices_view = slice_indices
cdef DTYPE_t i
cdef DTYPE_t start
cdef DTYPE_t end
for i in range(length):
start = i * block_size
end = min(start + block_size, total_size)
slice_indices_view[i][0] = start
slice_indices_view[i][1] = end
return slice_indices
cdef np.ndarray[DTYPE_t, ndim=2] _fast_convert_to_np_array(list list_of_list):
"""
Faster function to convert DTYPE_t list of list.
Only fast when there are huge number of rows and low number of columns.
"""
cdef np.ndarray[DTYPE_t, ndim=1] flat = np.fromiter(chain.from_iterable(list_of_list), DTYPE, -1)
return flat.reshape((len(list_of_list), -1))
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cpdef np.ndarray[DTYPE_t, ndim=2] _get_slice_indices_fast(np.ndarray[DTYPE_t, ndim=1] sizes, str break_mode, int block_size, int document_sep_len):
cdef DTYPE_t tok_idx = 0
cdef DTYPE_t sz_idx = 0
cdef DTYPE_t curr_size = 0
cdef DTYPE_t i = 0
cdef DTYPE_t length
cdef DTYPE_t total_size
cdef DTYPE_t[:] sizes_view = sizes
cdef np.ndarray[DTYPE_t, ndim=2] slice_indices
cdef list slice_indices_list = []
if break_mode is None or break_mode == 'none':
slice_indices = _get_slice_indices_none_mode(sizes, block_size)
elif break_mode == 'complete':
while sz_idx < len(sizes_view):
if curr_size + sizes_view[sz_idx] <= block_size or curr_size == 0:
curr_size += sizes_view[sz_idx]
sz_idx += 1
else:
slice_indices_list.append((tok_idx, tok_idx + curr_size))
tok_idx += curr_size
curr_size = 0
if curr_size > 0:
slice_indices_list.append((tok_idx, tok_idx + curr_size))
slice_indices = _fast_convert_to_np_array(slice_indices_list)
elif break_mode == 'complete_doc':
while sz_idx < len(sizes_view):
if (
(curr_size + sizes_view[sz_idx] <= block_size or curr_size == 0)
# an empty sentence indicates end-of-document:
and sizes_view[sz_idx] != document_sep_len
):
curr_size += sizes_view[sz_idx]
sz_idx += 1
else:
# Only keep non-empty documents.
if curr_size > 1:
slice_indices_list.append((tok_idx, tok_idx + curr_size))
tok_idx += curr_size
curr_size = 0
if sizes_view[sz_idx] == document_sep_len:
tok_idx += sizes_view[sz_idx]
sz_idx += 1
if curr_size > 1:
slice_indices_list.append((tok_idx, tok_idx + curr_size))
slice_indices = _fast_convert_to_np_array(slice_indices_list)
elif break_mode == 'eos':
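# One block per sentence: boundaries are given by the cumulative sentence sizes.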
slice_indices = np.zeros((len(sizes), 2), dtype=DTYPE)
cumsum = sizes.cumsum(axis=0)
slice_indices[1:, 0] = cumsum[:cumsum.shape[0] - 1]
slice_indices[:, 1] = cumsum
else:
raise ValueError('Invalid break_mode: ' + break_mode)
return slice_indices
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cpdef np.ndarray[DTYPE_t, ndim=2] _get_block_to_dataset_index_fast(np.ndarray[DTYPE_t, ndim=1] sizes, np.ndarray[DTYPE_t, ndim=2] slice_indices):
cdef DTYPE_t start_ds_idx
cdef DTYPE_t start_offset
cdef DTYPE_t end_ds_idx
cdef DTYPE_t i
cdef DTYPE_t s
cdef DTYPE_t e
cdef DatasetSearcher ds = DatasetSearcher(sizes)
cdef np.ndarray[DTYPE_t, ndim=2] block_to_dataset_index = np.zeros([len(slice_indices), 3], dtype=DTYPE)
cdef DTYPE_t[:, :] block_to_dataset_index_view = block_to_dataset_index
cdef DTYPE_t[:, :] slice_indices_view = slice_indices
cdef Py_ssize_t x_max = slice_indices.shape[0]
for i in range(x_max):
s = slice_indices_view[i][0]
e = slice_indices_view[i][1]
ds.seek(s)
start_ds_idx = ds.current_index
start_offset = ds.current_offset
if e <= s:
end_ds_idx = start_ds_idx
else:
ds.seek(e - 1)
end_ds_idx = ds.current_index
block_to_dataset_index_view[i][0] = start_ds_idx # starting index in dataset
block_to_dataset_index_view[i][1] = start_offset # starting offset within starting index
block_to_dataset_index_view[i][2] = end_ds_idx # ending index in dataset
return block_to_dataset_index
cdef class DatasetSearcher(object):
"""Helper for mapping "flat" indices to indices and offsets in an
underlying dataset."""
cdef DTYPE_t current_i
cdef DTYPE_t current_offset
cdef DTYPE_t current_index
cdef DTYPE_t[:] sizes
def __init__(self, DTYPE_t[:] sizes):
self.sizes = sizes
self.reset()
cdef reset(self):
self.current_offset = 0 # offset within current index in underlying dataset
self.current_i = 0 # "flat" index
self.current_index = 0 # index in underlying dataset
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cdef int step(self, DTYPE_t i):
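# Advance towards flat index i; returns 1 if the cursor moved on to the next
# underlying item and another step is needed, 0 once i has been reached.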
cdef DTYPE_t to_consume
cdef DTYPE_t remaining
if i < self.current_i:
self.reset()
if i > self.current_i:
to_consume = i - self.current_i
remaining = self.sizes[self.current_index] - self.current_offset
if remaining > to_consume:
self.current_offset += to_consume
self.current_i += to_consume
else:
assert remaining >= 0
self.current_i += remaining
self.current_index += 1
self.current_offset = 0
return 1
return 0
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cdef seek(self, DTYPE_t i):
cdef int not_done = 1
while not_done == 1:
not_done = self.step(i)
assert self.current_i == i
|
COCO-LM/fairseq/fairseq/data/token_block_utils_fast.pyx/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/token_block_utils_fast.pyx",
"repo_id": "COCO-LM",
"token_count": 3387
}
| 206 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from hashlib import sha256
from io import open
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError:
logger.info(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info(
"loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file
)
)
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
logger.info(
"extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir
)
)
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, "r:" + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the URL's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
import requests
for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
try:
return func(timeout=timeout)
except requests.exceptions.Timeout as e:
logger.warning(
"Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
url,
attempt,
timeout,
exc_info=e,
)
continue
raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = request_wrap_timeout(
partial(requests.head, url, allow_redirects=True), url
)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except RuntimeError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
|
COCO-LM/fairseq/fairseq/file_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/file_utils.py",
"repo_id": "COCO-LM",
"token_count": 4762
}
| 207 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from torch import Tensor
EncoderOut = NamedTuple(
"EncoderOut",
[
("encoder_out", Tensor), # T x B x C
("encoder_padding_mask", Optional[Tensor]), # B x T
("encoder_embedding", Optional[Tensor]), # B x T x C
("encoder_states", Optional[List[Tensor]]), # List[T x B x C]
("src_tokens", Optional[Tensor]), # B x T
("src_lengths", Optional[Tensor]), # B x 1
],
)
class FairseqEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
"""
raise NotImplementedError
def forward_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward(
src_tokens=net_input["src_tokens"],
src_lengths=net_input["src_lengths"],
)
else:
return self.forward_non_torchscript(net_input)
@torch.jit.unused
def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens"
}
return self.forward(**encoder_input)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
|
COCO-LM/fairseq/fairseq/models/fairseq_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/fairseq_encoder.py",
"repo_id": "COCO-LM",
"token_count": 1262
}
| 208 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file implements:
Ghazvininejad, Marjan, et al.
"Constant-time machine translation with conditional masked language models."
arXiv preprint arXiv:1904.09324 (2019).
"""
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
from fairseq.utils import new_arange
def _skeptical_unmasking(output_scores, output_masks, p):
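# Mask-predict style re-masking: pick the p fraction of currently decoded
# positions with the lowest scores so they are re-predicted in the next iteration.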
sorted_index = output_scores.sort(-1)[1]
boundary_len = (
(output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p
).long()
skeptical_mask = new_arange(output_masks) < boundary_len
return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
@register_model("cmlm_transformer")
class CMLMNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert not self.decoder.src_embedding_copy, "do not support embedding copy."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_mask = prev_output_tokens.eq(self.unk)
return {
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
max_step = decoder_out.max_step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.eq(self.unk)
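# Only positions still holding the unk/mask token are re-predicted in this pass.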
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
# skeptical decoding (depend on the maximum decoding steps.)
if (step + 1) < max_step:
skeptical_mask = _skeptical_unmasking(
output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step
)
output_tokens.masked_fill_(skeptical_mask, self.unk)
output_scores.masked_fill_(skeptical_mask, 0.0)
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("cmlm_transformer", "cmlm_transformer")
def cmlm_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
@register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de")
def cmlm_wmt_en_de(args):
cmlm_base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/nat/cmlm_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/cmlm_transformer.py",
"repo_id": "COCO-LM",
"token_count": 2846
}
| 209 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .berard import * # noqa
from .convtransformer import * # noqa
from .s2t_transformer import * # noqa
|
COCO-LM/fairseq/fairseq/models/speech_to_text/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/speech_to_text/__init__.py",
"repo_id": "COCO-LM",
"token_count": 83
}
| 210 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Optional, Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "length of each masked span (in time steps)"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "don't fine-tune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded wav2vec args
w2v_args: Any = None
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
mask_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
mask_channel_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
conv_feature_layers: Optional[str] = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": (
"string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
),
},
)
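# Note: the model code evaluates this string into a Python list of
# (dim, kernel_size, stride) tuples; the default above expands to a 7-layer
# feature extractor: one (512, 10, 5) layer, four (512, 3, 2) layers and
# two (512, 2, 2) layers.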
encoder_embed_dim: Optional[int] = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output):
logits = net_output["encoder_out"]
padding = net_output["padding_mask"]
if padding is not None and padding.any():
padding = padding.T
# Boolean indexing returns a copy, so a chained assignment such as
# ``logits[padding][..., 0] = 0`` would be a silent no-op. Build the
# per-class row (0 for class 0, -inf elsewhere) once and assign it to the
# padded positions in a single indexed assignment instead.
masking_tensor = logits.new_full((logits.size(-1),), float("-inf"))
masking_tensor[0] = 0
logits[padding] = masking_tensor
return logits
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
autoregressive: bool = II("task.autoregressive")
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
assert cfg.autoregressive, "Please set task.autoregressive=true for seq2seq asr models"
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(tbc=False, **kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
model.load_state_dict(state["model"], strict=True)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
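# While num_updates is still below freeze_finetune_updates, the pretrained
# encoder runs under torch.no_grad(), so its weights stay frozen; once the
# freeze window has passed, contextlib.ExitStack() is a no-op context
# manager and gradients flow through the wav2vec model as usual.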
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask.transpose(0, 1), # T x B
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"]
if encoder_out is not None
else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
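# The cached future mask is upper-triangular, with -inf strictly above the
# diagonal and 0 elsewhere; e.g. for dim == 3:
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so each position may only attend to itself and earlier positions.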
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
|
COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py",
"repo_id": "COCO-LM",
"token_count": 9946
}
| 211 |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <c10/cuda/CUDAStream.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#define SHFL_MASK 0xffffffff
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_forward_kernel(const scalar_t* input,
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* output);
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_backward_kernel(
const scalar_t* gradOutput, // B * C * T
const scalar_t* input, // B * C * T
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* gradWeight,
scalar_t* gradInput); // B * H * k * T
|
COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh",
"repo_id": "COCO-LM",
"token_count": 663
}
| 212 |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <torch/extension.h>
#include <vector>
std::vector<at::Tensor> lightconv_cuda_forward(
at::Tensor input,
at::Tensor filters,
int padding_l);
std::vector<at::Tensor> lightconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters);
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::vector<at::Tensor> lightconv_forward(
at::Tensor input,
at::Tensor filters,
int padding_l) {
CHECK_INPUT(input);
CHECK_INPUT(filters);
return lightconv_cuda_forward(input, filters, padding_l);
}
std::vector<at::Tensor> lightconv_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
CHECK_INPUT(gradOutput);
CHECK_INPUT(input);
CHECK_INPUT(filters);
return lightconv_cuda_backward(gradOutput, padding_l, input, filters);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &lightconv_forward, "lighconv forward (CUDA)");
m.def("backward", &lightconv_backward, "lighconv backward (CUDA)");
}
|
COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda.cpp",
"repo_id": "COCO-LM",
"token_count": 571
}
| 213 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQLinear(nn.Module):
"""
Quantized counterpart of the nn.Linear module. Stores the centroids, the
assignments and the non-quantized bias. The full weight matrix is
re-instantiated at each forward pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Linear module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 15% slower than
the non-quantized nn.Linear module for a standard training loop.
"""
def __init__(self, centroids, assignments, bias, in_features, out_features):
super(PQLinear, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_features = in_features
self.out_features = out_features
# check compatibility
if self.in_features % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.out_features != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
@property
def weight(self):
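# Reconstruct the dense weight matrix from the codebook on the fly:
#   centroids[assignments] -> (out_features * n_blocks, block_size)
#   .reshape(...)          -> (n_blocks, out_features, block_size)
#   .permute(1, 0, 2)      -> (out_features, n_blocks, block_size)
#   .flatten(1, 2)         -> (out_features, n_blocks * block_size) == (out_features, in_features)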
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, x):
return F.linear(
x,
self.weight,
self.bias,
)
def extra_repr(self):
return f"in_features={self.in_features},\
out_features={self.out_features},\
n_centroids={self.n_centroids},\
block_size={self.block_size},\
bias={self.bias is not None}"
|
COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qlinear.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/pq/modules/qlinear.py",
"repo_id": "COCO-LM",
"token_count": 1091
}
| 214 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.modules import TransformerSentenceEncoder
from fairseq.modules.sparse_transformer_sentence_encoder_layer import (
SparseTransformerSentenceEncoderLayer,
)
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
"""
Sparse implementation of the TransformerSentenceEncoder
- see SparseMultiheadAttention
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
is_bidirectional: bool = True,
stride: int = 32,
expressivity: int = 8,
) -> None:
super().__init__(
padding_idx,
vocab_size,
num_encoder_layers,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
max_seq_len,
num_segments,
use_position_embeddings,
offset_positions_by_padding,
encoder_normalize_before,
apply_bert_init,
activation_fn,
learned_pos_embedding,
embed_scale,
freeze_embeddings,
n_trans_layers_to_freeze,
export,
)
self.layers = nn.ModuleList(
[
SparseTransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
is_bidirectional=is_bidirectional,
stride=stride,
expressivity=expressivity,
)
for _ in range(num_encoder_layers)
]
)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
|
COCO-LM/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py",
"repo_id": "COCO-LM",
"token_count": 1608
}
| 215 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
import torch.distributed as dist
from fairseq.dataclass.configs import FairseqBMUFConfig
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim.fairseq_optimizer import FairseqOptimizer
class FairseqBMUF(FairseqOptimizer):
"""
Implements incremental block distributed data parallelism similar to
https://ieeexplore.ieee.org/document/7472805
Paper title: Scalable training of deep learning machines by incremental
block training with intra-block parallel optimization and blockwise
model-update filtering
"""
def __init__(self, cfg: FairseqBMUFConfig, optimizer):
super().__init__(cfg)
self._optimizer = optimizer
self._num_updates = 0
self.sync_iter = cfg.global_sync_iter
self.block_momentum = cfg.block_momentum
self.block_lr = cfg.block_lr
self._reset_local_data()
self.warmup_iteration = cfg.warmup_iterations
self.use_nbm = cfg.use_nbm
self.initial_state = self._optimizer.state_dict()
self.average_sync = self.cfg.average_sync
self.world_size = self.cfg.distributed_world_size
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
gen_parser_from_dataclass(parser, FairseqBMUFConfig())
@property
def optimizer(self):
return self._optimizer.optimizer
@property
def optimizer_config(self):
return self._optimizer.optimizer_config
def get_lr(self):
return self._optimizer.get_lr()
def set_lr(self, lr):
self._optimizer.set_lr(lr)
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
self._optimizer.load_state_dict(state_dict, optimizer_overrides)
self.initial_state = self._optimizer.state_dict()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
def average_params(self):
self._optimizer.average_params()
def _block_sync(self):
if self.world_size <= 1:
return
# Update the global model using local models from all GPUs
# (Step-1) Calculate grad between previously synced model and
# current local model
if self.block_momentum != 0:
self._calc_grad()
# (Step-2) Average gradient from all GPUs
self._avg_grad_from_all_gpus()
# (Step-3) Calculate global momentum and update the global model
if self.block_momentum != 0:
self._update_global_model()
# (Step-4) Average local optimizer params
if self.average_sync:
self.average_params()
def _is_warmup_end(self):
# Check whether the number of training iterations equals the warmup iterations
if self.get_num_updates() == self.warmup_iteration:
return True
return False
def _is_bmuf_iter(self):
# Check whether it is time for a BMUF sync (past warmup and at a sync-interval boundary)
if (self.get_num_updates() > self.warmup_iteration) and (
self.get_num_updates() % self.sync_iter == 0
):
return True
return False
def _warmup_sync(self, root_rank=0):
if self.world_size <= 1:
return
# Broadcast the local model to all gpus
for param in self.params:
dist.broadcast(param.data, src=root_rank)
# Update local optimizer state
if self.average_sync:
self._optimizer.average_params()
else:
self._optimizer.load_state_dict(self.initial_state)
self._reset_local_data()
def step(self, closure=None):
"""Performs a single optimization step."""
self._optimizer.step(closure)
self.set_num_updates(self.get_num_updates() + 1)
if self._is_warmup_end():
self._warmup_sync()
elif self._is_bmuf_iter():
self._block_sync()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self._optimizer.zero_grad()
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
@torch.no_grad()
def _reset_local_data(self):
# (Step-0) Initialize global momentum parameters and store global copy on each gpu
self.global_params = [torch.zeros_like(p.data) for p in self.params]
self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
# saving the global model locally for calculating gradient during bmuf sync
for param, global_param in zip(self.params, self.global_params):
global_param.copy_(param.data)
@torch.no_grad()
def _calc_grad(self):
# global_params is the global copy saved at the previously finished
# synchronisation, and param.data is the local parameter after
# block_sync_freq updates on this GPU, so grad is the difference between
# the previously synced model and the current local model.
for index, (param, global_param) in enumerate(
zip(self.params, self.global_params)
):
self.grads[index] = global_param - param.data
def _avg_grad_from_all_gpus(self):
for index, param in enumerate(self.params):
sync_para = param.data if self.block_momentum == 0 else self.grads[index]
sync_para /= float(dist.get_world_size())
dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)
@torch.no_grad()
def _update_global_model(self):
for index, (param, global_param, smoothed_grad, grad) in enumerate(
zip(
self.params,
self.global_params,
self.smoothed_grads,
# all gpus would share the same value of smoothed_grad, since it is
# always computed on synchronized gradients.
self.grads,
)
):
# global_param is the last synchronized parameter. Although smoothed_grad
# is computed locally, every process holds the same value of smoothed_grad,
# so param remains a globally synchronized copy.
# smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t)
smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad
param.data.copy_(global_param - smoothed_grad)
# A Nesterov momentum here is to do a partial weight update before
# calculating the gradient
if self.use_nbm:
param.data.copy_(param.data - self.block_momentum * smoothed_grad)
# backup for the next synchronization.
self.smoothed_grads[index] = smoothed_grad
global_param.copy_(param.data)
|
COCO-LM/fairseq/fairseq/optim/bmuf.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/bmuf.py",
"repo_id": "COCO-LM",
"token_count": 3121
}
| 216 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List
import torch.optim.lr_scheduler
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass):
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
lr_threshold: float = field(
default=1e-4,
metadata={
"help": (
"threshold for measuring the new optimum, to only focus on "
"significant changes"
)
},
)
lr_patience: int = field(
default=0,
metadata={
"help": (
"number of epochs with no improvement after which learning rate will "
"be reduced"
)
},
)
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = II("optimization.lr")
maximize_best_checkpoint_metric: bool = II(
"checkpoint.maximize_best_checkpoint_metric"
)
@register_lr_scheduler(
"reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig
)
class ReduceLROnPlateauLRSchedule(FairseqLRScheduler):
"""
Decay the LR by a factor every time the validation loss plateaus.
Also comes with optional warmup phase, where we linearly increase
the learning rate from some initial learning rate
(``--warmup-init-lr``) until the configured learning rate
(``--lr``). Thereafter the lr is adjusted according to original
reduce_on_plateau scheme.
During warmup::
lrs = torch.linspace(
cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates
)
lr = lrs[update_num]
"""
def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with reduce_lr_on_plateau."
" Consider --lr-scheduler=fixed instead."
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer.optimizer,
patience=cfg.lr_patience,
factor=cfg.lr_shrink,
mode="max" if cfg.maximize_best_checkpoint_metric else "min",
threshold=cfg.lr_threshold,
)
warmup_end_lr = cfg.lr[0]
# if no warm up, sets initial lr to be cfg.lr[0]
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first cfg.warmup_updates
if cfg.warmup_updates > 0:
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
# this flag is either set from arg when no warm up, or set by
# step_update() when warmup finishes
self.warmup_end = True if cfg.warmup_updates <= 0 else False
# initial learning rate
# this self.lr is used only during init and/or warm up period
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def state_dict(self):
"""Return the LR scheduler state dict."""
return {
"best": self.lr_scheduler.best,
"last_epoch": self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.lr_scheduler.best = state_dict["best"]
if "last_epoch" in state_dict:
self.lr_scheduler.last_epoch = state_dict["last_epoch"]
def step(self, epoch, val_loss=None):
"""
Update the learning rate at the end of the given epoch if warmup
finishes otherwise no update of lr on epoch boundaries
"""
if val_loss is not None and self.warmup_end is True:
self.lr_scheduler.step(val_loss)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""
Update the learning rate after each update."""
# if there is warmup
if self.cfg.warmup_updates > 0:
if num_updates <= self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
self.optimizer.set_lr(self.lr)
else:
if self.warmup_end is False:
self.warmup_end = True
# else do nothing
return self.optimizer.get_lr()
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py",
"repo_id": "COCO-LM",
"token_count": 2249
}
| 217 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# source length, i.e. the number of tokens excluding EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input")
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
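# e.g. with the defaults max_len_a == 0 and max_len_b == 200, generation is
# capped at min(200, model_max_decoder_positions - 1) tokens.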
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
) # contains lists of dictionaries of information about the hypotheses being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
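# (once the maximum length is reached, every symbol except EOS gets
# probability -inf, which forces the hypothesis to terminate)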
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1-D tensor of absolute (flattened batch * beam) indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
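# e.g. finished == [False, True, False] yields cum_unfin == [0, 1]: reduced
# index 0 maps back to original sentence 0, and reduced index 1 maps to
# original sentence 2 (one finished sentence precedes it).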
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
            left_pad_target (bool, optional): whether the hypotheses
                should be left-padded when they are teacher-forced for
                generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
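# Usage note (added commentary, a minimal sketch -- not part of the original
# fairseq source; exact constructor/keyword arguments may differ by version):
#
#   generator = SequenceGenerator([model], tgt_dict, beam_size=5, max_len_b=200)
#   sample = {"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}}
#   hypos = generator.generate([model], sample)
#   # hypos[i][j] is the j-th best hypothesis for sentence i: a dict with
#   # "tokens", "score", "attention", "alignment" and "positional_scores",
#   # matching the entries built in finalize_hypos() above.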
|
COCO-LM/fairseq/fairseq/sequence_generator.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/sequence_generator.py",
"repo_id": "COCO-LM",
"token_count": 19051
}
| 218 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
get_features_or_waveform
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
dict_path = op.join(args.data, data_cfg.vocab_filename)
if not op.isfile(dict_path):
raise FileNotFoundError(f"Dict not found: {dict_path}")
tgt_dict = Dictionary.load(dict_path)
logger.info(
f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
"target language ID token is prepended as BOS."
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
return super(SpeechToTextTask, self).build_model(args)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
"target language ID token is prepended as BOS."
)
lang_token_ids = {
i
for s, i in self.tgt_dict.indices.items()
if SpeechToTextDataset.is_lang_tag(s)
}
extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
return lines, n_frames
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
return SpeechToTextDataset(
"interactive", False, self.data_cfg, src_tokens, src_lengths
)
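# Example invocation (added commentary, a hedged sketch; only the flags defined
# in add_args() above are shown -- model, optimizer and batching flags are
# omitted and paths are placeholders):
#
#   fairseq-train /path/to/s2t_data --task speech_to_text \
#       --config-yaml config.yaml --max-source-positions 6000 \
#       --max-target-positions 1024 ...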
|
COCO-LM/fairseq/fairseq/tasks/speech_to_text.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/speech_to_text.py",
"repo_id": "COCO-LM",
"token_count": 2396
}
| 219 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
import hydra
from hydra.core.hydra_config import HydraConfig
import torch
from omegaconf import OmegaConf, open_dict
logger = logging.getLogger("fairseq_cli.hydra_train")
@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
add_defaults(cfg)
if cfg.common.reset_logging:
reset_logging() # Hydra hijacks logging, fix that
else:
with open_dict(cfg):
            # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(HydraConfig.get().job_logging, resolve=True)
cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
OmegaConf.set_struct(cfg, True)
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, pre_main)
else:
distributed_utils.call_main(cfg, pre_main)
except BaseException as e:
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! " + str(e))
# get best val and return - useful for sweepers
try:
best_val = metrics.get_smoothed_value(
"valid", cfg.checkpoint.best_checkpoint_metric
)
    except Exception:
best_val = None
if best_val is None:
best_val = float("inf")
return best_val
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
    except Exception:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
hydra_init(cfg_name)
hydra_main()
if __name__ == "__main__":
cli_main()
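# Example invocation (added commentary, a hedged sketch; the console entry point
# is typically installed as `fairseq-hydra-train`, and the config dir/name and
# data path below are placeholders):
#
#   fairseq-hydra-train --config-dir /path/to/configs --config-name config \
#       task.data=/path/to/data common.reset_logging=true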
|
COCO-LM/fairseq/fairseq_cli/hydra_train.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq_cli/hydra_train.py",
"repo_id": "COCO-LM",
"token_count": 1142
}
| 220 |
/**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THC.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + std::log(sum)) {}
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
: logsum(max_log_sum_exp) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
const int max_threads = 1024;
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < (max_block_size/2)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(32));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
__syncwarp(mask);
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
AccumT* reducVal1,
AccumT val1,
const Reduction1<AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
AccumT val2,
const Reduction2<AccumT>& r2,
AccumT defaultVal2)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val1;
smem[blockDim.x + threadIdx.x] = val2;
__syncthreads();
AccumT warpVal1 = defaultVal1;
AccumT warpVal2 = defaultVal2;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal1 = r1(warpVal1, smem[lane * 32 + i]);
warpVal2 = r2(warpVal2, smem[lane * 32 + i + blockDim.x]);
}
__syncwarp(mask);
smem[lane] = warpVal1;
smem[lane + blockDim.x] = warpVal2;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal1 = defaultVal1;
AccumT blockVal2 = defaultVal2;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal1 = r1(blockVal1, smem[i]);
blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
}
smem[0] = blockVal1;
smem[blockDim.x] = blockVal2;
}
// Sync and broadcast
__syncthreads();
*reducVal1 = smem[0];
*reducVal2 = smem[blockDim.x];
__syncthreads();
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
T* data,
int size,
AccumT* reducVal1,
const Reduction1<T, AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
const Reduction2<T, AccumT>& r2,
AccumT defaultVal2)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal1 = defaultVal1;
AccumT threadVal2 = defaultVal2;
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal1 = r1(threadVal1, v[j]);
threadVal2 = r2(threadVal2, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x) {
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
*reducVal1 = threadVal1;
*reducVal2 = threadVal2;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
accscalar_t *losses,
outscalar_t *max_log_sum_exp,
scalar_t *input,
int64_t *labels,
int64_t classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
//output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
int64_t label = labels[blockIdx.x];
// find the max and sum
accscalar_t threadMax, threadSum, max_k, sum_k;
ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes,
&threadMax, MaxFloat<scalar_t, accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&threadSum, AddFloat<scalar_t, accscalar_t>(),
static_cast<accscalar_t>(0));
blockReduce<Max, Add, accscalar_t>(
sdata,
&max_k, threadMax, Max<accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&sum_k, threadSum, Add<accscalar_t>(),
static_cast<accscalar_t>(0));
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
// calculate per element loss
// reserve max + log_sum_exp for bprop
if (threadIdx.x == 0) {
accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
losses[blockIdx.x] = -log_prob;
max_log_sum_exp[blockIdx.x] = max_k + std::log(sumAll);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
int classes)
{
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
accscalar_t tmpLogits[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput * (
std::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
(offset + j * blockDim.x == label) ? 1 : 0));
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>((offset == label) ? 1 : 0));
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
int classes)
{
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
logits -= shift;
gradInput -= shift;
classes += shift;
if(threadIdx.x >= shift){
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0));
}
classes -= blockDim.x;
gradInput += blockDim.x;
logits += blockDim.x;
shift -= blockDim.x;
}
int last = classes % (ILP * blockDim.x);
typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
// input
scalar_t v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
// output
scalar_t r[ILP];
LoadT* result = reinterpret_cast<LoadT*>(&r);
for (; offset * ILP < (classes - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
r[j] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(v[j]) - coeff) -
static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0));
}
reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
}
offset = classes - last + threadIdx.x;
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0));
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
int classes)
{
gradInput += blockIdx.x * classes;
logits += blockIdx.x * classes;
// Do vectorized load/store when input/output have same alignment
const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
if (shift == shift_){
aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, classes);
}
else {
apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, classes);
}
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_bf16_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const bool bf16_to_float){
if (bf16_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::BFloat16,"conversion is supported for bf16 type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, bf16_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::BFloat16, true>, float>::value ||
std::is_same<acc_type<at::BFloat16, true>, double>::value,
"accscalar_t for bf16 should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
  // This kernel spawns one block per element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
using namespace at;
DISPATCH_FLOAT_AND_BF16(input.scalar_type(), 0, "host_bf16_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!bf16_to_float) {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size
);
} else {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size
);
}
);
THCudaCheck(cudaGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half
|| input_.type().scalarType() == ScalarType::BFloat16,"conversion is supported for half or BFloat16 type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
static_assert(std::is_same<acc_type<at::BFloat16, true>, float>::value ||
std::is_same<acc_type<at::BFloat16, true>, double>::value,
"accscalar_t for BFloat16 should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
  // This kernel spawns one block per element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
using namespace at;
DISPATCH_FLOAT_AND_HALF_AND_BF16(input.scalar_type(), 0, "host_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size
);
} else {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size
);
}
);
THCudaCheck(cudaGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits_,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
bool half_to_float) {
const int64_t dim = 1;
Tensor gI = at::empty_like(logits_);
if (grad_loss.numel() == 0) {
return gI;
}
auto grad = grad_loss.contiguous();
auto logits = logits_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
static_assert(std::is_same<acc_type<at::BFloat16, true>, float>::value ||
std::is_same<acc_type<at::BFloat16, true>, double>::value,
"accscalar_t for BFloat16 should be float or double");
if (grad.dim() == 0) grad = grad.view(1);
AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
int64_t outer_size = 1;
int64_t dim_size = logits.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= logits.size(i);
for (int64_t i = dim + 1; i < logits.dim(); ++i)
inner_size *= logits.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
DISPATCH_FLOAT_AND_HALF_AND_BF16(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
using accscalar_t = acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<scalar_t_0>(),
grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
dim_size
);
} else {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<accscalar_t>(),
grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
dim_size
);
}
);
THCudaCheck(cudaGetLastError());
return gI;
}
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const bool half_to_float){
return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, half_to_float);
}
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels) {
bool half_to_float = grad_loss.type().scalarType() != logits.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad_loss.type().scalarType() == ScalarType::Float && (logits.type().scalarType() == ScalarType::Half || logits.type().scalarType() == ScalarType::BFloat16)),
"expected input and grad types to match, or input to be at::half and grad to be at::Float");
}
return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, half_to_float);
}
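// Note (added commentary, not part of the original source): the forward kernels
// return, per example, the loss together with max_k + log(sum_j exp(x_j - max_k)),
// i.e. the log-sum-exp of the logits, which is cached for the backward pass.
// The backward kernels then compute the standard cross-entropy gradient
//   dL/dx_j = grad_loss * (exp(x_j - logsumexp) - 1[j == label])
//           = grad_loss * (softmax(x)_j - 1[j == label]).
// How these host functions are exposed to Python (e.g. the pybind11 module of
// this extension) is outside this file.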
|
COCO-LM/fairseq/fused_ops/csrc/xentropy/xentropy_kernel.cu/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/xentropy/xentropy_kernel.cu",
"repo_id": "COCO-LM",
"token_count": 11071
}
| 221 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for encoding"
)
parser.add_argument(
"--inputs", nargs="+", default=["-"], help="input files to filter/encode"
)
parser.add_argument(
"--outputs", nargs="+", default=["-"], help="path to save encoded outputs"
)
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument(
"--min-len",
type=int,
metavar="N",
help="filter sentence pairs with fewer than N tokens",
)
parser.add_argument(
"--max-len",
type=int,
metavar="N",
help="filter sentence pairs with more than N tokens",
)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.output_format == "piece":
def encode(l):
return sp.EncodeAsPieces(l)
elif args.output_format == "id":
def encode(l):
return list(map(str, sp.EncodeAsIds(l)))
else:
raise NotImplementedError
if args.min_len is not None or args.max_len is not None:
def valid(line):
return (args.min_len is None or len(line) >= args.min_len) and (
args.max_len is None or len(line) <= args.max_len
)
else:
        def valid(line):
return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
}
def encode_line(line):
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
return None
for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
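# Example invocation (added commentary, a hedged sketch; file names are
# placeholders):
#
#   python spm_encode.py --model sentencepiece.bpe.model --output_format piece \
#       --inputs train.src train.tgt --outputs train.spm.src train.spm.tgt \
#       --min-len 1 --max-len 250
#
# Note that paired lines are dropped together whenever any side is empty or
# falls outside the length bounds, which keeps the output files line-aligned.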
|
COCO-LM/fairseq/scripts/spm_encode.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/spm_encode.py",
"repo_id": "COCO-LM",
"token_count": 1575
}
| 222 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater
class TestSeq2SeqCollator(unittest.TestCase):
def test_collate(self):
eos_idx = 1
pad_idx = 0
collater = Seq2SeqCollater(
feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
)
# 2 frames in the first sample and 3 frames in the second one
frames1 = np.array([[7, 8], [9, 10]])
frames2 = np.array([[1, 2], [3, 4], [5, 6]])
target1 = np.array([4, 2, 3, eos_idx])
target2 = np.array([3, 2, eos_idx])
sample1 = {"id": 0, "data": [frames1, target1]}
sample2 = {"id": 1, "data": [frames2, target2]}
batch = collater.collate([sample1, sample2])
        # collate() sorts inputs by frame length (longest first) before creating the batch
self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
self.assertEqual(batch["ntokens"], 7)
self.assertTensorEqual(
batch["net_input"]["src_tokens"],
torch.tensor(
[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
),
)
self.assertTensorEqual(
batch["net_input"]["prev_output_tokens"],
torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
)
self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
self.assertTensorEqual(
batch["target"],
torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
)
self.assertEqual(batch["nsentences"], 2)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/speech_recognition/test_collaters.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/speech_recognition/test_collaters.py",
"repo_id": "COCO-LM",
"token_count": 982
}
| 223 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.modules import multihead_attention, sinusoidal_positional_embedding
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
    for id in range(vocab_size):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
Return a dummy task and argument parser, which can be used to
create a model/criterion.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def _test_save_and_load(scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
class TestExportModels(unittest.TestCase):
def test_export_multihead_attention(self):
module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
def test_incremental_state_multihead_attention(self):
module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module1 = torch.jit.script(module1)
module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module2 = torch.jit.script(module2)
state = {}
state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])})
state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])})
v1 = module1.get_incremental_state(state, "key")["a"]
v2 = module2.get_incremental_state(state, "key")["a"]
self.assertEqual(v1, 1)
self.assertEqual(v2, 2)
def test_positional_embedding(self):
module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
embedding_dim=8, padding_idx=1
)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_export_transformer(self):
task, parser = get_dummy_task_and_parser()
TransformerModel.add_args(parser)
args = parser.parse_args([])
model = TransformerModel.build_model(args, task)
scripted = torch.jit.script(model)
_test_save_and_load(scripted)
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_export_transformer_no_token_pos_emb(self):
task, parser = get_dummy_task_and_parser()
TransformerModel.add_args(parser)
args = parser.parse_args([])
args.no_token_positional_embeddings = True
model = TransformerModel.build_model(args, task)
scripted = torch.jit.script(model)
_test_save_and_load(scripted)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_export.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_export.py",
"repo_id": "COCO-LM",
"token_count": 1637
}
| 224 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import os
import tempfile
import unittest
from io import StringIO
import torch
from . import test_binaries
class TestReproducibility(unittest.TestCase):
def _test_reproducibility(
self,
name,
extra_flags=None,
delta=0.0001,
resume_checkpoint="checkpoint1.pt",
max_epoch=3,
):
def get_last_log_stats_containing_string(log_records, search_string):
            for log_record in log_records[::-1]:
if isinstance(log_record.msg, str) and search_string in log_record.msg:
return json.loads(log_record.msg)
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
with self.assertLogs() as logs:
test_binaries.create_dummy_data(data_dir)
test_binaries.preprocess_translation_data(data_dir)
# train epochs 1 and 2 together
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
]
+ extra_flags,
)
train_log = get_last_log_stats_containing_string(logs.records, "train_loss")
valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss")
# train epoch 2, resuming from previous checkpoint 1
os.rename(
os.path.join(data_dir, resume_checkpoint),
os.path.join(data_dir, "checkpoint_last.pt"),
)
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
]
+ extra_flags,
)
train_res_log = get_last_log_stats_containing_string(
logs.records, "train_loss"
)
valid_res_log = get_last_log_stats_containing_string(
logs.records, "valid_loss"
)
for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]:
self.assertAlmostEqual(
float(train_log[k]), float(train_res_log[k]), delta=delta
)
for k in [
"valid_loss",
"valid_ppl",
"valid_num_updates",
"valid_best_loss",
]:
self.assertAlmostEqual(
float(valid_log[k]), float(valid_res_log[k]), delta=delta
)
def test_reproducibility(self):
self._test_reproducibility("test_reproducibility")
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_fp16(self):
self._test_reproducibility(
"test_reproducibility_fp16",
[
"--fp16",
"--fp16-init-scale",
"4096",
],
delta=0.011,
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_memory_efficient_fp16(self):
self._test_reproducibility(
"test_reproducibility_memory_efficient_fp16",
[
"--memory-efficient-fp16",
"--fp16-init-scale",
"4096",
],
)
def test_mid_epoch_reproducibility(self):
self._test_reproducibility(
"test_mid_epoch_reproducibility",
["--save-interval-updates", "3"],
resume_checkpoint="checkpoint_1_3.pt",
max_epoch=1,
)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_reproducibility.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_reproducibility.py",
"repo_id": "COCO-LM",
"token_count": 2595
}
| 225 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written by Xiaoyi Dong
# ------------------------------------------
from .cswin import *
|
CSWin-Transformer/models/__init__.py/0
|
{
"file_path": "CSWin-Transformer/models/__init__.py",
"repo_id": "CSWin-Transformer",
"token_count": 48
}
| 226 |
seed_everything: 42
# ---------------------------- TRAINER -------------------------------------------
trainer:
default_root_dir: ${oc.env:OUTPUT_DIR,/home/t-tungnguyen/ClimaX/exps/pretrain_climax}
precision: 16
gpus: null
num_nodes: 1
accelerator: gpu
strategy: ddp
min_epochs: 1
max_epochs: 100
enable_progress_bar: true
sync_batchnorm: True
enable_checkpointing: True
resume_from_checkpoint: null
limit_val_batches: 0
num_sanity_val_steps: 0
# debugging
fast_dev_run: false
logger:
class_path: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
init_args:
save_dir: ${trainer.default_root_dir}/logs
name: null
version: null
log_graph: False
default_hp_metric: True
prefix: ""
callbacks:
- class_path: pytorch_lightning.callbacks.LearningRateMonitor
init_args:
logging_interval: "step"
- class_path: pytorch_lightning.callbacks.ModelCheckpoint
init_args:
dirpath: "${trainer.default_root_dir}/checkpoints"
save_last: True # additionaly always save model from last epoch
verbose: False
filename: "epoch_{epoch:03d}"
auto_insert_metric_name: False
- class_path: pytorch_lightning.callbacks.RichModelSummary
init_args:
max_depth: -1
- class_path: pytorch_lightning.callbacks.RichProgressBar
# ---------------------------- MODEL -------------------------------------------
model:
lr: 5e-4
beta_1: 0.9
beta_2: 0.95
weight_decay: 1e-5
warmup_steps: 10000
max_steps: 200000
warmup_start_lr: 1e-8
eta_min: 1e-8
net:
class_path: climax.arch.ClimaX
init_args:
default_vars: [
"land_sea_mask",
"orography",
"lattitude",
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"geopotential_50",
"geopotential_250",
"geopotential_500",
"geopotential_600",
"geopotential_700",
"geopotential_850",
"geopotential_925",
"u_component_of_wind_50",
"u_component_of_wind_250",
"u_component_of_wind_500",
"u_component_of_wind_600",
"u_component_of_wind_700",
"u_component_of_wind_850",
"u_component_of_wind_925",
"v_component_of_wind_50",
"v_component_of_wind_250",
"v_component_of_wind_500",
"v_component_of_wind_600",
"v_component_of_wind_700",
"v_component_of_wind_850",
"v_component_of_wind_925",
"temperature_50",
"temperature_250",
"temperature_500",
"temperature_600",
"temperature_700",
"temperature_850",
"temperature_925",
"relative_humidity_50",
"relative_humidity_250",
"relative_humidity_500",
"relative_humidity_600",
"relative_humidity_700",
"relative_humidity_850",
"relative_humidity_925",
"specific_humidity_50",
"specific_humidity_250",
"specific_humidity_500",
"specific_humidity_600",
"specific_humidity_700",
"specific_humidity_850",
"specific_humidity_925",
]
img_size: [32, 64]
patch_size: 2
embed_dim: 1024
depth: 8
decoder_depth: 2
num_heads: 16
mlp_ratio: 4
drop_path: 0.1
drop_rate: 0.1
# ---------------------------- DATA -------------------------------------------
data:
dict_root_dirs: {
'mpi-esm': '/datadrive/datasets/CMIP6/MPI-ESM/5.625deg_equally_np_all_levels',
}
dict_start_idx: {
'mpi-esm': 0,
}
dict_end_idx: {
'mpi-esm': 1,
}
dict_in_variables: {
'mpi-esm': [
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"geopotential_50", "geopotential_250", "geopotential_500", "geopotential_600", "geopotential_700", "geopotential_850", "geopotential_925",
"u_component_of_wind_50", "u_component_of_wind_250", "u_component_of_wind_500", "u_component_of_wind_600", "u_component_of_wind_700", "u_component_of_wind_850", "u_component_of_wind_925",
"v_component_of_wind_50", "v_component_of_wind_250", "v_component_of_wind_500", "v_component_of_wind_600", "v_component_of_wind_700", "v_component_of_wind_850", "v_component_of_wind_925",
"temperature_50", "temperature_250", "temperature_500", "temperature_600", "temperature_700", "temperature_850", "temperature_925",
"specific_humidity_50", "specific_humidity_250", "specific_humidity_500", "specific_humidity_600", "specific_humidity_700", "specific_humidity_850", "specific_humidity_925",
],
}
dict_out_variables: {
'mpi-esm': null,
}
dict_max_predict_ranges: {
'mpi-esm': 28,
}
dict_random_lead_time: {
'mpi-esm': True,
}
dict_hrs_each_step: {
'mpi-esm': 6,
}
dict_buffer_sizes: {
'mpi-esm': 10000,
}
batch_size: 128
num_workers: 1
pin_memory: False
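# Example launch (added commentary, a hedged sketch; the entry point and
# override syntax depend on how the ClimaX package and its LightningCLI
# wrapper are installed):
#   python src/climax/pretrain/train.py --config configs/pretrain_climax.yaml \
#       --trainer.gpus=8 --data.batch_size=128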
|
ClimaX/configs/pretrain_climax.yaml/0
|
{
"file_path": "ClimaX/configs/pretrain_climax.yaml",
"repo_id": "ClimaX",
"token_count": 2267
}
| 227 |
# Global Forecasting
::: climax.global_forecast.datamodule
::: climax.global_forecast.module
|
ClimaX/docs/reference/global_forecast.md/0
|
{
"file_path": "ClimaX/docs/reference/global_forecast.md",
"repo_id": "ClimaX",
"token_count": 32
}
| 228 |
datadir: /data/CMIP6/CMCC
name: geopotential
cmip_name: zg
era_name: z
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/CMCC/config_geopotential.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/CMCC/config_geopotential.yml",
"repo_id": "ClimaX",
"token_count": 61
}
| 229 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: 2m_temperature
cmip_name: tas
era_name: t2m
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_2m_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_2m_temperature.yml",
"repo_id": "ClimaX",
"token_count": 123
}
| 230 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
from climax.arch import ClimaX
from climax.utils.pos_embed import get_1d_sincos_pos_embed_from_grid
class ClimaXClimateBench(ClimaX):
def __init__(
self,
default_vars,
out_vars,
img_size=[32, 64],
time_history=1,
patch_size=2,
embed_dim=1024,
depth=8,
decoder_depth=2,
num_heads=16,
mlp_ratio=4.0,
drop_path=0.1,
drop_rate=0.1,
parallel_patch_embed=False,
freeze_encoder=False,
):
assert out_vars is not None
super().__init__(
default_vars,
img_size,
patch_size,
embed_dim,
depth,
decoder_depth,
num_heads,
mlp_ratio,
drop_path,
drop_rate,
parallel_patch_embed
)
self.out_vars = out_vars
self.time_history = time_history
self.freeze_encoder = freeze_encoder
# used to aggregate multiple timesteps in the input
self.time_pos_embed = nn.Parameter(torch.zeros(1, time_history, embed_dim), requires_grad=True)
self.time_agg = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
self.time_query = nn.Parameter(torch.zeros(1, 1, embed_dim), requires_grad=True)
# initialize time embedding
time_pos_embed = get_1d_sincos_pos_embed_from_grid(self.time_pos_embed.shape[-1], np.arange(self.time_history))
self.time_pos_embed.data.copy_(torch.from_numpy(time_pos_embed).float().unsqueeze(0))
# overwrite ClimaX
# use a linear prediction head for this task
self.head = nn.Linear(embed_dim, img_size[0]*img_size[1])
if freeze_encoder:
for name, p in self.blocks.named_parameters():
name = name.lower()
# we do not freeze the norm layers, as suggested by https://arxiv.org/abs/2103.05247
if 'norm' in name:
continue
else:
p.requires_grad_(False)
def forward_encoder(self, x: torch.Tensor, lead_times: torch.Tensor, variables):
# x: `[B, T, V, H, W]` shape.
if isinstance(variables, list):
variables = tuple(variables)
b, t, _, _, _ = x.shape
x = x.flatten(0, 1) # BxT, V, H, W
# tokenize each variable separately
embeds = []
var_ids = self.get_var_ids(variables, x.device)
if self.parallel_patch_embed:
x = self.token_embeds(x, var_ids) # BxT, V, L, D
else:
for i in range(len(var_ids)):
id = var_ids[i]
embeds.append(self.token_embeds[id](x[:, i : i + 1]))
x = torch.stack(embeds, dim=1) # BxT, V, L, D
# add variable embedding
var_embed = self.get_var_emb(self.var_embed, variables)
x = x + var_embed.unsqueeze(2) # BxT, V, L, D
# variable aggregation
x = self.aggregate_variables(x) # BxT, L, D
# add pos embedding
x = x + self.pos_embed
# add time embedding
# time emb: 1, T, D
x = x.unflatten(0, sizes=(b, t)) # B, T, L, D
x = x + self.time_pos_embed.unsqueeze(2)
# add lead time embedding
lead_time_emb = self.lead_time_embed(lead_times.unsqueeze(-1)) # B, D
lead_time_emb = lead_time_emb.unsqueeze(1).unsqueeze(2)
x = x + lead_time_emb # B, T, L, D
x = x.flatten(0, 1) # BxT, L, D
x = self.pos_drop(x)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
x = self.norm(x) # BxT, L, D
x = x.unflatten(0, sizes=(b, t)) # B, T, L, D
# global average pooling, also used in CNN-LSTM baseline in ClimateBench
x = x.mean(-2) # B, T, D
time_query = self.time_query.repeat_interleave(x.shape[0], dim=0)
x, _ = self.time_agg(time_query, x, x) # B, 1, D
return x
def forward(self, x, y, lead_times, variables, out_variables, metric, lat):
x = self.forward_encoder(x, lead_times, variables) # B, 1, D
preds = self.head(x)
preds = preds.reshape(-1, 1, self.img_size[0], self.img_size[1]) # B, 1, H, W
if metric is None:
loss = None
else:
loss = [m(preds, y, out_variables, lat) for m in metric]
return loss, preds
|
ClimaX/src/climax/climate_projection/arch.py/0
|
{
"file_path": "ClimaX/src/climax/climate_projection/arch.py",
"repo_id": "ClimaX",
"token_count": 2414
}
| 231 |
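`ClimaXClimateBench.forward_encoder` ends by pooling the patch tokens of each timestep and letting a learned query cross-attend over the resulting `T` features, so a whole input history collapses into a single `[B, 1, D]` token before the linear head. The standalone sketch below reproduces just that aggregation step with plain PyTorch and dummy sizes, as an illustration rather than a drop-in piece of the model:

# Standalone sketch of the timestep aggregation in ClimaXClimateBench: a
# learned query cross-attends over the T per-timestep features, collapsing an
# input history into a single token. Dummy sizes; plain PyTorch only.
import torch
import torch.nn as nn

B, T, D, num_heads = 4, 10, 1024, 16

time_query = nn.Parameter(torch.zeros(1, 1, D))                  # learned query
time_agg = nn.MultiheadAttention(D, num_heads, batch_first=True)

x = torch.randn(B, T, D)                                          # pooled per-timestep features
query = time_query.repeat_interleave(B, dim=0)                    # B, 1, D
out, attn_weights = time_agg(query, x, x)                         # out: B, 1, D
print(out.shape, attn_weights.shape)                              # (4, 1, 1024), (4, 1, 10)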
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from climax.arch import ClimaX
class RegionalClimaX(ClimaX):
def __init__(self, default_vars, img_size=..., patch_size=2, embed_dim=1024, depth=8, decoder_depth=2, num_heads=16, mlp_ratio=4, drop_path=0.1, drop_rate=0.1):
super().__init__(default_vars, img_size, patch_size, embed_dim, depth, decoder_depth, num_heads, mlp_ratio, drop_path, drop_rate)
def forward_encoder(self, x: torch.Tensor, lead_times: torch.Tensor, variables, region_info):
# x: `[B, V, H, W]` shape.
if isinstance(variables, list):
variables = tuple(variables)
# tokenize each variable separately
embeds = []
var_ids = self.get_var_ids(variables, x.device)
for i in range(len(var_ids)):
id = var_ids[i]
embeds.append(self.token_embeds[id](x[:, i : i + 1]))
x = torch.stack(embeds, dim=1) # B, V, L, D
# add variable embedding
var_embed = self.get_var_emb(self.var_embed, variables)
x = x + var_embed.unsqueeze(2) # B, V, L, D
# get the patch ids corresponding to the region
region_patch_ids = region_info['patch_ids']
x = x[:, :, region_patch_ids, :]
# variable aggregation
x = self.aggregate_variables(x) # B, L, D
# add pos embedding
x = x + self.pos_embed[:, region_patch_ids, :]
# add lead time embedding
lead_time_emb = self.lead_time_embed(lead_times.unsqueeze(-1)) # B, D
lead_time_emb = lead_time_emb.unsqueeze(1)
x = x + lead_time_emb # B, L, D
x = self.pos_drop(x)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def forward(self, x, y, lead_times, variables, out_variables, metric, lat, region_info):
"""Forward pass through the model.
Args:
x: `[B, Vi, H, W]` shape. Input weather/climate variables
y: `[B, Vo, H, W]` shape. Target weather/climate variables
lead_times: `[B]` shape. Forecasting lead times of each element of the batch.
region_info: Containing the region's information
Returns:
loss (list): Different metrics.
preds (torch.Tensor): `[B, Vo, H, W]` shape. Predicted weather/climate variables.
"""
out_transformers = self.forward_encoder(x, lead_times, variables, region_info) # B, L, D
preds = self.head(out_transformers) # B, L, V*p*p
min_h, max_h = region_info['min_h'], region_info['max_h']
min_w, max_w = region_info['min_w'], region_info['max_w']
preds = self.unpatchify(preds, h = max_h - min_h + 1, w = max_w - min_w + 1)
out_var_ids = self.get_var_ids(tuple(out_variables), preds.device)
preds = preds[:, out_var_ids]
y = y[:, :, min_h:max_h+1, min_w:max_w+1]
lat = lat[min_h:max_h+1]
if metric is None:
loss = None
else:
loss = [m(preds, y, out_variables, lat) for m in metric]
return loss, preds
def evaluate(self, x, y, lead_times, variables, out_variables, transform, metrics, lat, clim, log_postfix, region_info):
_, preds = self.forward(x, y, lead_times, variables, out_variables, metric=None, lat=lat, region_info=region_info)
min_h, max_h = region_info['min_h'], region_info['max_h']
min_w, max_w = region_info['min_w'], region_info['max_w']
y = y[:, :, min_h:max_h+1, min_w:max_w+1]
lat = lat[min_h:max_h+1]
clim = clim[:, min_h:max_h+1, min_w:max_w+1]
return [m(preds, y, transform, out_variables, lat, clim, log_postfix) for m in metrics]
|
ClimaX/src/climax/regional_forecast/arch.py/0
|
{
"file_path": "ClimaX/src/climax/regional_forecast/arch.py",
"repo_id": "ClimaX",
"token_count": 1754
}
| 232 |
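`RegionalClimaX.forward` expects a `region_info` dict with the flat ids of the patches covering the region plus its pixel bounding box. The helper below is a hypothetical illustration of how such a dict could be built for a row-major patchification (it is not the repo's own region utility); it assumes square patches and inclusive pixel bounds, matching how `forward` calls `unpatchify` with `max_h - min_h + 1` and `max_w - min_w + 1`:

# Hypothetical helper (not the repo's own region utility): build the
# region_info dict that RegionalClimaX.forward expects, i.e. the flat ids of
# the patches covering an inclusive pixel bounding box plus the box itself.
import torch

def make_region_info(min_h, max_h, min_w, max_w, img_size=(32, 64), patch_size=2):
    H, W = img_size
    n_w = W // patch_size                                  # patches per row
    ph0, ph1 = min_h // patch_size, max_h // patch_size
    pw0, pw1 = min_w // patch_size, max_w // patch_size
    patch_ids = [r * n_w + c for r in range(ph0, ph1 + 1) for c in range(pw0, pw1 + 1)]
    return {
        "patch_ids": torch.tensor(patch_ids, dtype=torch.long),
        "min_h": min_h, "max_h": max_h, "min_w": min_w, "max_w": max_w,
    }

# Example: a 16 x 32 pixel box in the top-left corner of a 32 x 64 grid.
info = make_region_info(0, 15, 0, 31)
print(len(info["patch_ids"]))  # 8 rows x 16 cols of patches = 128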