data "azurerm_resource_group" "ws" {
name = "rg-${var.tre_id}-ws-${local.short_workspace_id}"
}
data "azurerm_key_vault" "ws" {
name = local.key_vault_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_key_vault_secret" "aad_tenant_id" {
name = "auth-tenant-id"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_key_vault_secret" "workspace_client_id" {
name = "workspace-client-id"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_key_vault_secret" "workspace_client_secret" {
name = "workspace-client-secret"
key_vault_id = data.azurerm_key_vault.ws.id
}
data "azurerm_log_analytics_workspace" "workspace" {
name = "log-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_storage_account" "stg" {
name = local.storage_name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_service_plan" "workspace" {
name = "plan-${var.workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_virtual_network" "ws" {
name = "vnet-${var.tre_id}-ws-${local.short_workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_virtual_network" "core" {
name = "vnet-${var.tre_id}"
resource_group_name = local.core_resource_group_name
}
data "azurerm_subnet" "web_app" {
name = "WebAppsSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_subnet" "services" {
name = "ServicesSubnet"
virtual_network_name = data.azurerm_virtual_network.ws.name
resource_group_name = data.azurerm_resource_group.ws.name
}
data "azurerm_subnet" "resource_processor" {
name = "ResourceProcessorSubnet"
resource_group_name = local.core_resource_group_name
virtual_network_name = data.azurerm_virtual_network.core.name
}
data "azurerm_private_dns_zone" "azurewebsites" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
resource_group_name = local.core_resource_group_name
}
data "azurerm_private_dns_zone" "postgres" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.postgres.database.azure.com"]
resource_group_name = local.core_resource_group_name
}
AzureTRE/templates/workspace_services/ohdsi/terraform/data.tf
# This file has a .terraform file extension in order to avoid the 'terraform init' validation checks that are executed by the 'make bundle-build' command.
# The Dockerfile includes a RUN command to change the extension from .terraform to .tf after the files from the base workspace are copied to this directory.
locals {
core_resource_group_name = "rg-${var.tre_id}"
# STorage AirLock IMport InProgress
import_in_progress_storage_name = lower(replace("stalimip${var.tre_id}", "-", ""))
}
module "terraform_azurerm_environment_configuration" {
source = "git::https://github.com/microsoft/terraform-azurerm-environment-configuration.git?ref=0.2.0"
arm_environment = var.arm_environment
}
data "azurerm_storage_account" "sa_import_inprogress" {
name = local.import_in_progress_storage_name
resource_group_name = local.core_resource_group_name
}
resource "azurerm_private_endpoint" "sa_import_inprogress_pe" {
name = "stg-ip-import-blob-${local.workspace_resource_name_suffix}"
location = var.location
resource_group_name = azurerm_resource_group.ws.name
subnet_id = module.network.services_subnet_id
lifecycle { ignore_changes = [tags] }
private_service_connection {
name = "psc-stg-ip-import-blob-${local.workspace_resource_name_suffix}"
private_connection_resource_id = data.azurerm_storage_account.sa_import_inprogress.id
is_manual_connection = false
subresource_names = ["Blob"]
}
tags = local.tre_workspace_tags
}
resource "azurerm_private_dns_zone" "stg_import_inprogress_blob" {
name = "${data.azurerm_storage_account.sa_import_inprogress.name}.${module.terraform_azurerm_environment_configuration.private_links["privatelink.blob.core.windows.net"]}"
resource_group_name = azurerm_resource_group.ws.name
tags = local.tre_workspace_tags
depends_on = [ azurerm_private_endpoint.sa_import_inprogress_pe ]
}
resource "azurerm_private_dns_a_record" "stg_import_inprogress_blob" {
name = "@" # Root record
zone_name = azurerm_private_dns_zone.stg_import_inprogress_blob.name
resource_group_name = azurerm_resource_group.ws.name
ttl = 300
records = [azurerm_private_endpoint.sa_import_inprogress_pe.private_service_connection[0].private_ip_address]
tags = local.tre_workspace_tags
}
resource "azurerm_private_dns_zone_virtual_network_link" "stg_import_inprogress_blob" {
name = "vnl-stg-ip-import-blob-${local.workspace_resource_name_suffix}"
resource_group_name = azurerm_resource_group.ws.name
private_dns_zone_name = azurerm_private_dns_zone.stg_import_inprogress_blob.name
virtual_network_id = module.network.vnet_id
tags = local.tre_workspace_tags
depends_on = [ azurerm_private_dns_a_record.stg_import_inprogress_blob ]
}
AzureTRE/templates/workspaces/airlock-import-review/terraform/import_review_resources.terraform
locals {
core_vnet = "vnet-${var.tre_id}"
short_workspace_id = substr(var.tre_resource_id, -4, -1)
core_resource_group_name = "rg-${var.tre_id}"
workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
address_spaces = jsondecode(base64decode(var.address_spaces))
vnet_subnets = cidrsubnets(local.address_spaces[0], 1, 1)
services_subnet_address_prefix = local.vnet_subnets[0]
webapps_subnet_address_prefix = local.vnet_subnets[1]
}
AzureTRE/templates/workspaces/base/terraform/network/locals.tf
import React, { useContext } from 'react';
import { ResourceDebug } from '../shared/ResourceDebug';
import { Pivot, PivotItem } from '@fluentui/react';
import { ResourcePropertyPanel } from '../shared/ResourcePropertyPanel';
import { Resource } from '../../models/resource';
import { ResourceHistoryList } from '../shared/ResourceHistoryList';
import { ResourceOperationsList } from '../shared/ResourceOperationsList';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { RoleName, WorkspaceRoleName } from '../../models/roleNames';
import { ResourceType } from '../../models/resourceType';
import { SecuredByRole } from './SecuredByRole';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
interface ResourceBodyProps {
resource: Resource,
readonly?: boolean;
}
export const ResourceBody: React.FunctionComponent<ResourceBodyProps> = (props: ResourceBodyProps) => {
const workspaceCtx = useContext(WorkspaceContext);
const operationsRolesByResourceType = {
[ResourceType.Workspace]: [RoleName.TREAdmin, WorkspaceRoleName.WorkspaceOwner],
[ResourceType.SharedService]: [RoleName.TREAdmin],
[ResourceType.WorkspaceService]: [WorkspaceRoleName.WorkspaceOwner],
[ResourceType.UserResource]: [WorkspaceRoleName.WorkspaceOwner, WorkspaceRoleName.WorkspaceResearcher]
};
const historyRolesByResourceType = {
[ResourceType.Workspace]: [RoleName.TREAdmin, WorkspaceRoleName.WorkspaceOwner],
[ResourceType.SharedService]: [RoleName.TREAdmin],
[ResourceType.WorkspaceService]: [WorkspaceRoleName.WorkspaceOwner],
[ResourceType.UserResource]: [WorkspaceRoleName.WorkspaceOwner, WorkspaceRoleName.WorkspaceResearcher]
};
const operationsRoles = operationsRolesByResourceType[props.resource.resourceType];
const historyRoles = historyRolesByResourceType[props.resource.resourceType];
const workspaceId = workspaceCtx.workspace?.id || "";
return (
<Pivot aria-label="Resource Menu" className='tre-resource-panel'>
<PivotItem
headerText="Overview"
headerButtonProps={{
'data-order': 1,
'data-title': 'Overview',
}}
>
<div style={{ padding: 5 }}>
{props.readonly}
<ReactMarkdown remarkPlugins={[remarkGfm]}>{props.resource.properties?.overview || props.resource.properties?.description}</ReactMarkdown>
</div>
</PivotItem>
{
!props.readonly &&
<PivotItem headerText="Details">
<ResourcePropertyPanel resource={props.resource} />
<ResourceDebug resource={props.resource} />
</PivotItem>
}
{
!props.readonly && historyRoles &&
<PivotItem headerText="History">
<SecuredByRole allowedAppRoles={historyRoles} allowedWorkspaceRoles={historyRoles} workspaceId={workspaceId} errorString={`Must have ${historyRoles.join(" or ")} role`} element={
<ResourceHistoryList resource={props.resource} />
} />
</PivotItem>
}
{
!props.readonly && operationsRoles &&
<PivotItem headerText="Operations">
<SecuredByRole allowedAppRoles={operationsRoles} allowedWorkspaceRoles={operationsRoles} workspaceId={workspaceId} errorString={`Must have ${operationsRoles.join(" or ")} role`} element={
<ResourceOperationsList resource={props.resource} />
} />
</PivotItem>
}
</Pivot>
);
};
AzureTRE/ui/app/src/components/shared/ResourceBody.tsx
import React from 'react';
import { getTheme, Icon, mergeStyles, Stack } from '@fluentui/react';
import { Link } from 'react-router-dom';
import { UserMenu } from './UserMenu';
import { NotificationPanel } from './notifications/NotificationPanel';
export const TopNav: React.FunctionComponent = () => {
return (
<>
<div className={contentClass}>
<Stack horizontal>
<Stack.Item grow={100}>
<Link to='/' className='tre-home-link'>
<Icon iconName="TestBeakerSolid" style={{ marginLeft: '10px', marginRight: '10px', verticalAlign: 'middle' }} />
<h5 style={{display: 'inline'}}>Azure TRE</h5>
</Link>
</Stack.Item>
<Stack.Item>
<NotificationPanel />
</Stack.Item>
<Stack.Item grow>
<UserMenu />
</Stack.Item>
</Stack>
</div>
</>
);
};
const theme = getTheme();
const contentClass = mergeStyles([
{
backgroundColor: theme.palette.themeDark,
color: theme.palette.white,
lineHeight: '50px',
padding: '0 10px 0 10px'
}
]);
AzureTRE/ui/app/src/components/shared/TopNav.tsx
// from Dan Abramov - https://overreacted.io/making-setinterval-declarative-with-react-hooks/
import { useEffect, useRef } from "react";
export const useInterval = (callback: () => void, delay: number | null) => {
const savedCallback = useRef(callback);
useEffect(() => {
savedCallback.current = callback;
}, [callback]);
useEffect(() => {
const tick = () => {
savedCallback.current && savedCallback.current();
}
if (delay !== null) {
let id = setInterval(tick, delay);
return () => clearInterval(id);
}
}, [delay]);
};
AzureTRE/ui/app/src/components/shared/notifications/useInterval.ts
import { App } from './App';
import { mergeStyles } from '@fluentui/react';
import reportWebVitals from './reportWebVitals';
import { BrowserRouter } from 'react-router-dom';
import { pca } from './authConfig'
import { MsalProvider } from '@azure/msal-react';
import { Provider } from 'react-redux';
import { store } from './store/store';
import { createRoot } from 'react-dom/client';
// Inject some global styles
mergeStyles({
':global(body,html)': {
margin: 0,
padding: 0,
height: '100vh',
},
});
const root = createRoot(document.getElementById("root") as HTMLElement);
root.render(
<MsalProvider instance={pca}>
<BrowserRouter>
<Provider store={store}>
<App />
</Provider>
</BrowserRouter>
</MsalProvider>);
// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();
AzureTRE/ui/app/src/index.tsx
import { Resource } from "./resource";
export interface WorkspaceService extends Resource {
workspaceId: string
}
AzureTRE/ui/app/src/models/workspaceService.ts
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
MODEL_DIR=../../checkpoints/RE-BC5CDR-BioGPT
MODEL=checkpoint_avg.pt
DATA_DIR=${PWD}/../../data/BC5CDR/relis-bin
BASE_DATA_DIR=${DATA_DIR%/*}
BIN_DATA_DIR=${DATA_DIR##*/}
DATA_PREFIX=${BIN_DATA_DIR%-*}
RAW_DATA_DIR=${BASE_DATA_DIR}/raw
OUTPUT_FILE=generate_${MODEL}
INPUT_FILE=${RAW_DATA_DIR}/${DATA_PREFIX}_test.tok.bpe.x
OUTPUT_FILE=${MODEL_DIR}/${OUTPUT_FILE}
GOLD_FILE=${RAW_DATA_DIR}/CDR_Data/CDR.Corpus.v010516/CDR_TestSet.PubTator.txt
ENTITY_FILE=${RAW_DATA_DIR}/test.entities.json
PMID_FILE=${RAW_DATA_DIR}/${DATA_PREFIX}_test.pmid
# average checkpoints
if [ ! -f "${MODEL_DIR}/${MODEL}" ]; then
python ../../scripts/average_checkpoints.py --inputs=${MODEL_DIR} --output=${MODEL_DIR}/${MODEL} --num-epoch-checkpoints=5
fi
# inference
if [ ! -f "$OUTPUT_FILE" ]; then
echo "Begin inferencing ${INPUT_FILE} using ${MODEL_DIR}/${MODEL}"
python ../../inference.py --data_dir=${DATA_DIR} --model_dir=${MODEL_DIR} --model_file=${MODEL} --src_file=${INPUT_FILE} --output_file=${OUTPUT_FILE}
fi
# debpe
sed -i "s/@@ //g" ${OUTPUT_FILE}
# detok
perl ${MOSES}/scripts/tokenizer/detokenizer.perl -l en -a < ${OUTPUT_FILE} > ${OUTPUT_FILE}.detok
# postprocess
python postprocess.py ${OUTPUT_FILE}.detok ${ENTITY_FILE} ${PMID_FILE}
# eval
cd ${RAW_DATA_DIR}/BC5CDR_Evaluation-0.0.3
bash eval_relation.sh PubTator ${OLDPWD}/${GOLD_FILE} ${OLDPWD}/${OUTPUT_FILE}.detok.extracted.PubTator
cd ${OLDPWD}
BioGPT/examples/RE-BC5CDR/infer.sh
# BitBLAS
BitBLAS is a library to support mixed-precision BLAS operations on GPUs, for example, the $W_{wdtype}A_{adtype}$ mixed-precision matrix multiplication where $C_{cdtype}[M, N] = A_{adtype}[M, K] \times W_{wdtype}[N, K]$.
BitBLAS aims to support efficient mixed-precision DNN model deployment, especially the $W_{wdtype}A_{adtype}$ quantization in large language models (LLMs), for example, the $W_{UINT4}A_{FP16}$ in [GPTQ](https://arxiv.org/abs/2210.17323), the $W_{INT2}A_{FP16}$ in [BitDistiller](https://arxiv.org/abs/2402.10631), the $W_{INT2}A_{INT8}$ in [BitNet-b1.58](https://arxiv.org/abs/2402.17764). BitBLAS is based on techniques from our accepted submission at OSDI'24.
Some of the key features of BitBLAS include:
- High performance matrix multiplication for both GEMV (e.g., the single batch auto-regressive decode phase in LLM) and GEMM (e.g., the batched auto-regressive decode phase and the prefill phase in LLM):
- $W_{wdtype}A_{adtype}$ mixed-precision matrix multiplication including FP16xINT4/2/1, INT8xINT4/2/1, etc. Please check out the [support matrix](#support-matrix) for detailed data type support.
- Matrix multiplication like FP16xFP16 and INT8xINT8.
- Auto-Tensorization for TensorCore-like hardware instructions.
- Implemented [integration](https://github.com/microsoft/BitBLAS/blob/main/integration/) with [PyTorch](https://pytorch.org/), [AutoGPTQ](https://github.com/AutoGPTQ/AutoGPTQ), [vLLM](https://github.com/vllm-project/vllm) and [BitNet-b1.58](https://huggingface.co/1bitLLM/bitnet_b1_58-3B) for LLM deployment. Please check out the [benchmark summary](#benchmark-summary) for detailed end-to-end LLM inference performance.
- BitBLAS first implemented $W_{INT2}A_{INT8}$ GEMV/GEMM in [BitNet-b1.58](https://arxiv.org/abs/2402.17764) with an 8x/2x speedup over cuBLAS $W_{FP16}A_{FP16}$ on A100; please check out [op_benchmark_a100_int2_scaling](https://github.com/microsoft/BitBLAS/blob/main/images/figures/op_benchmark_a100_int2_scaling.png) for detailed benchmark results, and the [BitNet-b1.58 integration](https://github.com/microsoft/BitBLAS/blob/main/integration/BitNet) for the integration with the third-party reproduction of the BitNet-b1.58 model.
- Support customizing mixed-precision DNN operations for your specific scenarios via the flexible DSL (TIR Script).
## Latest News
- 2024.04.19: BitBLAS is now open source! We are excited to announce that BitBLAS, a high-performance library for mixed-precision DNN model deployment, is now available to the public.
- 2024.04.30: BitBLAS now supports FP8 TensorCore!
## Integration Example of FasterTransformer with BitBLAS

## Benchmark Summary
BitBLAS achieves exceptional performance across a variety of computational patterns. Below are selected results showcasing its capabilities:
- End2End Integration with Quantize Inference Kernel for AutoGPTQ and vLLM.
<div>
<img src="./images/figures/end2end_llama_13b_auto_gptq.png" alt="AutoGPTQ end2end performance of llama13b on A100" style="width: 24%;" />
<img src="./images/figures/end2end_llama_70b_auto_gptq.png" alt="AutoGPTQ end2end performance of llama13b on A100" style="width: 24%;" />
<img src="./images/figures/end2end_llama_13b_vllm.png" alt="vLLM end2end performance of llama13b on A100" style="width: 24%;" />
<img src="./images/figures/end2end_llama_70B_vllm.png" alt="vLLM end2end performance of llama13b on A100" style="width: 24%;" />
</div>
- Weight Only Matmul performance on A100
<div>
<img src="./images/figures/op_benchmark_a100_wq_gemv_e7.png" alt="gemm weight only performance on A100" style="width: 49%;" />
<img src="./images/figures/op_benchmark_a100_wq_gemm_e7.png" alt="gemm weight only performance on A100" style="width: 49%;" />
</div>
- TensorCore FP16/INT8 GEMM Performance Vs. Vendor Library on A100 and RTX4090
<div>
<img src="./images/figures/op_benchmark_consistent_gemm_fp16.png" alt="gemm fp16 performance on 4090 and a100" style="width: 49%;" />
<img src="./images/figures/op_benchmark_consistent_gemm_int8.png" alt="gemm int8 performance on 4090 and a100" style="width: 49%;" />
</div>
For more detailed information on benchmark sets with other formats (NF4/FP4) and other devices (RTX 3090), please refer to the [benchmark](./benchmark/README.md).
## Support Matrix
| **A_dtype** | **W_dtype** | **Accum_dtype** | **Out_dtype** | **BitBLAS<br>Support** | **Tested<br>Platform** |
|:-----------:|:-----------:|:---------------:|:---------------:|:----------------------:|:----------------------:|
| FP16 | FP16 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | FP4_E2M1 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | INT8 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | UINT4/INT4 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | UINT2/INT2 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | UINT1 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP16 | NF4 | FP16 | FP16 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| INT8 | INT8 | INT32 | FP32/INT32/FP16/INT8 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| INT8 | UINT4/INT4 | INT32 | FP32/INT32/FP16/INT8 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| INT8 | UINT2/INT2 | INT32 | FP32/INT32/FP16/INT8 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| INT8 | UINT1 | INT32 | FP32/INT32/FP16/INT8 | **√** | V100(SM_70)/A100(SM_80)/A6000(SM_86)/RTX 4090(SM_89) |
| FP8_E4M3 | FP8_E4M3 | FP32 | FP32/FP16 | **√** | RTX 4090(SM_89) |
| FP8_E5M2 | FP8_E5M2 | FP32 | FP32/FP16 | **√** | RTX 4090(SM_89) |
We are continuously expanding the support matrix. If you have any specific requirements, please feel free to open an issue or PR.
## Getting Started
- [Installation](https://github.com/microsoft/BitBLAS/blob/main/docs/Installation.md):
To install BitBLAS, please check out the [installation](https://github.com/microsoft/BitBLAS/blob/main/docs/Installation.md) document. Also make sure you already have the CUDA toolkit (version >= 11) installed on the system, or you can simply install BitBLAS with `pip install bitblas` from PyPI. Currently we only provide wheel files for CUDA >= 12.1 and Ubuntu >= 20.04 with Python >= 3.8; if you are using a different CUDA version or OS, you may need to build BitBLAS from source.
- [QuickStart](https://github.com/microsoft/BitBLAS/blob/main/docs/QuickStart.md): BitBLAS provides two Python APIs to perform mixed-precision matrix multiplication (a usage sketch follows this list):
- ```bitblas.Matmul``` implements the $W_{wdtype}A_{adtype}$ mixed-precision matrix multiplication of $C_{cdtype}[M, N] = A_{adtype}[M, K] \times W_{wdtype}[N, K]$.
- ```bitblas.Linear``` is a PyTorch ```nn.Linear```-like module to support a Linear of mixed-precision.
- [Integration](https://github.com/microsoft/BitBLAS/tree/main/integration): Explore how BitBLAS seamlessly integrates with LLM deployment frameworks through our examples. Discover the ease of integrating BitBLAS with PyTorch, AutoGPTQ, and vLLM in the 3rd-party integration examples.
- [Customization](https://github.com/microsoft/BitBLAS/blob/main/docs/ExtendOperatorsWithDSL.md): BitBLAS supports implementing customized mixed-precision DNN operations rather than matrix multiplication with the flexible DSL (TIR Script).
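The following sketch illustrates what driving `bitblas.Matmul` could look like for a $W_{INT4}A_{FP16}$ case, based only on the QuickStart description above and the `Matmul`/`MatmulConfig` classes exported from `bitblas.ops`. The exact configuration field names (`A_dtype`, `W_dtype`, `accum_dtype`, `out_dtype`, `layout`) and the `transform_weight` packing step are assumptions rather than the confirmed API; refer to the [QuickStart](https://github.com/microsoft/BitBLAS/blob/main/docs/QuickStart.md) document for the authoritative usage.

```python
import torch
import bitblas  # assumes Matmul/MatmulConfig are re-exported at the package top level

# Hypothetical W_INT4 x A_FP16 GEMV: C[1, N] = A[1, K] x W[N, K]^T
config = bitblas.MatmulConfig(
    M=1, N=1024, K=1024,
    A_dtype="float16",    # activation dtype (assumed field name)
    W_dtype="int4",       # 4-bit weight dtype (assumed field name)
    accum_dtype="float16",
    out_dtype="float16",
    layout="nt",          # A row-major, W transposed, matching the formula above
)
matmul = bitblas.Matmul(config=config)

# Random weights in the int4 range stored as int8, packed into BitBLAS's weight layout.
weight = torch.randint(-8, 8, (1024, 1024), dtype=torch.int8).cuda()
packed_weight = matmul.transform_weight(weight)   # assumed weight-packing helper

activation = torch.rand((1, 1024), dtype=torch.float16).cuda()
output = matmul(activation, packed_weight)        # -> (1, 1024) float16 tensor
print(output.shape, output.dtype)
```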
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the Microsoft Open Source Code of Conduct. For more information see the Code of Conduct FAQ or contact [email protected] with any additional questions or comments.
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow Microsoft's Trademark & Brand Guidelines. Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies.
BitBLAS/README.md
# Installation Guide
## Prerequisites
- **Operating System**: Linux (Ubuntu 20.04 or later is recommended for installation via wheel or PyPI; for other Linux distributions, see the [Building from Source](#building-from-source) section.)
- **Python Version**: >= 3.7
- **CUDA Version**: >= 10.0
## Installing with pip
The easiest way to install BitBLAS is directly from PyPI using pip. To install the latest version, run the following command in your terminal.
**Note**: Currently, the bitblas wheel is only supported on Linux. We recommend Ubuntu 20.04 or later, as the wheels are built on that platform. We currently provide wheels only for CUDA >= 12.1 and Python >= 3.8; if you are using a different CUDA version, you may need to build BitBLAS from source.
```bash
pip install bitblas
```
Alternatively, you may choose to install BitBLAS using prebuilt packages available on the Release Page:
```bash
pip install bitblas-0.0.0.dev0+ubuntu.20.4.cu120-py3-none-any.whl
```
After installing BitBLAS, you can verify the installation by running:
```bash
python -c "import bitblas; print(bitblas.__version__)"
```
## Building from Source
We recommend building BitBLAS from source inside a Docker container that has the necessary dependencies. You can start such a container with the following command:
```bash
docker run --gpus all -it --rm --ipc=host nvcr.io/nvidia/pytorch:23.01-py3
```
To build and install BitBLAS directly from source, follow the steps below. This process requires certain prerequisites from Apache TVM, which can be installed on Ubuntu/Debian-based systems using the following commands:
```bash
sudo apt-get update
sudo apt-get install -y python3 python3-dev python3-setuptools gcc libtinfo-dev zlib1g-dev build-essential cmake libedit-dev libxml2-dev
```
After installing the prerequisites, you can clone the BitBLAS repository and install it using pip:
```bash
git clone --recursive https://github.com/Microsoft/BitBLAS.git
cd BitBLAS
pip install . # Please be patient, this may take some time.
```
If you want to install BitBLAS in development (editable) mode, run the following command:
```bash
pip install -e .
```
BitBLAS/docs/Installation.md
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import torch
from modeling_bitnet import BitnetForCausalLM
torch.set_grad_enabled(False)
parser = argparse.ArgumentParser()
parser.add_argument('--hf_path', default='1bitLLM/bitnet_b1_58-3B', type=str)
def profile(model, input_data):
import time
import numpy as np
model = model.cuda()
model.eval()
def get_runtime(num_repeats=1):
tic = time.time()
for _ in range(num_repeats):
_ = model(input_data)
torch.cuda.synchronize()
return (time.time() - tic) * 1000 / num_repeats
with torch.no_grad():
st = time.time()
while time.time() - st < 1.0:
get_runtime() # warmup
warmup_runtime = get_runtime()
num_repeats = max(1, int(1000 / warmup_runtime))
times = get_runtime(num_repeats)
return np.mean(times)
def main():
    # Parse the CLI arguments so --hf_path is actually honoured (default: 1bitLLM/bitnet_b1_58-3B).
    args = parser.parse_args()
    model = BitnetForCausalLM.from_pretrained(
        args.hf_path,
        use_flash_attention_2=True,
        torch_dtype=torch.float16,
    ).cuda().half()
with torch.no_grad():
model._post_process_weights()
input_id = torch.ones(1, 1).long().cuda()
# test forward
output = model(input_id)
# make sure the output is the same as the simulated output
print(output)
if __name__ == '__main__':
main()
BitBLAS/integration/BitNet/eval_correctness.py
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include <assert.h>
#include "ladder_kernel.h"
#include "mma.h"
// nvcc ladder_kernel.cu -gencode arch=compute_80,code=sm_80
__global__ void __launch_bounds__(128) bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt(half* __restrict__ A, half* __restrict__ QB, half* __restrict__ D) {
signed char* B = ((int8_t *)QB);
half* Scale = (half *)((int8_t *)QB + 19660800);
half* Zeros = (half *)((int8_t *)QB + 20889600);
// const dim3 GridDim(15360, 1, 1);
// const dim3 BlockDim(128, 1, 1);
// bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
half in_thread_C_local[1];
signed char B_local[4];
half B_decode_local[8];
half A_local[8];
__shared__ half red_result[1];
in_thread_C_local[0] = __float2half_rn(0.000000e+00f);
for (int ax1_0 = 0; ax1_0 < 5; ++ax1_0) {
*(int*)(B_local + 0) = *(int*)(B + (((((int)blockIdx.x) * 2560) + (ax1_0 * 512)) + (((int)threadIdx.x) * 4)));
for (int ax1 = 0; ax1 < 8; ++ax1) {
B_decode_local[ax1] = (((((half)((((uint)B_local[(ax1 >> 1)]) >> (((uint)(ax1 & 1)) * (uint)4)) & (uint)15)) - __float2half_rn(7.000000e+00f)) * Scale[(((((int)blockIdx.x) * 40) + (ax1_0 * 8)) + (((int)threadIdx.x) >> 4))]) - Zeros[(((((int)blockIdx.x) * 40) + (ax1_0 * 8)) + (((int)threadIdx.x) >> 4))]);
}
*(uint4*)(A_local + 0) = *(uint4*)(A + ((ax1_0 * 1024) + (((int)threadIdx.x) * 8)));
for (int ax1_2_0 = 0; ax1_2_0 < 4; ++ax1_2_0) {
for (int ax1_2_1 = 0; ax1_2_1 < 2; ++ax1_2_1) {
in_thread_C_local[0] = (in_thread_C_local[0] + (A_local[((ax1_2_0 * 2) + ax1_2_1)] * B_decode_local[((ax1_2_0 * 2) + ax1_2_1)]));
}
}
}
half red_buf0[1];
uint mask[1];
half t0[1];
half red_buf0_1[1];
uint mask_1[1];
half t0_1[1];
__shared__ half red_buf_staging[4];
red_buf0_1[0] = in_thread_C_local[0];
mask_1[0] = __activemask();
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 16, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 8, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 4, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 2, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
t0_1[0] = __shfl_down_sync(mask_1[0], red_buf0_1[0], 1, 32);
red_buf0_1[0] = (red_buf0_1[0] + t0_1[0]);
if ((((int)threadIdx.x) % 32) == 0) {
red_buf_staging[(((int)threadIdx.x) >> 5)] = red_buf0_1[0];
}
__syncthreads();
if (((int)threadIdx.x) < 4) {
red_buf0[0] = red_buf_staging[((int)threadIdx.x)];
}
mask[0] = (__activemask() & (uint)15);
t0[0] = __shfl_down_sync(mask[0], red_buf0[0], 2, 32);
red_buf0[0] = (red_buf0[0] + t0[0]);
t0[0] = __shfl_down_sync(mask[0], red_buf0[0], 1, 32);
red_buf0[0] = (red_buf0[0] + t0[0]);
if (((int)threadIdx.x) == 0) {
((volatile half*)red_result)[0] = red_buf0[0];
}
__syncthreads();
if (((int)threadIdx.x) == 0) {
D[((int)blockIdx.x)] = (half)(((volatile half*)red_result)[0]);
}
}
__global__ void __launch_bounds__(128) bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt(half* __restrict__ A, half* __restrict__ QB, half* __restrict__ D) {
signed char* B = ((int8_t *)QB);
half* Scale = (half *)((int8_t *)QB + 19660800);
half* Zeros = (half *)((int8_t *)QB + 20889600);
// const dim3 GridDim(160, 2, 1);
// const dim3 BlockDim(32, 4, 1);
// bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
const int MAX_BLOCK_N = 10;
const auto baseBlockIdx = blockIdx.x + gridDim.x *blockIdx.y;
const auto totalPanel = (gridDim.x * gridDim.y +MAX_BLOCK_N * gridDim.x - 1) / (MAX_BLOCK_N * gridDim.x);
const auto totalBlock = gridDim.x * gridDim.y;
const auto panelIdx = baseBlockIdx / (MAX_BLOCK_N *gridDim.x);
const auto strideLd = panelIdx + 1 < totalPanel ?MAX_BLOCK_N : (totalBlock - panelIdx * (MAX_BLOCK_N *gridDim.x)) / gridDim.x;
const auto bx = (panelIdx & 1) ? gridDim.x -(baseBlockIdx - panelIdx * MAX_BLOCK_N * gridDim.x) /strideLd - 1 : (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) / strideLd;
const auto by = (baseBlockIdx - panelIdx * MAX_BLOCK_N *gridDim.x) % strideLd + panelIdx * MAX_BLOCK_N;
const auto bz = blockIdx.z;
const dim3 blockIdx(bx, by, bz);
half C_reindex_shared_warp[48];
__shared__ half A_reindex_shared[4096];
__shared__ signed char B_shared[3072];
__shared__ half B_decode_reindex_shared[3072];
signed char B_local[4];
half B_decode_reindex_local[8];
half A_reindex_shared_warp[8];
half B_decode_reindex_shared_warp[48];
signed char B_local_1[4];
half B_decode_reindex_local_1[8];
half A_reindex_shared_warp_1[8];
half B_decode_reindex_shared_warp_1[48];
for (int var = 0; var < 1; ++var) {
for (int ax2_0_3_init = 0; ax2_0_3_init < 6; ++ax2_0_3_init) {
for (int i = 0; i < 8; ++i) {
C_reindex_shared_warp[(ax2_0_3_init * 8) + i] = 0.0;}
;
}
#pragma unroll
for (int ax0_ax1_ax2_fused_0 = 0; ax0_ax1_ax2_fused_0 < 2; ++ax0_ax1_ax2_fused_0) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(A_reindex_shared + ((((ax0_ax1_ax2_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_reindex_shared + ((((ax0_ax1_ax2_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + (((((((int)blockIdx.y) * 327680) + (ax0_ax1_ax2_fused_0 * 163840)) + (((int)threadIdx.y) * 40960)) + ((((int)threadIdx.x) >> 2) * 5120)) + ((((int)threadIdx.x) & 3) * 8)))), "n"(16)
);
}
}
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 1; ++ax0_ax1_fused_0) {
if (((int)threadIdx.y) < 3) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(B_shared + ((((int)threadIdx.y) * 512) + (((int)threadIdx.x) * 16)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + ((((int)threadIdx.y) * 512) + (((int)threadIdx.x) * 16))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + (((((int)blockIdx.x) * 245760) + (((int)threadIdx.y) * 81920)) + (((int)threadIdx.x) * 2560)))), "n"(16)
);
}
}
}
__asm__ __volatile__("cp.async.commit_group;");
for (int ax3_0_0 = 0; ax3_0_0 < 159; ++ax3_0_0) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_ax2_fused_0_1 = 0; ax0_ax1_ax2_fused_0_1 < 2; ++ax0_ax1_ax2_fused_0_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(A_reindex_shared + (((((((ax3_0_0 + 1) & 1) * 2048) + (ax0_ax1_ax2_fused_0_1 * 1024)) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_reindex_shared + (((((((ax3_0_0 + 1) & 1) * 2048) + (ax0_ax1_ax2_fused_0_1 * 1024)) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + (((((((((int)blockIdx.y) * 327680) + (ax0_ax1_ax2_fused_0_1 * 163840)) + (((int)threadIdx.y) * 40960)) + ((((int)threadIdx.x) >> 2) * 5120)) + (ax3_0_0 * 32)) + ((((int)threadIdx.x) & 3) * 8)) + 32))), "n"(16)
);
}
}
#pragma unroll
for (int ax0_ax1_fused_0_1 = 0; ax0_ax1_fused_0_1 < 1; ++ax0_ax1_fused_0_1) {
if (((int)threadIdx.y) < 3) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)(B_shared + (((((ax3_0_0 + 1) & 1) * 1536) + (((int)threadIdx.y) * 512)) + (((int)threadIdx.x) * 16)))));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + (((((ax3_0_0 + 1) & 1) * 1536) + (((int)threadIdx.y) * 512)) + (((int)threadIdx.x) * 16))))
);
#endif
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + (((((((int)blockIdx.x) * 245760) + (((int)threadIdx.y) * 81920)) + (((int)threadIdx.x) * 2560)) + (ax3_0_0 * 16)) + 16))), "n"(16)
);
}
}
}
__asm__ __volatile__("cp.async.commit_group;");
__asm__ __volatile__("cp.async.wait_group 1;");
__syncthreads();
for (int ax1_ax2_0_fused_0 = 0; ax1_ax2_0_fused_0 < 3; ++ax1_ax2_0_fused_0) {
*(int*)(B_local + 0) = *(int*)(B_shared + (((((ax3_0_0 & 1) * 1536) + (ax1_ax2_0_fused_0 * 512)) + (((int)threadIdx.y) * 128)) + (((int)threadIdx.x) * 4)));
for (int ax2 = 0; ax2 < 8; ++ax2) {
B_decode_reindex_local[ax2] = (((((half)((((uint)B_local[(ax2 >> 1)]) >> (((uint)(ax2 & 1)) * (uint)4)) & (uint)15)) - __float2half_rn(7.000000e+00f)) * Scale[(((((((int)blockIdx.x) * 3840) + (ax1_ax2_0_fused_0 * 1280)) + (((int)threadIdx.y) * 320)) + ((((int)threadIdx.x) >> 2) * 40)) + (ax3_0_0 >> 2))]) - Zeros[(((((((int)blockIdx.x) * 3840) + (ax1_ax2_0_fused_0 * 1280)) + (((int)threadIdx.y) * 320)) + ((((int)threadIdx.x) >> 2) * 40)) + (ax3_0_0 >> 2))]);
}
*(uint4*)(B_decode_reindex_shared + ((((ax1_ax2_0_fused_0 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8))) = *(uint4*)(B_decode_reindex_local + 0);
}
__syncthreads();
for (int ax3_0_1 = 0; ax3_0_1 < 2; ++ax3_0_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(A_reindex_shared[(((((ax3_0_0 & 1) * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(A_reindex_shared[(((((ax3_0_0 & 1) * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(A_reindex_shared_warp + 0))[0]), "=r"(((unsigned *)(A_reindex_shared_warp + 0))[1]), "=r"(((unsigned *)(A_reindex_shared_warp + 0))[2]), "=r"(((unsigned *)(A_reindex_shared_warp + 0))[3])
: "r"(addr)
);
}
for (int ax1_0 = 0; ax1_0 < 6; ++ax1_0) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(B_decode_reindex_shared[((((ax1_0 * 512) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(B_decode_reindex_shared[((((ax1_0 * 512) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0 * 8)))[0]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0 * 8)))[1]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0 * 8)))[2]), "=r"(((unsigned *)(B_decode_reindex_shared_warp + (ax1_0 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax2_0_3 = 0; ax2_0_3 < 6; ++ax2_0_3) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3 * 8)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3 * 8)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp + 0))[0]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[1]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[2]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp + (ax2_0_3 * 8)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp + (ax2_0_3 * 8)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3 * 8)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3 * 8)))[1]));
}
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp + 0))[0]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[1]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[2]), "r"(((unsigned *)(A_reindex_shared_warp + 0))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3 * 8) + 4)))[1]));
}
}
}
}
__asm__ __volatile__("cp.async.wait_group 0;");
__syncthreads();
for (int ax1_ax2_0_fused_0_1 = 0; ax1_ax2_0_fused_0_1 < 3; ++ax1_ax2_0_fused_0_1) {
*(int*)(B_local_1 + 0) = *(int*)(B_shared + ((((ax1_ax2_0_fused_0_1 * 512) + (((int)threadIdx.y) * 128)) + (((int)threadIdx.x) * 4)) + 1536));
for (int ax2_1 = 0; ax2_1 < 8; ++ax2_1) {
B_decode_reindex_local_1[ax2_1] = (((((half)((((uint)B_local_1[(ax2_1 >> 1)]) >> (((uint)(ax2_1 & 1)) * (uint)4)) & (uint)15)) - __float2half_rn(7.000000e+00f)) * Scale[(((((((int)blockIdx.x) * 3840) + (ax1_ax2_0_fused_0_1 * 1280)) + (((int)threadIdx.y) * 320)) + ((((int)threadIdx.x) >> 2) * 40)) + 39)]) - Zeros[(((((((int)blockIdx.x) * 3840) + (ax1_ax2_0_fused_0_1 * 1280)) + (((int)threadIdx.y) * 320)) + ((((int)threadIdx.x) >> 2) * 40)) + 39)]);
}
*(uint4*)(B_decode_reindex_shared + ((((ax1_ax2_0_fused_0_1 * 1024) + (((int)threadIdx.y) * 256)) + ((((int)threadIdx.x) >> 2) * 32)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 8))) = *(uint4*)(B_decode_reindex_local_1 + 0);
}
__syncthreads();
for (int ax3_0_1_1 = 0; ax3_0_1_1 < 2; ++ax3_0_1_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(A_reindex_shared[((((((int)threadIdx.y) * 512) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8)) + 2048)])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(A_reindex_shared[((((((int)threadIdx.y) * 512) + ((((int)threadIdx.x) & 15) * 32)) + ((((ax3_0_1_1 * 2) + (((int)threadIdx.x) >> 4)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8)) + 2048)])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[0]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[1]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[2]), "=r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[3])
: "r"(addr)
);
}
for (int ax1_0_1 = 0; ax1_0_1 < 6; ++ax1_0_1) {
{
unsigned int addr;
#if TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST
addr = static_cast<unsigned int>(__cvta_generic_to_shared((void *)((&(B_decode_reindex_shared[((((ax1_0_1 * 512) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0)));
#else
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(B_decode_reindex_shared[((((ax1_0_1 * 512) + ((((int)threadIdx.x) >> 4) * 256)) + ((((int)threadIdx.x) & 7) * 32)) + ((((ax3_0_1_1 * 2) + ((((int)threadIdx.x) & 15) >> 3)) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))])) + 0))
);
#endif
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_1 * 8)))[0]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_1 * 8)))[1]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_1 * 8)))[2]), "=r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax1_0_1 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax2_0_3_1 = 0; ax2_0_3_1 < 6; ++ax2_0_3_1) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3_1 * 8)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3_1 * 8)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[0]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[1]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[2]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax2_0_3_1 * 8)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + (ax2_0_3_1 * 8)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3_1 * 8)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + (ax2_0_3_1 * 8)))[1]));
}
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%8, %9};\n"
: "=r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3_1 * 8) + 4)))[0]), "=r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3_1 * 8) + 4)))[1])
: "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[0]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[1]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[2]), "r"(((unsigned *)(A_reindex_shared_warp_1 + 0))[3]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + ((ax2_0_3_1 * 8) + 4)))[0]), "r"(((unsigned *)(B_decode_reindex_shared_warp_1 + ((ax2_0_3_1 * 8) + 4)))[1]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3_1 * 8) + 4)))[0]), "r"(((unsigned *)(C_reindex_shared_warp + ((ax2_0_3_1 * 8) + 4)))[1]));
}
}
}
for (int ax0 = 0; ax0 < 6; ++ax0) {
__syncthreads();
for (int local_id = 0; local_id < 8; local_id+=2) {
*((uint *)&(&(((half*)B_shared)[(((int)threadIdx.y) * 256)]))[((((((local_id % 4) / 2) * 8) + (threadIdx.x / 4)) * 16) + ((((local_id / 4) * 8) + ((threadIdx.x % 4) * 2)) + (local_id % 2)))]) = *((uint *)&C_reindex_shared_warp[(ax0 * 8) + local_id]);
}
;
__syncthreads();
#pragma unroll
for (int ax0_ax1_ax2_ax3_ax4_fused_0 = 0; ax0_ax1_ax2_ax3_ax4_fused_0 < 1; ++ax0_ax1_ax2_ax3_ax4_fused_0) {
*(uint4*)(D + ((((((((int)blockIdx.y) * 983040) + (((int)threadIdx.y) * 245760)) + ((((int)threadIdx.x) >> 1) * 15360)) + (((int)blockIdx.x) * 96)) + (ax0 * 16)) + ((((int)threadIdx.x) & 1) * 8))) = *(uint4*)(((half*)B_shared) + ((((int)threadIdx.y) * 256) + (((int)threadIdx.x) * 8)));
}
}
}
}
int ladder_gemm_fp16xint2_fp16(half *input_0, half *input_1, half *output, const int M, const int N, const int K, const int trans_a, const int trans_b, half *workspace_ptr)
{
assert(trans_a == 0 && trans_b == 1);
if (M == 1 && N == 15360 && K == 5120){
const dim3 GridDim(15360, 1, 1);
const dim3 BlockDim(128, 1, 1);
bitblas_kernel_fp16_int2_fp16_m1n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
return 0;
}
if (M == 128 && N == 15360 && K == 5120){
const dim3 GridDim(160, 2, 1);
const dim3 BlockDim(32, 4, 1);
bitblas_kernel_fp16_int2_fp16_m128n15360k5120_nt<<<GridDim, BlockDim>>>(input_0, input_1, output);
return 0;
}
return -1;
}
BitBLAS/integration/fastertransformer/kenrel_output/ladder_kernel.cu
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import List
import numpy as np
def get_all_factors(n: int) -> List[int]:
# Calculate the square root of n and round it up to the nearest integer
n0 = int(np.ceil(np.sqrt(n)))
# Find all divisors of n that are less than n0
val = np.where(n % np.arange(1, n0) == 0)[0] + 1
# If n is a perfect square, add the square root to the list of factors
mid = np.array([], dtype=int) if n0 * n0 != n else [n0]
# Combine the factors and their corresponding larger pair factors
return [int(x) for x in np.concatenate([val, mid, n // val[::-1]])]
def factorize(n: int) -> List[int]:
i = 2 # Start with the smallest prime number
result = []
# Iterate through numbers to find factors
while n > 1:
if n % i == 0: # If i is a factor of n
n //= i # Divide n by i and keep the integer part
result.append(i)
else:
i += 1 # Try the next number
return result
def coalesced_factor(subtensor: List[int], tensor: List[int]) -> int:
# If the last dimension of the subtensor and tensor differ, or subtensor has only one dimension
if subtensor[-1] != tensor[-1] or len(subtensor) == 1:
return subtensor[-1]
else:
# Recursively calculate the coalesced factor for the remaining dimensions
return subtensor[-1] * coalesced_factor(subtensor[:-1], tensor[:-1])
def coalesced_tensor_shape(subtensor: List[int], tensor: List[int], transaction_size: int) -> int:
# Calculate the total number of elements in the subtensor
bytes = int(np.prod(subtensor))
if bytes == 0:
return 0
# Calculate the coalesced factor for the subtensor
factor = int(coalesced_factor(subtensor, tensor))
# Compute the shape of the coalesced tensor
return transaction_size * bytes / min(transaction_size, factor)
BitBLAS/python/bitblas/base/roller/policy/common.py
# Copyright 2018 The apache/tvm Authors. All Rights Reserved.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modifications Copyright (c) Microsoft.
# The code below is mostly copied from apache/tvm fallback.py in dlight.
# pylint: disable=missing-docstring
"""A fallback schedule rule for GPU operators."""
from typing import List, Tuple
from tvm import tir
from tvm.target import Target
from ..base import normalize_prim_func, try_inline
from . import utils
from .base import GPUScheduleRule
class Fallback(GPUScheduleRule):
"""
A fallback schedule rule for all GPU operators. It will try to inline all the blocks first,
and then apply a simple block/grid mapping to the spatial loops on top of the remaining blocks.
"""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> tir.Schedule:
if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target):
return None
max_threads_per_block = utils.max_threads_per_block(target)
sch = tir.Schedule(func)
block_infos = normalize_prim_func(sch)
if block_infos is None:
return None
block_infos = try_inline(sch, block_infos)
reduction_blocks: List[Tuple[tir.schedule.BlockRV, tir.schedule.LoopRV]] = []
for block in block_infos:
s_loops: List[tir.schedule.LoopRV] = []
r_loops: List[tir.schedule.LoopRV] = []
o_loops: List[tir.schedule.LoopRV] = []
dom_kind = block.dom_kind()
block = block.block_rv
if (
any(
[
sch.get(loop_rv).thread_binding is not None
for loop_rv in sch.get_loops(block)
]
)
or len(sch.get_loops(block)) == 0
):
continue
for loop, iter_type in zip(sch.get_loops(block), dom_kind):
{"S": s_loops, "R": r_loops, "O": o_loops}[iter_type].append(loop)
if not s_loops:
s_loops.append(sch.add_unit_loop(block))
sch.reorder(*s_loops, *r_loops, *o_loops)
bx, tx = sch.split( # pylint: disable=invalid-name
sch.fuse(*s_loops),
factors=[None, max_threads_per_block],
)
sch.bind(bx, "blockIdx.x")
sch.bind(tx, "threadIdx.x")
if len(r_loops) > 0:
reduction_blocks.append((block, r_loops[0]))
for block, r_loop in reduction_blocks:
sch.decompose_reduction(block, r_loop)
return sch
BitBLAS/python/bitblas/gpu/fallback.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .operator import Operator # noqa: F401
from .matmul import Matmul, MatmulConfig # noqa: F401
from .matmul_dequantize import MatmulWeightOnlyDequantize, MatmulWeightOnlyDequantizeConfig # noqa: F401
from .ladder_permutate import LadderPermutate, LadderPermutateConfig # noqa: F401
from .lop3_permutate import LOP3Permutate, LOP3PermutateConfig # noqa: F401
BitBLAS/python/bitblas/ops/__init__.py
# Copyright 2018 The apache/tvm Authors. All Rights Reserved.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modifications Copyright (c) Microsoft.
# The code below is mostly copied from mlc.ai quantization.py in mlc-llm.
# pylint: disable=invalid-name,missing-function-docstring,unused-variable
"""TIR computation utilities for quantization."""
import tvm
from tvm import tir
# fmt: off
def _tir_f32x2_to_bf16x2_to_u32(v0: tir.PrimExpr, v1: tir.PrimExpr, round_to_even: bool = True):
mask = tir.const((1 << 16) - 1, "uint32")
res = []
for data in [v0, v1]:
u32_val = tir.reinterpret("uint32", data)
if round_to_even:
rounding_bias = ((u32_val >> tir.const(16, "uint32"))
& tir.const(1, "uint32")) + tir.const(0x7FFF, "uint32")
u32_val += rounding_bias
res.append((u32_val >> tir.const(16, "uint32")) & mask)
return res[0] | (res[1] << tir.const(16, "uint32"))
def _tir_u32_to_bf16x2_to_f32x2(x: tir.PrimExpr):
mask = tir.const((1 << 16) - 1, "uint32")
x0 = x & mask
x1 = (x >> 16) & mask
return (tir.reinterpret("float32", x << tir.const(16, "uint32")) for x in [x0, x1])
def _tir_u32_to_int_to_float(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert val.dtype == "uint32"
mask = tvm.tir.const((1 << nbit) - 1, "uint32")
return tir.Cast(dtype, (val >> (pos * nbit).astype("uint32")) & mask)
def _tir_packed_uint_to_uint_to_float(storage_nbit: int):
storage_dtype = "uint" + str(storage_nbit)
def f_convert(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
max_int_value = (1 << (nbit - 1)) - 1
return ((val >> (pos.astype("uint32") * tir.const(nbit, "uint32"))) & tir.const(
(1 << nbit) - 1, "uint32")).astype(dtype) - tir.const(max_int_value, dtype)
return f_convert
def _tir_packed_int_to_int_to_float(storage_nbit: int):
storage_dtype = "int" + str(storage_nbit)
def f_convert(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
mask = tir.const((1 << nbit) - 1, "int32")
unextended = (val >> (pos.astype("int32") * tir.const(nbit, "int32"))) & mask
return tir.Cast(
dtype, (unextended << tir.const(32 - nbit, "int32")) >> tir.const(32 - nbit, "int32"))
return f_convert
def _tir_f32_to_uint_to_f4(val: tir.PrimExpr):
assert val.dtype == "float32"
val_u32 = tir.reinterpret("uint32", val)
# e_f32 > 120 -> e_f4 = min(e_f32 - 120 + M_h, 7)
# e_f32 == 120 -> e_f4 = 1
# e_f32 < 120 -> e_f4 = 0
m_h = (val_u32 >> tir.const(22, "uint32")) & tir.const(1, "uint32")
e_f32 = (val_u32 >> tir.const(23, "uint32")) & tir.const(255, "uint32")
s = (val_u32 >> tir.const(31, "uint32"))
e_f4 = tir.Select(
e_f32 > tir.const(120, "uint32"),
tir.Min(e_f32 - tir.const(120, "uint32") + m_h, tir.const(7, "uint32")),
tir.Select(e_f32 == tir.const(120, "uint32"), tir.const(1, "uint32"),
tir.const(0, "uint32")))
return (s << tir.const(3, "uint32")) | e_f4
def _tir_f16_to_uint_to_f4(val: tir.PrimExpr):
assert val.dtype == "float16"
val_u32 = tir.Cast("uint32", tir.reinterpret("uint16", val))
m_h = (val_u32 >> tir.const(9, "uint32")) & tir.const(1, "uint32")
e_f16 = (val_u32 >> tir.const(10, "uint32")) & tir.const(31, "uint32")
s = (val_u32 >> tir.const(15, "uint32"))
e_f4 = tir.Select(
e_f16 > tir.const(8, "uint32"),
tir.Min(e_f16 - tir.const(8, "uint32") + m_h, tir.const(7, "uint32")),
tir.Select(e_f16 == tir.const(8, "uint32"), tir.const(1, "uint32"), tir.const(0, "uint32")))
return (s << tir.const(3, "uint32")) | e_f4
def _tir_u32_to_f4_to_f32(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert nbit == 4
assert dtype == "float32"
assert val.dtype == "uint32"
# e_f4 == 0 -> e_f32 = 0
# e_f4 != 0 -> e_f32 = e_f4 + 120 = e_f4 | (1111000)_2
mask = tvm.tir.const((1 << nbit) - 1, "uint32")
f4 = (val >> (pos.astype("uint32") * tir.const(nbit, "uint32"))) & mask
s = f4 >> tir.const(3, "uint32")
e_f4 = f4 & tir.const(7, "uint32")
e_f32 = e_f4 | tir.const(120, "uint32")
val_f32 = tir.reinterpret("float32",
(e_f32 | (s << tir.const(8, "uint32"))) << tir.const(23, "uint32"))
return tir.Select(e_f4 == tir.const(0, "uint32"), tir.const(0, "float32"), val_f32)
def _tir_u32_to_f4_to_f16(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert nbit == 4
assert dtype == "float16"
assert val.dtype == "uint32"
# e_f4 == 0 -> e_f16 = 0
# e_f4 != 0 -> e_f16 = e_f4 + 8 = e_f4 | (1000)_2
mask = tvm.tir.const((1 << nbit) - 1, "uint32")
f4 = (val >> (pos.astype("uint32") * tir.const(nbit, "uint32"))) & mask
s = f4 >> tir.const(3, "uint32")
e_f4 = f4 & tir.const(7, "uint32")
e_f16 = e_f4 | tir.const(8, "uint32")
val_f16 = tir.reinterpret("float16",
(e_f16 | (s << tir.const(5, "uint32"))) << tir.const(10, "uint32"))
return tir.Select(e_f4 == tir.const(0, "uint32"), tir.const(0, "float16"), val_f16)
def _tir_packed_to_signed_convert(storage_type="uint", storage_nbit=8):
storage_dtype = storage_type + str(storage_nbit)
def f_convert(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
max_int_value = (1 << (nbit - 1))
return ((val >> (pos.astype("uint32") * tir.const(nbit, "uint32"))) & tir.const(
(1 << nbit) - 1, "uint32")).astype(dtype) - tir.const(max_int_value, dtype)
return f_convert
def _tir_packed_to_unsigned_convert(storage_type="uint", storage_nbit=8):
storage_dtype = storage_type + str(storage_nbit)
def f_convert(nbit: int, val: tvm.tir.PrimExpr, pos: tvm.tir.PrimExpr, dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
mask = tvm.tir.const((1 << nbit) - 1, storage_dtype)
return ((val >> (pos * nbit).astype(storage_dtype)) & mask).astype(dtype)
return f_convert
def _tir_packed_to_unsigned_convert_with_zeros(storage_type="uint", storage_nbit=8):
storage_dtype = storage_type + str(storage_nbit)
def f_convert(nbit: int, val: tvm.tir.PrimExpr, pos: tvm.tir.PrimExpr, zero: tvm.tir.PrimExpr,
dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
mask = tvm.tir.const((1 << nbit) - 1, storage_dtype)
return (((val >> (pos * nbit).astype(storage_dtype)) & mask) - zero).astype(dtype)
return f_convert
def _tir_packed_int_to_int_convert(storage_type="uint", storage_nbit=8):
storage_dtype = storage_type + str(storage_nbit)
def f_convert(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
assert val.dtype == storage_dtype, f"{val.dtype} != {storage_dtype}"
mask = tir.const((1 << nbit) - 1, "int32")
unextended = (val >> (pos.astype("int32") * tir.const(nbit, "int32"))) & mask
return tir.Cast(
dtype, (unextended << tir.const(32 - nbit, "int32")) >> tir.const(32 - nbit, "int32"))
return f_convert
# fmt: on
|
BitBLAS/python/bitblas/quantization/quantization.py/0
|
{
"file_path": "BitBLAS/python/bitblas/quantization/quantization.py",
"repo_id": "BitBLAS",
"token_count": 3697
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import io
import subprocess
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install
from setuptools.command.build_py import build_py
from setuptools.command.sdist import sdist
from wheel.bdist_wheel import bdist_wheel
import distutils.dir_util
from typing import List
import re
import tarfile
from io import BytesIO
import os
import sys
import urllib.request
from distutils.version import LooseVersion
import platform
# Environment variables False/True
PYPI_BUILD = os.environ.get("PYPI_BUILD", "False").lower() == "true"
PACKAGE_NAME = "bitblas"
ROOT_DIR = os.path.dirname(__file__)
MAIN_CUDA_VERSION = "12.1"
# BitBLAS only supports Linux platform
assert sys.platform.startswith("linux"), "BitBLAS only supports Linux platform (including WSL)."
def get_path(*filepath) -> str:
return os.path.join(ROOT_DIR, *filepath)
def get_requirements() -> List[str]:
"""Get Python package dependencies from requirements.txt."""
with open(get_path("requirements.txt")) as f:
requirements = f.read().strip().split("\n")
return requirements
def find_version(filepath: str) -> str:
"""Extract version information from the given filepath.
Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
"""
with open(filepath) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_nvcc_cuda_version():
"""Get the CUDA version from nvcc.
Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
"""
nvcc_output = subprocess.check_output(["nvcc", "-V"], universal_newlines=True)
output = nvcc_output.split()
release_idx = output.index("release") + 1
nvcc_cuda_version = LooseVersion(output[release_idx].split(",")[0])
return nvcc_cuda_version
def get_bitblas_version(with_cuda=True, with_system_info=True) -> str:
version = find_version(get_path("python/bitblas", "__init__.py"))
local_version_parts = []
if with_system_info:
local_version_parts.append(get_system_info().replace("-", "."))
if with_cuda:
cuda_version = str(get_nvcc_cuda_version())
cuda_version_str = cuda_version.replace(".", "")[:3]
local_version_parts.append(f"cu{cuda_version_str}")
if local_version_parts:
version += f"+{'.'.join(local_version_parts)}"
return version
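# For example, a local (non-PyPI) build on Ubuntu 20.04 with CUDA 12.1 would yield a
# version string along the lines of "<base>+ubuntu.20.04.cu121"; the exact value
# depends on the OS release and CUDA toolkit found on the build machine.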
def get_system_info():
system = platform.system().lower()
if system == "linux":
try:
with open("/etc/os-release") as f:
os_release = f.read()
version_id_match = re.search(r'VERSION_ID="(\d+\.\d+)"', os_release)
if version_id_match:
version_id = version_id_match.group(1)
distro = "ubuntu"
return f"{distro}-{version_id}"
except FileNotFoundError:
pass
return system
def read_readme() -> str:
"""Read the README file if present."""
p = get_path("README.md")
if os.path.isfile(p):
return io.open(get_path("README.md"), "r", encoding="utf-8").read()
else:
return ""
def download_and_extract_llvm(version, is_aarch64=False, extract_path="3rdparty"):
"""
Downloads and extracts the specified version of LLVM for the given platform.
Args:
version (str): The version of LLVM to download.
is_aarch64 (bool): True if the target platform is aarch64, False otherwise.
extract_path (str): The directory path where the archive will be extracted.
Returns:
str: The path where the LLVM archive was extracted.
"""
ubuntu_version = "16.04"
if version >= "16.0.0":
ubuntu_version = "20.04"
elif version >= "13.0.0":
ubuntu_version = "18.04"
base_url = (f"https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}")
file_name = f"clang+llvm-{version}-{'aarch64-linux-gnu' if is_aarch64 else f'x86_64-linux-gnu-ubuntu-{ubuntu_version}'}.tar.xz"
download_url = f"{base_url}/{file_name}"
# Download the file
print(f"Downloading {file_name} from {download_url}")
with urllib.request.urlopen(download_url) as response:
if response.status != 200:
raise Exception(f"Download failed with status code {response.status}")
file_content = response.read()
# Ensure the extract path exists
os.makedirs(extract_path, exist_ok=True)
# if the file already exists, remove it
if os.path.exists(os.path.join(extract_path, file_name)):
os.remove(os.path.join(extract_path, file_name))
# Extract the file
print(f"Extracting {file_name} to {extract_path}")
with tarfile.open(fileobj=BytesIO(file_content), mode="r:xz") as tar:
tar.extractall(path=extract_path)
print("Download and extraction completed successfully.")
return os.path.abspath(os.path.join(extract_path, file_name.replace(".tar.xz", "")))
package_data = {
"bitblas": ["py.typed"],
}
LLVM_VERSION = "10.0.1"
IS_AARCH64 = False # Set to True if on an aarch64 platform
EXTRACT_PATH = "3rdparty" # Default extraction path
def update_submodules():
"""Updates git submodules."""
try:
subprocess.check_call(["git", "submodule", "update", "--init", "--recursive"])
except subprocess.CalledProcessError as error:
raise RuntimeError("Failed to update submodules") from error
def build_tvm(llvm_config_path):
"""Configures and builds TVM."""
os.chdir("3rdparty/tvm")
if not os.path.exists("build"):
os.makedirs("build")
os.chdir("build")
# Copy the config.cmake as a baseline
if not os.path.exists("config.cmake"):
shutil.copy("../cmake/config.cmake", "config.cmake")
# Set LLVM path and enable CUDA in config.cmake
with open("config.cmake", "a") as config_file:
config_file.write(f"set(USE_LLVM {llvm_config_path})\n")
config_file.write("set(USE_CUDA ON)\n")
# Run CMake and make
try:
subprocess.check_call(["cmake", ".."])
subprocess.check_call(["make", "-j"])
except subprocess.CalledProcessError as error:
raise RuntimeError("Failed to build TVM") from error
finally:
# Go back to the original directory
os.chdir("../../..")
def setup_llvm_for_tvm():
"""Downloads and extracts LLVM, then configures TVM to use it."""
# Assume the download_and_extract_llvm function and its dependencies are defined elsewhere in this script
extract_path = download_and_extract_llvm(LLVM_VERSION, IS_AARCH64, EXTRACT_PATH)
llvm_config_path = os.path.join(extract_path, "bin", "llvm-config")
return extract_path, llvm_config_path
class BitBLASInstallCommand(install):
"""Customized setuptools install command - builds TVM after setting up LLVM."""
def run(self):
# Recursively update submodules
# update_submodules()
# Set up LLVM for TVM
_, llvm_path = setup_llvm_for_tvm()
# Build TVM
build_tvm(llvm_path)
# Continue with the standard installation process
install.run(self)
class BitBLASBuildPyCommand(build_py):
    """Customized setuptools build_py command - builds TVM after setting up LLVM."""
def run(self):
build_py.run(self)
# custom build tvm
update_submodules()
# Set up LLVM for TVM
_, llvm_path = setup_llvm_for_tvm()
# Build TVM
build_tvm(llvm_path)
# Copy the built TVM to the package directory
TVM_PREBUILD_ITEMS = [
"3rdparty/tvm/build/libtvm_runtime.so",
"3rdparty/tvm/build/libtvm.so",
"3rdparty/tvm/build/config.cmake",
"3rdparty/tvm/python",
"3rdparty/tvm/licenses",
"3rdparty/tvm/conftest.py",
"3rdparty/tvm/CONTRIBUTORS.md",
"3rdparty/tvm/KEYS",
"3rdparty/tvm/LICENSE",
"3rdparty/tvm/README.md",
"3rdparty/tvm/mypy.ini",
"3rdparty/tvm/pyproject.toml",
"3rdparty/tvm/version.py",
]
for item in TVM_PREBUILD_ITEMS:
source_dir = os.path.join(ROOT_DIR, item)
target_dir = os.path.join(self.build_lib, PACKAGE_NAME, item)
if os.path.isdir(source_dir):
self.mkpath(target_dir)
distutils.dir_util.copy_tree(source_dir, target_dir)
else:
target_dir = os.path.dirname(target_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
shutil.copy2(source_dir, target_dir)
class BitBLASSdistCommand(sdist):
"""Customized setuptools sdist command - includes the pyproject.toml file."""
def make_distribution(self):
self.distribution.metadata.name = PACKAGE_NAME
self.distribution.metadata.version = get_bitblas_version(
with_cuda=False, with_system_info=False)
super().make_distribution()
setup(
name=PACKAGE_NAME,
version=get_bitblas_version(with_cuda=False, with_system_info=False) if PYPI_BUILD else get_bitblas_version(),
packages=find_packages(where="python"),
package_dir={"": "python"},
author="Microsoft Research",
    description="A lightweight framework to generate high performance CUDA/HIP code for BLAS operators.",
long_description=read_readme(),
long_description_content_type='text/markdown',
platforms=["Environment :: GPU :: NVIDIA CUDA",
"Operating System :: POSIX :: Linux"],
license="MIT",
keywords="BLAS, CUDA, HIP, Code Generation, TVM",
url="https://github.com/microsoft/BitBLAS",
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
],
python_requires=">=3.8",
install_requires=get_requirements(),
tests_require=[
"yapf>=0.32.0",
"toml>=0.10.2",
"tomli>=2.0.1",
"ruff>=0.1.5",
"codespell>=2.2.6",
],
package_data=package_data,
include_package_data=True,
data_files=[
"requirements.txt",
],
cmdclass={
"install": BitBLASInstallCommand,
"build_py": BitBLASBuilPydCommand,
"sdist": BitBLASSdistCommand,
},
)
|
BitBLAS/setup.py/0
|
{
"file_path": "BitBLAS/setup.py",
"repo_id": "BitBLAS",
"token_count": 4512
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pytest
import bitblas
from bitblas.ops.param_permutate import ParamPermutate, ParamPermutateConfig
import tvm
target = tvm.target.Target("llvm")
# fmt: off
@pytest.mark.parametrize(
"M,N,datatype,transpose_matrix,group_size,propagate_kind,target_instruction", [
(1024, 1024, "float16", True, 1, True, "nvidia-mma"),
])
def test_param_permutate_profile_latency(
M,
N,
datatype,
transpose_matrix,
group_size,
propagate_kind,
target_instruction,
):
param_permutate_config = ParamPermutateConfig(
M=M,
N=N,
datatype=datatype,
propagate_kind=propagate_kind,
group_size=group_size,
transpose_matrix=transpose_matrix,
target_instruction=target_instruction,
)
param_permutate = ParamPermutate(
config=param_permutate_config,
target=target,
)
latency = param_permutate.profile_latency()
assert latency
# fmt: on
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/testing/python/operators/test_param_permutate_ops.py/0
|
{
"file_path": "BitBLAS/testing/python/operators/test_param_permutate_ops.py",
"repo_id": "BitBLAS",
"token_count": 464
}
| 152 |
# Based on https://pytorch-lightning.readthedocs.io/en/stable/notebooks/lightning_examples/text-transformers.html
import copy
import os
from datetime import datetime
from typing import Optional
from pytorch_lightning.loggers import WandbLogger
import datasets
import torch
import pytorch_lightning as pl
from pytorch_lightning import LightningDataModule, LightningModule
from torch.utils.data import DataLoader
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
)
from torchvision.datasets import CIFAR10, CIFAR100
from src.modules.clip_model import build_model, adapt_position_encoding
from src.transforms import clip_transform
from sacred import Experiment
ex = Experiment("CIFAR")
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
class CIFARDataModule(LightningDataModule):
def __init__(self, _config):
super().__init__()
self._config = _config
self.transforms = clip_transform(_config['image_size'])
def prepare_data(self):
data_root = self._config['data_root']
if self._config["group_name"] == 'cifar10':
CIFAR10(root=f'{data_root}/cifar10',train=True,download=True, transform=self.transforms)
CIFAR10(root=f'{data_root}/cifar10',train=False,download=True, transform=self.transforms)
elif self._config["group_name"] == 'cifar100':
CIFAR100(root=f'{data_root}/cifar100',train=True,download=True, transform=self.transforms)
CIFAR100(root=f'{data_root}/cifar100',train=False,download=True, transform=self.transforms)
def setup(self, stage):
data_root = self._config['data_root']
if self._config["group_name"] == 'cifar10':
self.cifar_train = CIFAR10(root=f'{data_root}/cifar10',train=True,download=True, transform=self.transforms)
self.cifar_test = CIFAR10(root=f'{data_root}/cifar10',train=False,download=True, transform=self.transforms)
self.num_labels = 10
elif self._config["group_name"] == 'cifar100':
self.cifar_train = CIFAR100(root=f'{data_root}/cifar100',train=True,download=True, transform=self.transforms)
self.cifar_test = CIFAR100(root=f'{data_root}/cifar100',train=False,download=True, transform=self.transforms)
self.num_labels = 100
def train_dataloader(self):
cifar_train = DataLoader(self.cifar_train, batch_size=self._config["per_gpu_batchsize"], shuffle=True, num_workers=self._config["num_workers"])
return cifar_train
def val_dataloader(self):
cifar_val = DataLoader(self.cifar_test, batch_size=self._config["per_gpu_eval_batchsize"], shuffle=False, num_workers=self._config["num_workers"])
return cifar_val
def test_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self._config["per_gpu_eval_batchsize"], shuffle=False, num_workers=self._config["num_workers"])
class CLIPViTModule(LightningModule):
def __init__(
self,
model_name_or_path: str,
num_labels: int,
learning_rate: float = 2e-5,
adam_epsilon: float = 1e-8,
warmup_steps: int = 0,
weight_decay: float = 0.0,
train_batch_size: int = 32,
eval_batch_size: int = 32,
load_path: str = None,
image_size: int = 224,
hidden_size: int = 768,
patch_size: int = 16,
resolution_before: int = 224,
vit_remove_last: bool = False,
**kwargs,
):
super().__init__()
self.save_hyperparameters()
self.num_labels = num_labels
self.model = build_model(model_name_or_path, resolution_after=image_size, model_type="ViT", vit_remove_last=vit_remove_last)
self.classifier = torch.nn.Linear(hidden_size, num_labels)
self.classifier.weight.data.normal_(mean=0.0, std=0.02)
self.classifier.bias.data.zero_()
self.the_metric = -1
if load_path is not None:
ckpt = torch.load(load_path, map_location="cpu")
state_dict = ckpt["state_dict"]
state_dict = {k.replace('vit_model.', ''): v for k, v in state_dict.items() if k.startswith("vit_model")}
if resolution_before != image_size:
state_dict = adapt_position_encoding(state_dict, after=image_size, patch_size=patch_size)
self.model.load_state_dict(state_dict, strict=False)
self.metric = datasets.load_metric('accuracy', experiment_id=datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
def infer(self, batch, batch_idx):
inputs, labels = batch
logits = self.classifier(self.model(inputs)[:, 0, :])
loss_fct = torch.nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
predictions = logits.argmax(-1)
return loss, predictions
def training_step(self, batch, batch_idx):
loss, _ = self.infer(batch, batch_idx)
return loss
def training_epoch_end(self, outs):
self.log("train_loss", torch.stack([x["loss"] for x in outs]).mean(), prog_bar=True)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
loss, predictions = self.infer(batch, batch_idx)
return {"loss": loss, "preds": predictions, "labels": batch[1]}
def validation_epoch_end(self, outputs):
preds = torch.cat([x["preds"] for x in outputs]).detach().cpu().numpy()
labels = torch.cat([x["labels"] for x in outputs]).detach().cpu().numpy()
loss = torch.stack([x["loss"] for x in outputs]).mean()
self.log("val_loss", loss, prog_bar=True)
metrics_results = self.metric.compute(predictions=preds, references=labels)
self.log_dict(metrics_results, prog_bar=True)
self.the_metric = max(self.the_metric, metrics_results['accuracy'])
self.log("the_metric", self.the_metric)
return loss
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon, betas=(0.9, 0.98))
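        # Approximate total optimiser steps: (batches per epoch * max epochs)
        # divided by gradient-accumulation steps and the number of GPUs (DDP
        # shards the data across processes).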
self.total_steps = len(self.trainer.datamodule.train_dataloader()) * self.trainer.max_epochs // self.trainer.accumulate_grad_batches // max(1, self.trainer.gpus)
print(self.total_steps)
print(self.hparams.warmup_steps if type(self.hparams.warmup_steps) is int else self.hparams.warmup_steps * self.total_steps)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.warmup_steps if type(self.hparams.warmup_steps) is int else self.hparams.warmup_steps * self.total_steps,
num_training_steps=self.total_steps,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
@ex.config
def config():
root_dir = "."
data_root = f"{root_dir}/dataset/cifar"
log_dir = f"{root_dir}/logs"
output_dir = f"{root_dir}/checkpoints"
load_path = f""
load_flag = False # load from load_path or clip-vit
num_gpus = 8
num_nodes = 1
num_workers = 8
precision = 32
per_gpu_batchsize = 64 # you should define this manually with per_gpu_batch_size=#
per_gpu_eval_batchsize = 256
# Wandb Logger Setting
exp_name = "Uni-Modal"
group_name = "cifar10"
run_name = "finetune"
# PL Trainer Setting
resume_from = None
fast_dev_run = False
val_check_interval = 1.0
log_every_n_steps = 50
# Experiment Setting
seed = 0
batch_size = 512 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
# Image setting
vit = 'CLIP-ViT-B/16'
image_size = 224 # 32?
patch_size = 16
resolution_before = 224
input_image_embed_size = 768
vit_remove_last = False
# Optimizer Setting
learning_rate = 2e-5 # 0.03 for ViT-B/16
weight_decay = 0.01
adam_epsilon = 1e-8
max_epoch = 10
max_steps = -1 # 10000 for ViT-B/16
warmup_steps = 0.06 # 0.05 for ViT-B/16
patience = 3
@ex.automain
def main(_config):
_config = copy.deepcopy(_config)
# pl.seed_everything(_config["seed"])
dm = CIFARDataModule(_config)
dm.setup("fit")
model = CLIPViTModule(
model_name_or_path=_config["vit"],
load_path=_config["load_path"] if _config["load_flag"] else None,
num_labels=dm.num_labels,
learning_rate=_config["learning_rate"],
warmup_steps=_config["warmup_steps"],
weight_decay=_config["weight_decay"],
adam_epsilon=_config["adam_epsilon"],
train_batch_size=_config["per_gpu_batchsize"],
eval_batch_size=_config["per_gpu_eval_batchsize"],
image_size=_config["image_size"],
hidden_size=_config["input_image_embed_size"],
patch_size=_config["patch_size"],
resolution_before=_config["resolution_before"],
vit_remove_last=_config["vit_remove_last"],
)
exp_name = _config["exp_name"]
group_name = _config["group_name"]
run_name = _config["run_name"]
output_dir = f'{_config["output_dir"]}/{exp_name}_{group_name}_{run_name}'
os.makedirs(_config["log_dir"], exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
logger = WandbLogger(save_dir=_config["log_dir"], project=exp_name, name=f'{exp_name}_{group_name}_{run_name}', group=group_name)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
# early_stop_callback = pl.callbacks.EarlyStopping(
# monitor='the_metric',
# patience=_config["patience"],
# strict=True,
# verbose=True,
# mode='max'
# )
# callbacks = [lr_callback, early_stop_callback]
callbacks = [lr_callback]
logger.log_hyperparams(_config)
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
grad_steps = max(_config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
), 1)
trainer = pl.Trainer(
gpus=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
strategy="ddp",
benchmark=True,
deterministic=True,
max_epochs=_config["max_epoch"] if _config["max_steps"] == -1 else 1000,
max_steps=_config["max_steps"],
logger=logger,
accumulate_grad_batches=grad_steps,
log_every_n_steps=_config["log_every_n_steps"],
resume_from_checkpoint=_config["resume_from"],
weights_summary="top",
callbacks=callbacks,
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
)
trainer.fit(model, datamodule=dm)
# trainer.validate(model, datamodule=dm)
# trainer.test(model, datamodule=dm)
|
BridgeTower/run_cifar.py/0
|
{
"file_path": "BridgeTower/run_cifar.py",
"repo_id": "BridgeTower",
"token_count": 5038
}
| 153 |
import random
import torch
import io
import pyarrow as pa
import os
from PIL import Image
from ..transforms import keys_to_transforms
class BaseDataset(torch.utils.data.Dataset):
def __init__(
self,
data_dir: str,
transform_keys: list,
image_size: int,
names: list,
text_column_name: str = "",
remove_duplicate=True,
max_text_len=40,
draw_false_image=0,
draw_false_text=0,
image_only=False,
tokenizer=None,
debug_num=0,
):
"""
data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data
transform_keys : keys for generating augmented views of images
text_column_name : pyarrow table column name that has list of strings as elements
"""
assert len(transform_keys) >= 1
super().__init__()
self.transforms = keys_to_transforms(transform_keys, size=image_size)
self.clip_transform = False
for transform_key in transform_keys:
if 'clip' in transform_key:
self.clip_transform = True
break
self.text_column_name = text_column_name
self.names = names
self.max_text_len = max_text_len
self.draw_false_image = draw_false_image
self.draw_false_text = draw_false_text
self.image_only = image_only
self.data_dir = data_dir
self.tokenizer = tokenizer
self.debug_num = debug_num
if len(names) != 0:
tables = [
pa.ipc.RecordBatchFileReader(
pa.memory_map(f"{data_dir}/{name}.arrow", "r")
).read_all()
for name in names
if os.path.isfile(f"{data_dir}/{name}.arrow")
]
self.table_names = list()
for i, name in enumerate(names):
self.table_names += [name] * len(tables[i])
self.table = pa.concat_tables(tables, promote=True)
if text_column_name != "":
self.text_column_name = text_column_name
self.all_texts = self.table[text_column_name].to_pandas().tolist()
if type(self.all_texts[0][0]) == str:
self.all_texts = (
[list(set(texts)) for texts in self.all_texts]
if remove_duplicate
else self.all_texts
)
else: #snli
self.all_texts = (
[[t[1].strip() for t in texts] for texts in self.all_texts]
)
else:
self.all_texts = list()
else:
self.all_texts = list()
self.index_mapper = dict()
if text_column_name != "" and not self.image_only:
            # j: sample index, i: image index (with several texts)
# _j: j-th sample's text index
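            # e.g. with two captions for image 0 and one caption for image 1:
            #   index_mapper = {0: (0, 0), 1: (0, 1), 2: (1, 0)}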
j = 0
for i, texts in enumerate(self.all_texts):
for _j in range(len(texts)):
self.index_mapper[j] = (i, _j)
j += 1
if debug_num != 0 and j >= debug_num:
break
if debug_num != 0 and j >= debug_num:
break
else:
for i in range(len(self.table)):
self.index_mapper[i] = (i, None)
if debug_num != 0 and i+1 >= debug_num:
break
if torch.distributed.is_initialized():
if len(names) != 0 and torch.distributed.get_rank() == 0:
print(f'{names[0]} \t #Images: {i+1}, #Captions: {len(self.index_mapper)}')
else:
if len(names) != 0:
print(f'{names[0]} \t #Images: {i+1}, #Captions: {len(self.index_mapper)}')
@property
def corpus(self):
return [text for texts in self.all_texts for text in texts]
def __len__(self):
return len(self.index_mapper)
def get_raw_image(self, index, image_key="image"):
index, caption_index = self.index_mapper[index]
image_bytes = io.BytesIO(self.table[image_key][index].as_py())
image_bytes.seek(0)
if self.clip_transform:
return Image.open(image_bytes).convert("RGBA")
else:
return Image.open(image_bytes).convert("RGB")
def get_image(self, raw_index, image_key="image"):
index, caption_index = self.index_mapper[raw_index]
image = self.get_raw_image(raw_index, image_key=image_key)
image_tensor = [tr(image) for tr in self.transforms]
return {
"image": image_tensor,
"img_index": index,
"cap_index": caption_index,
"raw_index": raw_index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.index_mapper) - 1)
image = self.get_raw_image(random_index, image_key=image_key)
image_tensor = [tr(image) for tr in self.transforms]
return {f"false_image_{rep}": image_tensor}
def get_text(self, raw_index):
index, caption_index = self.index_mapper[raw_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"img_index": index,
"cap_index": caption_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.index_mapper) - 1)
index, caption_index = self.index_mapper[random_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
while result is None:
try:
ret = dict()
ret.update(self.get_image(index))
if not self.image_only:
txt = self.get_text(index)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {index} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.index_mapper) - 1)
return ret
def collate(self, batch, mlm_collator):
batch_size = len(batch)
keys = set([key for b in batch for key in b.keys()])
dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
img_keys = [k for k in list(dict_batch.keys()) if "image" in k]
img_sizes = list()
for img_key in img_keys:
img = dict_batch[img_key]
img_sizes += [ii.shape for i in img if i is not None for ii in i]
for size in img_sizes:
assert (
len(size) == 3
), f"Collate error, an image should be in shape of (3, H, W), instead of given {size}"
if len(img_keys) != 0:
max_height = max([i[1] for i in img_sizes])
max_width = max([i[2] for i in img_sizes])
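            # zero-pad every view of every image up to the batch-wide max height/width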
for img_key in img_keys:
img = dict_batch[img_key]
view_size = len(img[0])
new_images = [
torch.zeros(batch_size, 3, max_height, max_width)
for _ in range(view_size)
]
for bi in range(batch_size):
orig_batch = img[bi]
for vi in range(view_size):
if orig_batch is None:
new_images[vi][bi] = None
else:
orig = img[bi][vi]
new_images[vi][bi, :, : orig.shape[1], : orig.shape[2]] = orig
dict_batch[img_key] = new_images
txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
if len(txt_keys) != 0:
# texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
# draw_text_len = len(encodings)
flatten_encodings = [e for encoding in encodings for e in encoding]
flatten_mlms = mlm_collator(flatten_encodings)
for i, txt_key in enumerate(txt_keys):
texts, encodings = (
[d[0] for d in dict_batch[txt_key]],
[d[1] for d in dict_batch[txt_key]],
)
mlm_ids, mlm_labels = (
flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
)
input_ids = torch.zeros_like(mlm_ids)
attention_mask = torch.zeros_like(mlm_ids)
for _i, encoding in enumerate(encodings):
_input_ids, _attention_mask = (
torch.tensor(encoding["input_ids"]),
torch.tensor(encoding["attention_mask"]),
)
input_ids[_i, : len(_input_ids)] = _input_ids
attention_mask[_i, : len(_attention_mask)] = _attention_mask
dict_batch[txt_key] = texts
dict_batch[f"{txt_key}_ids"] = input_ids
dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
dict_batch[f"{txt_key}_masks"] = attention_mask
return dict_batch
|
BridgeTower/src/datasets/base_dataset.py/0
|
{
"file_path": "BridgeTower/src/datasets/base_dataset.py",
"repo_id": "BridgeTower",
"token_count": 5467
}
| 154 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bert_model import BertPredictionHeadTransform
class LinkTower(nn.Module):
def __init__(self, config):
super(LinkTower, self).__init__()
self.LayerNorm = nn.LayerNorm(config['hidden_size'])
def forward(self, hidden_states, cross_modal_hidden_states):
return self.LayerNorm(hidden_states + cross_modal_hidden_states)
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ITCHead(nn.Module):
def __init__(self, hidden_size, embed_size):
super().__init__()
self.fc = nn.Linear(hidden_size, embed_size)
def forward(self, x):
return self.fc(x)
class ITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
return self.fc(x)
class MLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
|
BridgeTower/src/modules/heads.py/0
|
{
"file_path": "BridgeTower/src/modules/heads.py",
"repo_id": "BridgeTower",
"token_count": 736
}
| 155 |
import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
split, _, name = path.split("/")[-3:]
split = split.split("_")[-1]
iid = name
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[iid]
return [
binary,
captions,
iid,
split,
]
annotations = {
'val' : 'Validation_GCC-1.1.0-Validation.tsv',
'train' : 'Train_GCC-training.tsv'
}
check_exist = {
'val' : 'val_image_exist.txt',
'train' : 'train_image_exist.txt'
}
def make_arrow(root, dataset_root):
for split in ["val", "train"]:
data = pd.read_csv(f"{root}/utils/{annotations[split]}", sep='\t', header=None)
with open(f"{root}/{check_exist[split]}", 'r') as fr:
data_exist = fr.readlines()
exist_image_file_names = [line.strip().split("/")[-1] for line in data_exist]
iid2captions = dict()
captions = [dataitem[0] for dataitem in data.values.tolist()]
for exist_image_file_name in tqdm(exist_image_file_names):
exist_image_idx = int(exist_image_file_name.split(".")[0])
iid2captions[exist_image_file_name] = [captions[exist_image_idx]]
paths = list(glob(f"{root}/{split}_image/*"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
sub_len = int(len(caption_paths) // 100000)
subs = list(range(sub_len + 1))
for sub in subs:
sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
dataframe = pd.DataFrame(
bs, columns=["image", "caption", "image_id", "split"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(
f"{dataset_root}/conceptual_caption_{split}_{sub}.arrow", "wb"
) as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
del dataframe
del table
del bs
gc.collect()
make_arrow('~/BT/dataset/cc', '~/BT/dataset/pre-train')
|
BridgeTower/src/utils/write_conceptual_caption.py/0
|
{
"file_path": "BridgeTower/src/utils/write_conceptual_caption.py",
"repo_id": "BridgeTower",
"token_count": 1306
}
| 156 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from models.networks.base_network import BaseNetwork
from models.networks.generator import *
from models.networks.encoder import *
import util.util as util
def find_network_using_name(target_network_name, filename):
target_class_name = target_network_name + filename
module_name = "models.networks." + filename
network = util.find_class_in_module(target_class_name, module_name)
assert issubclass(network, BaseNetwork), "Class %s should be a subclass of BaseNetwork" % network
return network
def modify_commandline_options(parser, is_train):
opt, _ = parser.parse_known_args()
netG_cls = find_network_using_name(opt.netG, "generator")
parser = netG_cls.modify_commandline_options(parser, is_train)
if is_train:
netD_cls = find_network_using_name(opt.netD, "discriminator")
parser = netD_cls.modify_commandline_options(parser, is_train)
netE_cls = find_network_using_name("conv", "encoder")
parser = netE_cls.modify_commandline_options(parser, is_train)
return parser
def create_network(cls, opt):
net = cls(opt)
net.print_network()
if len(opt.gpu_ids) > 0:
assert torch.cuda.is_available()
net.cuda()
net.init_weights(opt.init_type, opt.init_variance)
return net
def define_G(opt):
netG_cls = find_network_using_name(opt.netG, "generator")
return create_network(netG_cls, opt)
def define_D(opt):
netD_cls = find_network_using_name(opt.netD, "discriminator")
return create_network(netD_cls, opt)
def define_E(opt):
# there exists only one encoder type
netE_cls = find_network_using_name("conv", "encoder")
return create_network(netE_cls, opt)
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/__init__.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/models/networks/__init__.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 665
}
| 157 |
import numpy as np
import cv2
import PySimpleGUI as sg
import os.path
import argparse
import os
import sys
import shutil
from subprocess import call
def modify(image_filename=None, cv2_frame=None):
def run_cmd(command):
try:
call(command, shell=True)
except KeyboardInterrupt:
print("Process interrupted")
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument("--input_folder", type=str,
default= image_filename, help="Test images")
parser.add_argument(
"--output_folder",
type=str,
default="./output",
help="Restored images, please use the absolute path",
)
parser.add_argument("--GPU", type=str, default="-1", help="0,1,2")
parser.add_argument(
"--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint"
)
parser.add_argument("--with_scratch",default="--with_scratch" ,action="store_true")
opts = parser.parse_args()
gpu1 = opts.GPU
# resolve relative paths before changing directory
opts.input_folder = os.path.abspath(opts.input_folder)
opts.output_folder = os.path.abspath(opts.output_folder)
if not os.path.exists(opts.output_folder):
os.makedirs(opts.output_folder)
main_environment = os.getcwd()
# Stage 1: Overall Quality Improve
print("Running Stage 1: Overall restoration")
os.chdir("./Global")
stage_1_input_dir = opts.input_folder
stage_1_output_dir = os.path.join(
opts.output_folder, "stage_1_restore_output")
if not os.path.exists(stage_1_output_dir):
os.makedirs(stage_1_output_dir)
if not opts.with_scratch:
stage_1_command = (
"python test.py --test_mode Full --Quality_restore --test_input "
+ stage_1_input_dir
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1
)
run_cmd(stage_1_command)
else:
mask_dir = os.path.join(stage_1_output_dir, "masks")
new_input = os.path.join(mask_dir, "input")
new_mask = os.path.join(mask_dir, "mask")
stage_1_command_1 = (
"python detection.py --test_path "
+ stage_1_input_dir
+ " --output_dir "
+ mask_dir
+ " --input_size full_size"
+ " --GPU "
+ gpu1
)
stage_1_command_2 = (
"python test.py --Scratch_and_Quality_restore --test_input "
+ new_input
+ " --test_mask "
+ new_mask
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1
)
run_cmd(stage_1_command_1)
run_cmd(stage_1_command_2)
    # Handle the case where there is no face in the old photo
stage_1_results = os.path.join(stage_1_output_dir, "restored_image")
stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
if not os.path.exists(stage_4_output_dir):
os.makedirs(stage_4_output_dir)
for x in os.listdir(stage_1_results):
img_dir = os.path.join(stage_1_results, x)
shutil.copy(img_dir, stage_4_output_dir)
print("Finish Stage 1 ...")
print("\n")
# Stage 2: Face Detection
print("Running Stage 2: Face Detection")
os.chdir(".././Face_Detection")
stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image")
stage_2_output_dir = os.path.join(
opts.output_folder, "stage_2_detection_output")
if not os.path.exists(stage_2_output_dir):
os.makedirs(stage_2_output_dir)
stage_2_command = (
"python detect_all_dlib.py --url " + stage_2_input_dir +
" --save_url " + stage_2_output_dir
)
run_cmd(stage_2_command)
print("Finish Stage 2 ...")
print("\n")
# Stage 3: Face Restore
print("Running Stage 3: Face Enhancement")
os.chdir(".././Face_Enhancement")
stage_3_input_mask = "./"
stage_3_input_face = stage_2_output_dir
stage_3_output_dir = os.path.join(
opts.output_folder, "stage_3_face_output")
if not os.path.exists(stage_3_output_dir):
os.makedirs(stage_3_output_dir)
stage_3_command = (
"python test_face.py --old_face_folder "
+ stage_3_input_face
+ " --old_face_label_folder "
+ stage_3_input_mask
+ " --tensorboard_log --name "
+ opts.checkpoint_name
+ " --gpu_ids "
+ gpu1
+ " --load_size 256 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 4 --results_dir "
+ stage_3_output_dir
+ " --no_parsing_map"
)
run_cmd(stage_3_command)
print("Finish Stage 3 ...")
print("\n")
# Stage 4: Warp back
print("Running Stage 4: Blending")
os.chdir(".././Face_Detection")
stage_4_input_image_dir = os.path.join(
stage_1_output_dir, "restored_image")
stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img")
stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
if not os.path.exists(stage_4_output_dir):
os.makedirs(stage_4_output_dir)
stage_4_command = (
"python align_warp_back_multiple_dlib.py --origin_url "
+ stage_4_input_image_dir
+ " --replace_url "
+ stage_4_input_face_dir
+ " --save_url "
+ stage_4_output_dir
)
run_cmd(stage_4_command)
print("Finish Stage 4 ...")
print("\n")
print("All the processing is done. Please check the results.")
# --------------------------------- The GUI ---------------------------------
# First the window layout...
images_col = [[sg.Text('Input file:'), sg.In(enable_events=True, key='-IN FILE-'), sg.FileBrowse()],
[sg.Button('Modify Photo', key='-MPHOTO-'), sg.Button('Exit')],
[sg.Image(filename='', key='-IN-'), sg.Image(filename='', key='-OUT-')],]
# ----- Full layout -----
layout = [[sg.VSeperator(), sg.Column(images_col)]]
# ----- Make the window -----
window = sg.Window('Bringing-old-photos-back-to-life', layout, grab_anywhere=True)
# ----- Run the Event Loop -----
prev_filename = colorized = cap = None
while True:
event, values = window.read()
if event in (None, 'Exit'):
break
elif event == '-MPHOTO-':
try:
n1 = filename.split("/")[-2]
n2 = filename.split("/")[-3]
n3 = filename.split("/")[-1]
filename= str(f"./{n2}/{n1}")
modify(filename)
global f_image
f_image = f'./output/final_output/{n3}'
image = cv2.imread(f_image)
window['-OUT-'].update(data=cv2.imencode('.png', image)[1].tobytes())
except:
continue
elif event == '-IN FILE-': # A single filename was chosen
filename = values['-IN FILE-']
if filename != prev_filename:
prev_filename = filename
try:
image = cv2.imread(filename)
window['-IN-'].update(data=cv2.imencode('.png', image)[1].tobytes())
except:
continue
# ----- Exit program -----
window.close()
|
Bringing-Old-Photos-Back-to-Life/GUI.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/GUI.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 3364
}
| 158 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import random
import torch
from torch.autograd import Variable
class ImagePool:
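    """History buffer of previously generated images: query() mixes the incoming
    generator outputs with samples stored from earlier iterations (once the pool
    is full each image has a 50% chance of being swapped with a stored one), so
    the discriminator sees a history of fakes rather than only the latest batch.
    """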
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images.data:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size - 1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
|
Bringing-Old-Photos-Back-to-Life/Global/util/image_pool.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/util/image_pool.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 601
}
| 159 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from .htsat import HTSATWrapper
def get_audio_encoder(name: str):
if name == "Cnn14":
return Cnn14
elif name == "HTSAT":
return HTSATWrapper
else:
raise Exception('The audio encoder name {} is incorrect or not supported'.format(name))
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class ConvBlock5x5(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock5x5, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(5, 5), stride=(1, 1),
padding=(2, 2), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class AttBlock(nn.Module):
def __init__(self, n_in, n_out, activation='linear', temperature=1.):
super(AttBlock, self).__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.bn_att = nn.BatchNorm1d(n_out)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
class Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, out_emb):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
# out_emb is 2048 for best Cnn14
self.fc1 = nn.Linear(2048, out_emb, bias=True)
self.fc_audioset = nn.Linear(out_emb, classes_num, bias=True)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)
"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding}
return output_dict
|
CLAP/msclap/models/audio.py/0
|
{
"file_path": "CLAP/msclap/models/audio.py",
"repo_id": "CLAP",
"token_count": 3644
}
| 160 |
[writers]
option-limit=0
|
COCO-LM/fairseq/docs/docutils.conf/0
|
{
"file_path": "COCO-LM/fairseq/docs/docutils.conf",
"repo_id": "COCO-LM",
"token_count": 10
}
| 161 |
# Fine-tuning BART on GLUE tasks
### 1) Download the data from the GLUE website (https://gluebenchmark.com/tasks) using the following commands:
```bash
wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py
python download_glue_data.py --data_dir glue_data --tasks all
```
### 2) Preprocess GLUE task data (same as RoBERTa):
```bash
./examples/roberta/preprocess_GLUE_tasks.sh glue_data <glue_task_name>
```
`glue_task_name` is one of the following:
`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}`
Use `ALL` for preprocessing all the glue tasks.
### 3) Fine-tuning on GLUE task:
Example fine-tuning command for the `RTE` task:
```bash
TOTAL_NUM_UPDATES=2036 # 10 epochs through RTE for bsz 16
WARMUP_UPDATES=61 # 6 percent of the number of updates
LR=1e-05 # Peak LR for polynomial LR scheduler.
NUM_CLASSES=2
MAX_SENTENCES=16 # Batch size.
BART_PATH=/path/to/bart/model.pt
CUDA_VISIBLE_DEVICES=0,1 fairseq-train RTE-bin/ \
--restore-file $BART_PATH \
--batch-size $MAX_SENTENCES \
--max-tokens 4400 \
--task sentence_prediction \
--add-prev-output-tokens \
--layernorm-embedding \
--share-all-embeddings \
--share-decoder-input-output-embed \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 \
--arch bart_large \
--criterion sentence_prediction \
--num-classes $NUM_CLASSES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.01 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 \
--clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--max-epoch 10 \
--find-unused-parameters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric;
```
For each of the GLUE tasks, you will need to use the following command-line arguments:
Model | MNLI | QNLI | QQP | RTE | SST-2 | MRPC | CoLA | STS-B
---|---|---|---|---|---|---|---|---
`--num-classes` | 3 | 2 | 2 | 2 | 2 | 2 | 2 | 1
`--lr` | 5e-6 | 1e-5 | 1e-5 | 1e-5 | 5e-6 | 2e-5 | 2e-5 | 2e-5
`bsz` | 128 | 32 | 32 | 32 | 128 | 64 | 64 | 32
`--total-num-update` | 30968 | 33112 | 113272 | 1018 | 5233 | 1148 | 1334 | 1799
`--warmup-updates` | 1858 | 1986 | 6796 | 61 | 314 | 68 | 80 | 107
For `STS-B` additionally add `--regression-target --best-checkpoint-metric loss` and remove `--maximize-best-checkpoint-metric`.
**Note:**
a) `--total-num-update` is used by the `polynomial_decay` scheduler and is calculated for `--max-epoch=10` and `--batch-size=32/64/128` depending on the task.
b) The above command-line arguments and hyperparameters were tested on an Nvidia `V100` GPU with `32GB` of memory for each task. Depending on the GPU memory available to you, you can increase `--update-freq` and reduce `--batch-size`.
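As a quick sanity check on these numbers (an added sketch, not part of the original recipe): `--warmup-updates` is simply 6 percent of `--total-num-update`, rounded to an integer. For example, for RTE:
```python
total_num_update = 1018              # RTE value from the table above
warmup_updates = round(0.06 * total_num_update)
print(warmup_updates)                # 61, matching the table
```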
### Inference on GLUE task
After training the model as mentioned in the previous step, you can perform inference with the checkpoints in the `checkpoints/` directory using the following Python code snippet:
```python
from fairseq.models.bart import BARTModel
bart = BARTModel.from_pretrained(
'checkpoints/',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='RTE-bin'
)
label_fn = lambda label: bart.task.label_dictionary.string(
[label + bart.task.label_dictionary.nspecial]
)
ncorrect, nsamples = 0, 0
bart.cuda()
bart.eval()
with open('glue_data/RTE/dev.tsv') as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[1], tokens[2], tokens[3]
tokens = bart.encode(sent1, sent2)
prediction = bart.predict('sentence_classification_head', tokens).argmax().item()
prediction_label = label_fn(prediction)
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples))
```
|
COCO-LM/fairseq/examples/bart/README.glue.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/bart/README.glue.md",
"repo_id": "COCO-LM",
"token_count": 1615
}
| 162 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
from subprocess import check_call
try:
import faiss
has_faiss = True
except ImportError:
has_faiss = False
import numpy as np
GB = 1024 * 1024 * 1024
def call(cmd):
print(cmd)
check_call(cmd, shell=True)
def get_batches(directory, lang, prefix="all_avg_pool"):
print(f"Finding in {directory}/{prefix}.{lang}*")
files = glob.glob(f"{directory}/{prefix}.{lang}*")
emb_files = []
txt_files = []
for emb_fi in files:
emb_files.append(emb_fi)
txt_fi = emb_fi.replace(prefix, "sentences")
txt_files.append(txt_fi)
return emb_files, txt_files
def load_batch(emb_file, dim):
embeddings = np.fromfile(emb_file, dtype=np.float32)
num_rows = int(embeddings.shape[0] / dim)
embeddings = embeddings.reshape((num_rows, dim))
faiss.normalize_L2(embeddings)
return embeddings
def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction="x2y"):
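    # Sharded k-NN search: for every batch of query embeddings, scan each batch of
    # target embeddings on GPU, then merge the per-batch hits and keep the global
    # top-k similarities and (offset-corrected) indices for each query row.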
if not has_faiss:
raise ImportError("Please install Faiss")
sims = []
inds = []
xfrom = 0
xto = 0
for x_batch_f in x_batches_f:
yfrom = 0
yto = 0
x_batch = load_batch(x_batch_f, dim)
xto = xfrom + x_batch.shape[0]
bsims, binds = [], []
for y_batch_f in y_batches_f:
y_batch = load_batch(y_batch_f, dim)
neighbor_size = min(k, y_batch.shape[0])
yto = yfrom + y_batch.shape[0]
print("{}-{} -> {}-{}".format(xfrom, xto, yfrom, yto))
idx = faiss.IndexFlatIP(dim)
idx = faiss.index_cpu_to_all_gpus(idx)
idx.add(y_batch)
bsim, bind = idx.search(x_batch, neighbor_size)
bsims.append(bsim)
binds.append(bind + yfrom)
yfrom += y_batch.shape[0]
del idx
del y_batch
bsims = np.concatenate(bsims, axis=1)
binds = np.concatenate(binds, axis=1)
aux = np.argsort(-bsims, axis=1)
sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32)
ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64)
for i in range(x_batch.shape[0]):
for j in range(k):
sim_batch[i, j] = bsims[i, aux[i, j]]
ind_batch[i, j] = binds[i, aux[i, j]]
sims.append(sim_batch)
inds.append(ind_batch)
xfrom += x_batch.shape[0]
del x_batch
sim = np.concatenate(sims, axis=0)
ind = np.concatenate(inds, axis=0)
return sim, ind
def score(sim, fwd_mean, bwd_mean, margin):
return margin(sim, (fwd_mean + bwd_mean) / 2)
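# With the ratio margin used in __main__ (margin = lambda a, b: a / b), this scores a
# candidate pair by its cosine similarity divided by the average of the two sentences'
# mean nearest-neighbour similarities, i.e. a margin-based mining criterion.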
def score_candidates(
sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False
):
print(" - scoring {:d} candidates".format(sim_mat.shape[0]))
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = int(candidate_inds[i, j])
scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin)
return scores
def load_text(files):
all_sentences = []
for fi in files:
with open(fi) as sentence_fi:
for line in sentence_fi:
all_sentences.append(line.strip())
print(f"Read {len(all_sentences)} sentences")
return all_sentences
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Mine bitext")
parser.add_argument("--src-lang", help="Source language")
parser.add_argument("--tgt-lang", help="Target language")
parser.add_argument(
"--dict-path", help="Path to dictionary file", default="dict.txt"
)
parser.add_argument(
"--spm-path", help="Path to SPM model file", default="sentence.bpe.model"
)
parser.add_argument("--dim", type=int, default=1024, help="Embedding dimension")
parser.add_argument("--mem", type=int, default=5, help="Memory in GB")
parser.add_argument("--src-dir", help="Source directory")
parser.add_argument("--tgt-dir", help="Target directory")
parser.add_argument("--output", help="Output path")
parser.add_argument(
"--neighborhood", type=int, default=4, help="Embedding dimension"
)
parser.add_argument(
"--threshold", type=float, default=1.06, help="Threshold on mined bitext"
)
parser.add_argument(
"--valid-size",
type=int,
default=2000,
help="Number of sentences used for validation set",
)
parser.add_argument(
"--min-count",
type=int,
default=50000,
help="Min num sentences used for each language",
)
args = parser.parse_args()
x_batches_f, x_sents_f = get_batches(args.src_dir, args.src_lang)
y_batches_f, y_sents_f = get_batches(args.tgt_dir, args.tgt_lang)
margin = lambda a, b: a / b
y2x_sim, y2x_ind = knnGPU_sharded(
y_batches_f, x_batches_f, args.dim, args.neighborhood, direction="y2x"
)
x2y_sim, x2y_ind = knnGPU_sharded(
x_batches_f, y_batches_f, args.dim, args.neighborhood, direction="x2y"
)
x2y_mean = x2y_sim.mean(axis=1)
y2x_mean = y2x_sim.mean(axis=1)
fwd_scores = score_candidates(x2y_sim, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y2x_sim, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x2y_sim.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y2x_sim.shape[0]), bwd_scores.argmax(axis=1)]
indices = np.stack(
(
np.concatenate((np.arange(x2y_ind.shape[0]), bwd_best)),
np.concatenate((fwd_best, np.arange(y2x_ind.shape[0]))),
),
axis=1,
)
scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
x_sentences = load_text(x_sents_f)
y_sentences = load_text(y_sents_f)
threshold = args.threshold
min_count = args.min_count
seen_src, seen_trg = set(), set()
directory = args.output
call(f"mkdir -p {directory}")
src_out = open(
f"{directory}/all.{args.src_lang}",
mode="w",
encoding="utf-8",
errors="surrogateescape",
)
tgt_out = open(
f"{directory}/all.{args.tgt_lang}",
mode="w",
encoding="utf-8",
errors="surrogateescape",
)
scores_out = open(
f"{directory}/all.scores", mode="w", encoding="utf-8", errors="surrogateescape"
)
count = 0
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
if src_ind not in seen_src and trg_ind not in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
if scores[i] > threshold or count < min_count:
if x_sentences[src_ind]:
print(scores[i], file=scores_out)
print(x_sentences[src_ind], file=src_out)
print(y_sentences[trg_ind], file=tgt_out)
count += 1
else:
print(f"Ignoring sentence: {x_sentences[src_ind]}")
src_out.close()
tgt_out.close()
scores_out.close()
print(f"Found {count} pairs for threshold={threshold}")
with open(f"{directory}/all.{args.src_lang}") as all_s, open(
f"{directory}/all.{args.tgt_lang}"
) as all_t, open(f"{directory}/valid.{args.src_lang}", "w") as valid_s, open(
f"{directory}/valid.{args.tgt_lang}", "w"
) as valid_t, open(
f"{directory}/train.{args.src_lang}", "w"
) as train_s, open(
f"{directory}/train.{args.tgt_lang}", "w"
) as train_t:
count = 0
for s_line, t_line in zip(all_s, all_t):
s_line = s_line.split("\t")[1]
t_line = t_line.split("\t")[1]
if count >= args.valid_size:
train_s.write(s_line)
train_t.write(t_line)
else:
valid_s.write(s_line)
valid_t.write(t_line)
count += 1
|
COCO-LM/fairseq/examples/criss/mining/mine.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/criss/mining/mine.py",
"repo_id": "COCO-LM",
"token_count": 4017
}
| 163 |
# Adaptive Input Representations for Neural Language Modeling (Baevski and Auli, 2018)
## Pre-trained models
Description | Parameters | Dataset | Model and Test set(s)
---|---:|---|---
Adaptive Inputs <br> ([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 1026M | [Google Billion Words](https://github.com/ciprian-chelba/1-billion-word-language-modeling-benchmark) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2)
Adaptive Inputs <br> ([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 247M | [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2)
## Training an LM with adaptive inputs
First, see the general [language modeling README](README.md) for instructions on
preprocessing the WikiText-103 data.
Then use the following training command to train a model with adaptive inputs
using the `transformer_lm_wiki103` model architecture:
```bash
fairseq-train --task language_modeling \
data-bin/wikitext-103 \
--save-dir checkpoints/transformer_wikitext-103 \
--arch transformer_lm_wiki103 \
--max-update 286000 --lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \
--warmup-updates 16000 --warmup-init-lr 1e-07 --stop-min-lr 1e-09 --optimizer nag --min-lr 0.0001 --clip-norm 0.1 \
--criterion adaptive_loss --max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
--sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=legacy_ddp
```
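Once the model is trained, its perplexity can be checked with `fairseq-eval-lm`. The command below is a minimal sketch (the checkpoint path and evaluation window sizes are illustrative, not tuned values); see the general [language modeling README](README.md) for the full evaluation recipe:
```bash
fairseq-eval-lm data-bin/wikitext-103 \
    --path checkpoints/transformer_wikitext-103/checkpoint_best.pt \
    --batch-size 2 \
    --tokens-per-sample 512 \
    --context-window 400
```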
## Citation
```bibtex
@inproceedings{
baevski2018adaptive,
title={Adaptive Input Representations for Neural Language Modeling},
author={Alexei Baevski and Michael Auli},
booktitle={International Conference on Learning Representations},
year={2019},
url={https://openreview.net/forum?id=ByxZX20qFQ},
}
```
|
COCO-LM/fairseq/examples/language_model/README.adaptive_inputs.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/language_model/README.adaptive_inputs.md",
"repo_id": "COCO-LM",
"token_count": 723
}
| 164 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch.nn as nn
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import TransformerDecoder, TransformerEncoder
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
from torch import Tensor
from ..modules.latent_layers import LayerSelect
class LatentTransformerEncoder(TransformerEncoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerEncoder.
"""
def __init__(self, args, dictionary, embed_tokens, num_logits=1):
self.num_logits = num_logits
self.num_layers = args.encoder_layers
super().__init__(args, dictionary, embed_tokens)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_encoder_layer(self, args, idx=None):
return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select)
def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False):
self.layer_select.sample(self.lang_idx)
return super().forward(src_tokens, src_lengths, return_all_hiddens)
class LatentTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerEncoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
"""
def __init__(self, args, idx, layer_select=None):
super().__init__(args)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
class LatentTransformerDecoder(TransformerDecoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerDecoder.
"""
def __init__(
self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1
):
self.num_logits = num_logits
self.num_layers = args.decoder_layers
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[
self._build_decoder_layer(args, no_encoder_attn, idx)
for idx in range(args.decoder_layers)
]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None):
return LatentTransformerDecoderLayer(
args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
self.layer_select.sample(self.lang_idx)
return super().forward(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
features_only=features_only,
alignment_layer=alignment_layer,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
class LatentTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerDecoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
idx,
layer_select=None,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
|
COCO-LM/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py",
"repo_id": "COCO-LM",
"token_count": 2408
}
| 165 |
import argparse
from collections import namedtuple
import os
DATADIR = "/path/to/train_data"
DEDUP_FROM_DIR = "/path/to/eval/data"
OUTPUT_DIR = "/path/to/output/data"
def main(args):
languages = set()
for language_directory in os.listdir(DATADIR):
if "_" in language_directory:
src, tgt = language_directory.split("_")
languages.add(LanguagePair(src=src, tgt=tgt))
data = existing_data()
train_languages = sorted(languages)
for language_pair in train_languages[args.start_index:args.start_index + args.size]:
print(language_pair)
dedup(language_pair, data)
LanguagePair = namedtuple("LanguagePair", ["src", "tgt"])
def existing_data():
data = set()
for file in os.listdir(DEDUP_FROM_DIR):
with open(os.path.join(DEDUP_FROM_DIR, file)) as f:
data |= set(f.readlines())
return data
def dedup(language_pair, data, verbose=True, output=True):
train_filenames = LanguagePair(
src=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.src}",
tgt=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.tgt}",
)
output_filenames = LanguagePair(
src=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.src}",
tgt=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.tgt}"
)
# If output exists, skip this pair. It has already been done.
if (os.path.exists(output_filenames.src) and
os.path.exists(output_filenames.tgt)):
if verbose:
print(f"{language_pair.src}-{language_pair.tgt} already done.")
return
if verbose:
print(f"{language_pair.src}-{language_pair.tgt} ready, will check dups.")
# If there is no output, no need to actually do the loop.
if not output:
return
if os.path.exists(train_filenames.src) and os.path.exists(train_filenames.tgt):
with open(train_filenames.src) as f:
train_source = f.readlines()
with open(train_filenames.tgt) as f:
train_target = f.readlines()
# do dedup
new_train_source = []
new_train_target = []
for i, train_line in enumerate(train_source):
if train_line not in data and train_target[i] not in data:
new_train_source.append(train_line)
new_train_target.append(train_target[i])
assert len(train_source) == len(train_target)
assert len(new_train_source) == len(new_train_target)
assert len(new_train_source) <= len(train_source)
with open(output_filenames.src, "w") as o:
for line in new_train_source:
o.write(line)
with open(output_filenames.tgt, "w") as o:
for line in new_train_target:
o.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start-index", required=True, type=int)
parser.add_argument("-n", "--size", required=True, type=int)
main(parser.parse_args())
|
COCO-LM/fairseq/examples/m2m_100/process_data/dedup_data.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/m2m_100/process_data/dedup_data.py",
"repo_id": "COCO-LM",
"token_count": 1410
}
| 166 |
# Install dependency
```bash
pip install -r requirement.txt
```
# Download the data set
```bash
export WORKDIR_ROOT=<a directory which will hold all working files>
```
The downloaded data will be at $WORKDIR_ROOT/ML50
# Preprocess the data
Install SPM [here](https://github.com/google/sentencepiece)
```bash
export WORKDIR_ROOT=<a directory which will hold all working files>
export SPM_PATH=<a path pointing to sentencepice spm_encode.py>
```
* $WORKDIR_ROOT/ML50/raw: extracted raw data
* $WORKDIR_ROOT/ML50/dedup: dedup data
* $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data
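A typical end-to-end invocation (a sketch, assuming the raw data is already under `$WORKDIR_ROOT/ML50/raw` and that `preprocess_ML50_v1.sh` in this directory is used as the entry point) looks like:
```bash
export WORKDIR_ROOT=/path/to/workdir
export SPM_PATH=/path/to/sentencepiece/spm_encode.py
bash ./preprocess_ML50_v1.sh
```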
|
COCO-LM/fairseq/examples/multilingual/data_scripts/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/README.md",
"repo_id": "COCO-LM",
"token_count": 207
}
| 167 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
if [ -z $WORKDIR_ROOT ] ;
then
echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..."
exit
fi
if [ -z $SPM_PATH ] ;
then
echo "Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting..."
exit
fi
ML50=${WORKDIR_ROOT}/ML50
mkdir -p $ML50/dedup
mkdir -p $ML50/clean
python ./dedup_all.py --from-folder $ML50/raw --to-folder $ML50/dedup
python ./remove_valid_test_in_train.py --from-folder $ML50/dedup --to-folder $ML50/clean
python ./binarize.py --raw-folder $ML50/clean
|
COCO-LM/fairseq/examples/multilingual/data_scripts/preprocess_ML50_v1.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/preprocess_ML50_v1.sh",
"repo_id": "COCO-LM",
"token_count": 295
}
| 168 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
import numpy as np
from fairseq import options
from examples.noisychannel import rerank, rerank_options
def random_search(args):
param_values = []
tuneable_parameters = ["lenpen", "weight1", "weight2", "weight3"]
initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3]
for i, elem in enumerate(initial_params):
if type(elem) is not list:
initial_params[i] = [elem]
else:
initial_params[i] = elem
tune_parameters = args.tune_param.copy()
for i in range(len(args.tune_param)):
assert args.upper_bound[i] >= args.lower_bound[i]
index = tuneable_parameters.index(args.tune_param[i])
del tuneable_parameters[index]
del initial_params[index]
tune_parameters += tuneable_parameters
param_values += initial_params
random.seed(args.seed)
random_params = np.array(
[
[
random.uniform(args.lower_bound[i], args.upper_bound[i])
for i in range(len(args.tune_param))
]
for k in range(args.num_trials)
]
)
set_params = np.array(
[
[initial_params[i][0] for i in range(len(tuneable_parameters))]
for k in range(args.num_trials)
]
)
random_params = np.concatenate((random_params, set_params), 1)
rerank_args = vars(args).copy()
if args.nbest_list:
rerank_args["gen_subset"] = "test"
else:
rerank_args["gen_subset"] = args.tune_subset
for k in range(len(tune_parameters)):
rerank_args[tune_parameters[k]] = list(random_params[:, k])
if args.share_weights:
k = tune_parameters.index("weight2")
rerank_args["weight3"] = list(random_params[:, k])
rerank_args = argparse.Namespace(**rerank_args)
best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank(
rerank_args
)
rerank_args = vars(args).copy()
rerank_args["lenpen"] = [best_lenpen]
rerank_args["weight1"] = [best_weight1]
rerank_args["weight2"] = [best_weight2]
rerank_args["weight3"] = [best_weight3]
# write the hypothesis from the valid set from the best trial
if args.gen_subset != "valid":
rerank_args["gen_subset"] = "valid"
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
# test with the best hyperparameters on gen subset
rerank_args = vars(args).copy()
rerank_args["gen_subset"] = args.gen_subset
rerank_args["lenpen"] = [best_lenpen]
rerank_args["weight1"] = [best_weight1]
rerank_args["weight2"] = [best_weight2]
rerank_args["weight3"] = [best_weight3]
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
def cli_main():
parser = rerank_options.get_tuning_parser()
args = options.parse_args_and_arch(parser)
random_search(args)
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/examples/noisychannel/rerank_tune.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/noisychannel/rerank_tune.py",
"repo_id": "COCO-LM",
"token_count": 1362
}
| 169 |
# Finetuning RoBERTa on GLUE tasks
### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using following commands:
```bash
wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py
python download_glue_data.py --data_dir glue_data --tasks all
```
### 2) Preprocess GLUE task data:
```bash
./examples/roberta/preprocess_GLUE_tasks.sh glue_data <glue_task_name>
```
`glue_task_name` is one of the following:
`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}`
Use `ALL` for preprocessing all the glue tasks.
### 3) Fine-tuning on GLUE task:
Example fine-tuning cmd for `RTE` task
```bash
TOTAL_NUM_UPDATES=2036 # 10 epochs through RTE for bsz 16
WARMUP_UPDATES=122 # 6 percent of the number of updates
LR=2e-05 # Peak LR for polynomial LR scheduler.
NUM_CLASSES=2
MAX_SENTENCES=16 # Batch size.
ROBERTA_PATH=/path/to/roberta/model.pt
CUDA_VISIBLE_DEVICES=0 fairseq-train RTE-bin/ \
--restore-file $ROBERTA_PATH \
--max-positions 512 \
--batch-size $MAX_SENTENCES \
--max-tokens 4400 \
--task sentence_prediction \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 \
--arch roberta_large \
--criterion sentence_prediction \
--num-classes $NUM_CLASSES \
--dropout 0.1 --attention-dropout 0.1 \
--weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
--clip-norm 0.0 \
--lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
--fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
--max-epoch 10 \
--find-unused-parameters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric;
```
For each of the GLUE task, you will need to use following cmd-line arguments:
Model | MNLI | QNLI | QQP | RTE | SST-2 | MRPC | CoLA | STS-B
---|---|---|---|---|---|---|---|---
`--num-classes` | 3 | 2 | 2 | 2 | 2 | 2 | 2 | 1
`--lr` | 1e-5 | 1e-5 | 1e-5 | 2e-5 | 1e-5 | 1e-5 | 1e-5 | 2e-5
`--batch-size` | 32 | 32 | 32 | 16 | 32 | 16 | 16 | 16
`--total-num-update` | 123873 | 33112 | 113272 | 2036 | 20935 | 2296 | 5336 | 3598
`--warmup-updates` | 7432 | 1986 | 28318 | 122 | 1256 | 137 | 320 | 214
For `STS-B` additionally add `--regression-target --best-checkpoint-metric loss` and remove `--maximize-best-checkpoint-metric`.
**Note:**
a) `--total-num-update` is used by the `polynomial_decay` scheduler and is calculated for `--max-epoch=10` and `--batch-size=16/32` depending on the task.
b) The above cmd-args and hyperparams are tested on one Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory resources available to you, you can increase `--update-freq` and reduce `--batch-size`.
c) All the settings in above table are suggested settings based on our hyperparam search within a fixed search space (for careful comparison across models). You might be able to find better metrics with wider hyperparam search.
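For example, to fine-tune on `MRPC` you can reuse the `RTE` command above and substitute the values from the table (a sketch: only the changed variables are shown, the hyperparameters come straight from the table, and the data directory is assumed to be `MRPC-bin/` as produced by the preprocessing step):
```bash
TOTAL_NUM_UPDATES=2296
WARMUP_UPDATES=137
LR=1e-05
NUM_CLASSES=2
MAX_SENTENCES=16
# ...then run the same fairseq-train command as for RTE, replacing RTE-bin/ with MRPC-bin/
```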
### Inference on GLUE task
After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using following python code snippet:
```python
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(
'checkpoints/',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='RTE-bin'
)
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
with open('glue_data/RTE/dev.tsv') as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[1], tokens[2], tokens[3]
tokens = roberta.encode(sent1, sent2)
prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
prediction_label = label_fn(prediction)
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples))
```
|
COCO-LM/fairseq/examples/roberta/README.glue.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/README.glue.md",
"repo_id": "COCO-LM",
"token_count": 1643
}
| 170 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import lru_cache
def convert_sentence_to_json(sentence):
if "_" in sentence:
prefix, rest = sentence.split("_", 1)
query, rest = rest.split("_", 1)
query_index = len(prefix.rstrip().split(" "))
else:
query, query_index = None, None
prefix, rest = sentence.split("[", 1)
pronoun, rest = rest.split("]", 1)
pronoun_index = len(prefix.rstrip().split(" "))
sentence = sentence.replace("_", "").replace("[", "").replace("]", "")
return {
"idx": 0,
"text": sentence,
"target": {
"span1_index": query_index,
"span1_text": query,
"span2_index": pronoun_index,
"span2_text": pronoun,
},
}
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, "NONE"
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {"NOUN", "PROPN"} else "NONE"
if np_type != cur_np:
if cur_np != "NONE":
noun_chunks.add((np_start, i))
if np_type != "NONE":
np_start = i
cur_np = np_type
if cur_np != "NONE":
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i :].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i :]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i : next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang="en")
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and "label" in sample and not sample["label"]:
# only consider examples where the query is correct
continue
target = sample["target"]
# clean up the query
query = target["span1_text"]
if query is not None:
if "\n" in query:
continue
if query.endswith(".") or query.endswith(","):
query = query[:-1]
# split tokens
tokens = sample["text"].split(" ")
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target["span2_index"]
pronoun = strip_pronoun(target["span2_text"])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception("Misaligned pronoun!")
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1 :]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = " " if pronoun_idx > 0 else ""
trailing_space = " " if len(after) > 0 else ""
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith(".") or pronoun.endswith(","):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith(".") or after.startswith(","):
trailing_space = ""
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = "_{}_{}".format(
query_span.text,
(" " if query_span.text_with_ws.endswith(" ") else ""),
)
pronoun_with_ws = "[{}]{}".format(
pronoun_span.text,
(" " if pronoun_span.text_with_ws.endswith(" ") else ""),
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[: first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end : second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end :].text
)
yield sentence, sample.get("label", None)
else:
yield sentence, pronoun_span, query, sample.get("label", None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = (
sample["sentence"],
sample["option1"],
sample["option2"],
)
pronoun_span = (sentence.index("_"), sentence.index("_") + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample["answer"] == "1" else option2
cand = option2 if sample["answer"] == "1" else option1
yield sentence, pronoun_span, query, cand
def filter_noun_chunks(
chunks, exclude_pronouns=False, exclude_query=None, exact_match=False
):
if exclude_pronouns:
chunks = [
np
for np in chunks
if (np.lemma_ != "-PRON-" and not all(tok.pos_ == "PRON" for tok in np))
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
not exact_match and (lower_chunk in excl or excl in lower_chunk)
) or lower_chunk == excl:
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
|
COCO-LM/fairseq/examples/roberta/wsc/wsc_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/wsc/wsc_utils.py",
"repo_id": "COCO-LM",
"token_count": 4154
}
| 171 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import torch
from examples.simultaneous_translation.utils.latency import LatencyInference
LATENCY_METRICS = [
"differentiable_average_lagging",
"average_lagging",
"average_proportion",
]
class LatencyScorer:
def __init__(self, start_from_zero=True):
self.recorder = []
self.scores = {}
self.scorer = LatencyInference()
self.start_from_zero = start_from_zero
def update_reorder(self, list_of_dict):
self.recorder = []
for info in list_of_dict:
delays = [int(x) - int(not self.start_from_zero) for x in info["delays"]]
delays = torch.LongTensor(delays).unsqueeze(0)
src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
self.recorder.append(self.scorer(delays, src_len))
def cal_latency(self):
self.scores = {}
for metric in LATENCY_METRICS:
self.scores[metric] = sum(
[x[metric][0, 0].item() for x in self.recorder]
) / len(self.recorder)
return self.scores
@classmethod
def score(cls, list_of_dict, start_from_zero=True):
scorer_to_return = cls(start_from_zero)
scorer_to_return.update_reorder(list_of_dict)
scorer_to_return.cal_latency()
return scorer_to_return.scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True)
parser.add_argument("--start-from-zero", action="store_true")
args = parser.parse_args()
scorer = LatencyInference()
recorder = []
with open(args.input, "r") as f:
for line in f:
info = json.loads(line)
delays = [int(x) - int(not args.start_from_zero) for x in info["delays"]]
delays = torch.LongTensor(delays).unsqueeze(0)
src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
recorder.append(scorer(delays, src_len))
average_results = {}
for metric in LATENCY_METRICS:
average_results[metric] = sum([x[metric][0, 0].item() for x in recorder]) / len(
recorder
)
print(f"{metric}: {average_results[metric]}")
|
COCO-LM/fairseq/examples/simultaneous_translation/eval/eval_latency.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/eval_latency.py",
"repo_id": "COCO-LM",
"token_count": 1037
}
| 172 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LatencyMetric(object):
@staticmethod
def length_from_padding_mask(padding_mask, batch_first: bool = False):
dim = 1 if batch_first else 0
return padding_mask.size(dim) - padding_mask.sum(dim=dim, keepdim=True)
def prepare_latency_metric(
self,
delays,
src_lens,
target_padding_mask=None,
batch_first: bool = False,
start_from_zero: bool = True,
):
assert len(delays.size()) == 2
assert len(src_lens.size()) == 2
if start_from_zero:
delays = delays + 1
if batch_first:
# convert to batch_last
delays = delays.t()
src_lens = src_lens.t()
tgt_len, bsz = delays.size()
_, bsz_1 = src_lens.size()
if target_padding_mask is not None:
target_padding_mask = target_padding_mask.t()
tgt_len_1, bsz_2 = target_padding_mask.size()
assert tgt_len == tgt_len_1
assert bsz == bsz_2
assert bsz == bsz_1
if target_padding_mask is None:
tgt_lens = tgt_len * delays.new_ones([1, bsz]).float()
else:
# 1, batch_size
tgt_lens = self.length_from_padding_mask(target_padding_mask, False).float()
delays = delays.masked_fill(target_padding_mask, 0)
return delays, src_lens, tgt_lens, target_padding_mask
def __call__(
self,
delays,
src_lens,
target_padding_mask=None,
batch_first: bool = False,
start_from_zero: bool = True,
):
delays, src_lens, tgt_lens, target_padding_mask = self.prepare_latency_metric(
delays, src_lens, target_padding_mask, batch_first, start_from_zero
)
return self.cal_metric(delays, src_lens, tgt_lens, target_padding_mask)
@staticmethod
def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
"""
Expected sizes:
delays: tgt_len, batch_size
src_lens: 1, batch_size
target_padding_mask: tgt_len, batch_size
"""
raise NotImplementedError
class AverageProportion(LatencyMetric):
"""
Function to calculate Average Proportion from
Can neural machine translation do simultaneous translation?
(https://arxiv.org/abs/1606.02012)
Delays are monotonic steps, range from 1 to src_len.
    Given src x and tgt y, AP is calculated as:
    AP = 1 / (|x||y|) * sum_i^|y| delays_i
"""
@staticmethod
def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
if target_padding_mask is not None:
AP = torch.sum(
delays.masked_fill(target_padding_mask, 0), dim=0, keepdim=True
)
else:
AP = torch.sum(delays, dim=0, keepdim=True)
AP = AP / (src_lens * tgt_lens)
return AP
class AverageLagging(LatencyMetric):
"""
Function to calculate Average Lagging from
STACL: Simultaneous Translation with Implicit Anticipation
and Controllable Latency using Prefix-to-Prefix Framework
(https://arxiv.org/abs/1810.08398)
Delays are monotonic steps, range from 1 to src_len.
    Given src x and tgt y, AL is calculated as:
    AL = 1 / tau * sum_i^tau (delays_i - (i - 1) / gamma)
Where
gamma = |y| / |x|
tau = argmin_i(delays_i = |x|)
"""
@staticmethod
def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
# tau = argmin_i(delays_i = |x|)
tgt_len, bsz = delays.size()
lagging_padding_mask = delays >= src_lens
lagging_padding_mask = torch.nn.functional.pad(
lagging_padding_mask.t(), (1, 0)
).t()[:-1, :]
gamma = tgt_lens / src_lens
lagging = (
delays
- torch.arange(delays.size(0))
.unsqueeze(1)
.type_as(delays)
.expand_as(delays)
/ gamma
)
lagging.masked_fill_(lagging_padding_mask, 0)
tau = (1 - lagging_padding_mask.type_as(lagging)).sum(dim=0, keepdim=True)
AL = lagging.sum(dim=0, keepdim=True) / tau
return AL
class DifferentiableAverageLagging(LatencyMetric):
"""
Function to calculate Differentiable Average Lagging from
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
(https://arxiv.org/abs/1906.05218)
Delays are monotonic steps, range from 0 to src_len-1.
    (In the original paper they are from 1 to src_len.)
    Given src x and tgt y, DAL is calculated as:
    DAL = 1 / |y| * sum_i^|y| (delays'_i - (i - 1) / gamma)
Where
delays'_i =
1. delays_i if i == 1
2. max(delays_i, delays'_{i-1} + 1 / gamma)
"""
@staticmethod
def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
tgt_len, bsz = delays.size()
gamma = tgt_lens / src_lens
new_delays = torch.zeros_like(delays)
for i in range(delays.size(0)):
if i == 0:
new_delays[i] = delays[i]
else:
new_delays[i] = torch.cat(
[
new_delays[i - 1].unsqueeze(0) + 1 / gamma,
delays[i].unsqueeze(0),
],
dim=0,
).max(dim=0)[0]
DAL = (
new_delays
- torch.arange(delays.size(0))
.unsqueeze(1)
.type_as(delays)
.expand_as(delays)
/ gamma
)
if target_padding_mask is not None:
DAL = DAL.masked_fill(target_padding_mask, 0)
DAL = DAL.sum(dim=0, keepdim=True) / tgt_lens
return DAL
class LatencyMetricVariance(LatencyMetric):
def prepare_latency_metric(
self,
delays,
src_lens,
target_padding_mask=None,
batch_first: bool = True,
start_from_zero: bool = True,
):
assert batch_first
assert len(delays.size()) == 3
assert len(src_lens.size()) == 2
if start_from_zero:
delays = delays + 1
# convert to batch_last
bsz, num_heads_x_layers, tgt_len = delays.size()
bsz_1, _ = src_lens.size()
assert bsz == bsz_1
if target_padding_mask is not None:
bsz_2, tgt_len_1 = target_padding_mask.size()
assert tgt_len == tgt_len_1
assert bsz == bsz_2
if target_padding_mask is None:
tgt_lens = tgt_len * delays.new_ones([bsz, tgt_len]).float()
else:
# batch_size, 1
tgt_lens = self.length_from_padding_mask(target_padding_mask, True).float()
delays = delays.masked_fill(target_padding_mask.unsqueeze(1), 0)
return delays, src_lens, tgt_lens, target_padding_mask
class VarianceDelay(LatencyMetricVariance):
@staticmethod
def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
"""
delays : bsz, num_heads_x_layers, tgt_len
src_lens : bsz, 1
target_lens : bsz, 1
target_padding_mask: bsz, tgt_len or None
"""
if delays.size(1) == 1:
return delays.new_zeros([1])
variance_delays = delays.var(dim=1)
if target_padding_mask is not None:
variance_delays.masked_fill_(target_padding_mask, 0)
return variance_delays.sum(dim=1, keepdim=True) / tgt_lens
class LatencyInference(object):
def __init__(self, start_from_zero=True):
self.metric_calculator = {
"differentiable_average_lagging": DifferentiableAverageLagging(),
"average_lagging": AverageLagging(),
"average_proportion": AverageProportion(),
}
self.start_from_zero = start_from_zero
def __call__(self, monotonic_step, src_lens):
"""
monotonic_step range from 0 to src_len. src_len means eos
delays: bsz, tgt_len
src_lens: bsz, 1
"""
if not self.start_from_zero:
monotonic_step -= 1
src_lens = src_lens
delays = monotonic_step.view(
monotonic_step.size(0), -1, monotonic_step.size(-1)
).max(dim=1)[0]
delays = delays.masked_fill(delays >= src_lens, 0) + (src_lens - 1).expand_as(
delays
).masked_fill(delays < src_lens, 0)
return_dict = {}
for key, func in self.metric_calculator.items():
return_dict[key] = func(
delays.float(),
src_lens.float(),
target_padding_mask=None,
batch_first=True,
start_from_zero=True,
).t()
return return_dict
class LatencyTraining(object):
def __init__(
self,
avg_weight,
var_weight,
avg_type,
var_type,
stay_on_last_token,
average_method,
):
self.avg_weight = avg_weight
self.var_weight = var_weight
self.avg_type = avg_type
self.var_type = var_type
self.stay_on_last_token = stay_on_last_token
self.average_method = average_method
self.metric_calculator = {
"differentiable_average_lagging": DifferentiableAverageLagging(),
"average_lagging": AverageLagging(),
"average_proportion": AverageProportion(),
}
self.variance_calculator = {
"variance_delay": VarianceDelay(),
}
def expected_delays_from_attention(
self, attention, source_padding_mask=None, target_padding_mask=None
):
if type(attention) == list:
# bsz, num_heads, tgt_len, src_len
bsz, num_heads, tgt_len, src_len = attention[0].size()
attention = torch.cat(attention, dim=1)
bsz, num_heads_x_layers, tgt_len, src_len = attention.size()
# bsz * num_heads * num_layers, tgt_len, src_len
attention = attention.view(-1, tgt_len, src_len)
else:
# bsz * num_heads * num_layers, tgt_len, src_len
bsz, tgt_len, src_len = attention.size()
num_heads_x_layers = 1
attention = attention.view(-1, tgt_len, src_len)
if not self.stay_on_last_token:
residual_attention = 1 - attention[:, :, :-1].sum(dim=2, keepdim=True)
attention = torch.cat([attention[:, :, :-1], residual_attention], dim=2)
# bsz * num_heads_x_num_layers, tgt_len, src_len for MMA
steps = (
torch.arange(1, 1 + src_len)
.unsqueeze(0)
.unsqueeze(1)
.expand_as(attention)
.type_as(attention)
)
if source_padding_mask is not None:
src_offset = (
source_padding_mask.type_as(attention)
.sum(dim=1, keepdim=True)
.expand(bsz, num_heads_x_layers)
.contiguous()
.view(-1, 1)
)
src_lens = src_len - src_offset
if source_padding_mask[:, 0].any():
# Pad left
src_offset = src_offset.view(-1, 1, 1)
steps = steps - src_offset
steps = steps.masked_fill(steps <= 0, 0)
else:
src_lens = attention.new_ones([bsz, num_heads_x_layers]) * src_len
src_lens = src_lens.view(-1, 1)
# bsz * num_heads_num_layers, tgt_len, src_len
expected_delays = (
(steps * attention).sum(dim=2).view(bsz, num_heads_x_layers, tgt_len)
)
if target_padding_mask is not None:
expected_delays.masked_fill_(target_padding_mask.unsqueeze(1), 0)
return expected_delays, src_lens
def avg_loss(self, expected_delays, src_lens, target_padding_mask):
bsz, num_heads_x_layers, tgt_len = expected_delays.size()
target_padding_mask = (
target_padding_mask.unsqueeze(1)
.expand_as(expected_delays)
.contiguous()
.view(-1, tgt_len)
)
if self.average_method == "average":
# bsz * tgt_len
expected_delays = expected_delays.mean(dim=1)
elif self.average_method == "weighted_average":
weights = torch.nn.functional.softmax(expected_delays, dim=1)
expected_delays = torch.sum(expected_delays * weights, dim=1)
elif self.average_method == "max":
# bsz * num_heads_x_num_layers, tgt_len
expected_delays = expected_delays.max(dim=1)[0]
else:
raise RuntimeError(f"{self.average_method} is not supported")
src_lens = src_lens.view(bsz, -1)[:, :1]
target_padding_mask = target_padding_mask.view(bsz, -1, tgt_len)[:, 0]
if self.avg_weight > 0.0:
if self.avg_type in self.metric_calculator:
average_delays = self.metric_calculator[self.avg_type](
expected_delays,
src_lens,
target_padding_mask,
batch_first=True,
start_from_zero=False,
)
else:
raise RuntimeError(f"{self.avg_type} is not supported.")
# bsz * num_heads_x_num_layers, 1
return self.avg_weight * average_delays.sum()
else:
return 0.0
def var_loss(self, expected_delays, src_lens, target_padding_mask):
src_lens = src_lens.view(expected_delays.size(0), expected_delays.size(1))[
:, :1
]
if self.var_weight > 0.0:
if self.var_type in self.variance_calculator:
variance_delays = self.variance_calculator[self.var_type](
expected_delays,
src_lens,
target_padding_mask,
batch_first=True,
start_from_zero=False,
)
else:
raise RuntimeError(f"{self.var_type} is not supported.")
return self.var_weight * variance_delays.sum()
else:
return 0.0
def loss(self, attention, source_padding_mask=None, target_padding_mask=None):
expected_delays, src_lens = self.expected_delays_from_attention(
attention, source_padding_mask, target_padding_mask
)
latency_loss = 0
latency_loss += self.avg_loss(expected_delays, src_lens, target_padding_mask)
latency_loss += self.var_loss(expected_delays, src_lens, target_padding_mask)
return latency_loss
|
COCO-LM/fairseq/examples/simultaneous_translation/utils/latency.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/utils/latency.py",
"repo_id": "COCO-LM",
"token_count": 7473
}
| 173 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import itertools as it
import math
import os.path as osp
import warnings
from collections import deque, namedtuple
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.dataclass.constants import ChoiceEnum
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.fairseq_model import FairseqModel
from fairseq.utils import apply_to_sample
from omegaconf import MISSING, open_dict
try:
from flashlight.lib.sequence.criterion import (CpuViterbiPath,
get_data_ptr_as_bytes)
from flashlight.lib.text.decoder import (LM, CriterionType, DecodeResult,
KenLM, LexiconDecoder,
LexiconDecoderOptions,
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
LMState, SmearingMode, Trie)
from flashlight.lib.text.dictionary import create_word_dict, load_words
except ImportError:
warnings.warn(
"flashlight python bindings are required to use this functionality. "
"Please install from "
"https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
CRITERION_CHOICES = ChoiceEnum(["ctc", "asg"])
DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"])
@dataclass
class DecoderConfig(FairseqDataclass):
name: DECODER_CHOICES = field(
default="viterbi",
metadata={"help": "The type of decoder to use"},
)
nbest: int = field(
default=1,
metadata={"help": "Number of decodings to return"},
)
criterion: CRITERION_CHOICES = field(
default="ctc",
metadata={"help": "Criterion to use"},
)
asgtransitions: List[int] = field(
default=MISSING,
metadata={"help": "ASG transition indices"},
)
maxreplabel: int = field(
default=2,
metadata={"help": "Maximum repeated labels for ASG criterion"},
)
unitlm: bool = field(
default=False,
metadata={"help": "If set, use unit language model"},
)
lmpath: str = field(
default=MISSING,
metadata={"help": "Language model for KenLM decoder"},
)
lexicon: Optional[str] = field(
default=None,
metadata={"help": "Lexicon for Flashlight decoder"},
)
beam: int = field(
default=50,
metadata={"help": "Number of beams to use for decoding"},
)
beamthreshold: float = field(
default=15.0,
metadata={"help": "Threshold for beam search decoding"},
)
beamsizetoken: Optional[int] = field(
default=None,
metadata={"help": "Beam size to use"}
)
wordscore: float = field(
default=1.5,
metadata={"help": "Word score for KenLM decoder"},
)
unkweight: float = field(
default=-math.inf,
metadata={"help": "Unknown weight for KenLM decoder"},
)
silweight: float = field(
default=-0.3,
metadata={"help": "Silence weight for KenLM decoder"},
)
lmweight: float = field(
default=1.5,
metadata={"help": "Weight for LM while interpolating score"},
)
class BaseDecoder:
def __init__(self, cfg: DecoderConfig, tgt_dict: Dictionary) -> None:
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
if cfg.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
self.asgtransitions = None
elif cfg.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.silence = -1
self.asgtransitions = cfg.asgtransitions
self.maxreplabel = cfg.maxreplabel
assert len(self.asgtransitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {cfg.criterion}")
def generate(
self,
models: List[FairseqModel],
sample: Dict[str, Any],
**unused
) -> List[List[Dict[str, torch.LongTensor]]]:
encoder_input = {
k: v
for k, v in sample["net_input"].items()
if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(
self,
models: List[FairseqModel],
encoder_input: Dict[str, Any],
) -> torch.FloatTensor:
model = models[0]
encoder_out = model(**encoder_input)
if self.criterion_type == CriterionType.CTC:
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out)
else:
emissions = model.get_normalized_probs(
encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
else:
raise ValueError("Criterion not implemented: "
f"{self.criterion_type}")
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(
list(idxs), self.tgt_dict, self.maxreplabel)
return torch.LongTensor(list(idxs))
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
raise NotImplementedError
class ViterbiDecoder(BaseDecoder):
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
if self.asgtransitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asgtransitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(
CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class KenLMDecoder(BaseDecoder):
def __init__(self, cfg: DecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(cfg, tgt_dict)
if cfg.lexicon:
self.lexicon = load_words(cfg.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for word, spellings in self.lexicon.items():
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [
tgt_dict.index(token)
for token in spelling
]
assert tgt_dict.unk() not in spelling_idxs, \
f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asgtransitions is None:
self.asgtransitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asgtransitions,
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x)
for x in result.words if x >= 0
],
} for result in nbest_results
])
return hypos
FairseqLMState = namedtuple(
"FairseqLMState",
[
"prefix",
"incremental_state",
"probs",
]
)
class FairseqLM(LM):
def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None:
super().__init__()
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing: bool) -> LMState:
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(
prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(
lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(
self,
state: LMState,
token_index: int,
no_cache: bool = False,
    ) -> Tuple[LMState, float]:
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size: int) -> None:
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu(
).numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
    def finish(self, state: LMState) -> Tuple[LMState, float]:
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self) -> None:
self.states = {}
self.stateq = deque()
gc.collect()
class FairseqLMDecoder(BaseDecoder):
def __init__(self, cfg: DecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(cfg, tgt_dict)
self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(cfg.lmpath, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(cfg.lmpath)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unitlm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(
start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [
tgt_dict.index(token)
for token in spelling
]
assert tgt_dict.unk() not in spelling_idxs, \
f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asgtransitions is None:
self.asgtransitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asgtransitions,
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
def make_hypo(result: DecodeResult) -> Dict[str, Any]:
hypo = {
"tokens": self.get_tokens(result.tokens),
"score": result.score,
}
if self.lexicon:
hypo["words"] = [
self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
for x in result.words if x >= 0
]
return hypo
for b in range(B):
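            # emissions are assumed to be stored as float32, hence the 4-byte
            # element size used to compute the raw pointer offset below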
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[:self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
def Decoder(cfg: DecoderConfig, tgt_dict: Dictionary) -> BaseDecoder:
if cfg.name == "viterbi":
return ViterbiDecoder(cfg, tgt_dict)
if cfg.name == "kenlm":
return KenLMDecoder(cfg, tgt_dict)
if cfg.name == "fairseqlm":
return FairseqLMDecoder(cfg, tgt_dict)
raise NotImplementedError(f"Invalid decoder name: {cfg.name}")
|
COCO-LM/fairseq/examples/speech_recognition/hydra/decoder.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/hydra/decoder.py",
"repo_id": "COCO-LM",
"token_count": 10775
}
| 174 |
# Simultaneous Speech Translation (SimulST) on MuST-C
This is a tutorial on training and evaluating a transformer *wait-k* simultaneous model on the MuST-C English-German dataset, from [SimulMT to SimulST: Adapting Simultaneous Text Translation to End-to-End Simultaneous Speech Translation](https://www.aclweb.org/anthology/2020.aacl-main.58.pdf).
[MuST-C](https://www.aclweb.org/anthology/N19-1202) is a multilingual speech-to-text translation corpus with translations of English TED talks into 8 languages.
## Data Preparation
This section introduces the data preparation for training and evaluation.
If you only want to evaluate the model, please jump to [Inference & Evaluation](#inference-&-evaluation)
[Download](https://ict.fbk.eu/must-c) and unpack MuST-C data to a path
`${MUSTC_ROOT}/en-${TARGET_LANG_ID}`, then preprocess it with
```bash
# Additional Python packages for S2T data processing/model training
pip install pandas torchaudio sentencepiece
# Generate TSV manifests, features, vocabulary,
# global cepstral mean and variance (CMVN) estimation,
# and configuration for each language
cd fairseq
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task asr \
--vocab-type unigram --vocab-size 10000 \
--cmvn-type global
python examples/speech_to_text/prep_mustc_data.py \
--data-root ${MUSTC_ROOT} --task st \
--vocab-type unigram --vocab-size 10000 \
--cmvn-type global
```
## ASR Pretraining
We need a pretrained offline ASR model. Assuming the save directory of the ASR model is `${ASR_SAVE_DIR}`.
The following command (and the subsequent training commands in this tutorial) assume training on 1 GPU (you can also train on 8 GPUs and remove the `--update-freq 8` option).
```
fairseq-train ${MUSTC_ROOT}/en-de \
--config-yaml config_asr.yaml --train-subset train_asr --valid-subset dev_asr \
--save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch convtransformer_espnet --optimizer adam --lr 0.0005 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
```
A pretrained ASR checkpoint can be downloaded [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1_en_de_pretrained_asr)
## Simultaneous Speech Translation Training
### Wait-K with fixed pre-decision module
Fixed pre-decision means that the model applies its simultaneous policy at the boundaries of fixed-size chunks of encoder states.
Here is an example with a fixed pre-decision ratio of 7 (a simultaneous read/write decision is made every 7 encoder states) and
a wait-3 policy model; a simplified sketch of this policy follows the training command below. Assuming the save directory is `${ST_SAVE_DIR}`:
```bash
fairseq-train ${MUSTC_ROOT}/en-de \
--config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \
--save-dir ${ST_SAVE_DIR} --num-workers 8 \
--optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \
--criterion label_smoothed_cross_entropy \
--warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \
--load-pretrained-encoder-from ${ASR_SAVE_DIR}/checkpoint_best.pt \
--task speech_to_text \
--arch convtransformer_simul_trans_espnet \
--simul-type waitk_fixed_pre_decision \
--waitk-lagging 3 \
--fixed-pre-decision-ratio 7 \
--update-freq 8
```
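Conceptually, the policy groups encoder states into chunks of `--fixed-pre-decision-ratio` states, waits until `--waitk-lagging` chunks have been read, and then alternates between writing one target token and reading one more chunk. The sketch below is purely illustrative (it is not the fairseq agent implementation, and the function name is made up for this example):
```python
def waitk_fixed_pre_decision_actions(num_encoder_states, k=3, ratio=7):
    """Return an illustrative READ/WRITE action sequence for wait-k on fixed chunks."""
    num_chunks = -(-num_encoder_states // ratio)  # ceiling division
    actions = ["READ"] * min(k, num_chunks)       # wait for the first k chunks
    for _ in range(num_chunks - min(k, num_chunks)):
        actions += ["WRITE", "READ"]              # then alternate write / read
    actions.append("WRITE")                       # keep writing until EOS once the source is exhausted
    return actions
print(waitk_fixed_pre_decision_actions(70, k=3, ratio=7))
```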
### Monotonic multihead attention with fixed pre-decision module
```
fairseq-train ${MUSTC_ROOT}/en-de \
--config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \
--save-dir ${ST_SAVE_DIR} --num-workers 8 \
--optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \
--warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \
--load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} \
--task speech_to_text \
--criterion latency_augmented_label_smoothed_cross_entropy \
--latency-weight-avg 0.1 \
--arch convtransformer_simul_trans_espnet \
--simul-type infinite_lookback_fixed_pre_decision \
--fixed-pre-decision-ratio 7 \
--update-freq 8
```
## Inference & Evaluation
[SimulEval](https://github.com/facebookresearch/SimulEval) is used for evaluation.
The following commands install SimulEval and run the evaluation.
```
git clone https://github.com/facebookresearch/SimulEval.git
cd SimulEval
pip install -e .
simuleval \
    --agent ${FAIRSEQ}/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py \
    --source ${SRC_LIST_OF_AUDIO} \
    --target ${TGT_FILE} \
--data-bin ${MUSTC_ROOT}/en-de \
--config config_st.yaml \
--model-path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
--output ${OUTPUT} \
--scores
```
The source file `${SRC_LIST_OF_AUDIO}` is a list of paths to audio files. Assuming your audio files are stored at `/home/user/data`,
it should look like this:
```bash
/home/user/data/audio-1.wav
/home/user/data/audio-2.wav
```
Each line of the target file `${TGT_FILE}` is the translation of the corresponding audio file.
```bash
Translation_1
Translation_2
```
The `--data-bin` and `--config` arguments should match the previous section if you prepared the data from scratch.
If you only need to run evaluation, a prepared data directory can be found [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1.0_en_de_databin.tgz). It contains:
- `spm_unigram10000_st.model`: a sentencepiece model binary.
- `spm_unigram10000_st.txt`: the dictionary file generated by the sentencepiece model.
- `gcmvn.npz`: the global cepstral mean and variance (CMVN) statistics.
- `config_st.yaml`: the config yaml file; it looks like the example below.
You will need to set absolute paths for `sentencepiece_model` and `stats_npz_path` if you downloaded the data directory.
```yaml
bpe_tokenizer:
bpe: sentencepiece
sentencepiece_model: ABS_PATH_TO_SENTENCEPIECE_MODEL
global_cmvn:
stats_npz_path: ABS_PATH_TO_GCMVN_FILE
input_channels: 1
input_feat_per_channel: 80
sampling_alpha: 1.0
specaugment:
freq_mask_F: 27
freq_mask_N: 1
time_mask_N: 1
time_mask_T: 100
time_mask_p: 1.0
time_wrap_W: 0
transforms:
'*':
- global_cmvn
_train:
- global_cmvn
- specaugment
vocab_filename: spm_unigram10000_st.txt
```
Note that once `--data-bin` is set, `--config` is the base name of the config yaml, not the full path.
Set `--model-path` to the model checkpoint.
A pretrained checkpoint can be downloaded from [here](https://dl.fbaipublicfiles.com/simultaneous_translation/convtransformer_wait5_pre7), which is a wait-5 model with a pre-decision of 280 ms.
The output should look similar to the following:
```bash
{
"Quality": {
"BLEU": 12.79214535384013
},
"Latency": {
"AL": 1669.5778120018108,
"AL_CA": 2077.9027656104813,
"AP": 0.7652936521983029,
"AP_CA": 0.8891561507382866,
"DAL": 2028.1566141735727,
"DAL_CA": 2497.336430059716
}
}
```
If the `--output ${OUTPUT}` option is used, the detailed log and scores will be stored under the `${OUTPUT}` directory.
Quality is measured by detokenized BLEU, so make sure that the predicted words sent to the server are detokenized.
The latency metrics are
* Average Proportion
* Average Lagging
* Differentiable Average Lagging
They are also computed on detokenized text. A simplified sketch of how Average Proportion and Average Lagging are computed is shown below.
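The following sketch gives reference formulas for Average Proportion (AP) and Average Lagging (AL, following Ma et al., 2019). It is a simplification and not the exact SimulEval implementation, which also reports computation-aware variants (`AP_CA`, `AL_CA`) based on elapsed wall-clock time:
```python
def average_proportion(delays, src_len, tgt_len):
    # delays[i] = number of source items already read when target token i+1 is emitted
    return sum(delays) / (src_len * tgt_len)
def average_lagging(delays, src_len, tgt_len):
    gamma = tgt_len / src_len
    # tau: 1-based index of the first target token emitted after the full source was read
    tau = next((i for i, d in enumerate(delays, start=1) if d >= src_len), tgt_len)
    return sum(d - (i - 1) / gamma for i, d in enumerate(delays[:tau], start=1)) / tau
# Example: a wait-3 style schedule on a 10-frame source producing 8 target tokens
print(average_proportion([3, 4, 5, 6, 7, 8, 9, 10], 10, 8))  # 0.65
print(average_lagging([3, 4, 5, 6, 7, 8, 9, 10], 10, 8))     # 2.125
```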
|
COCO-LM/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md",
"repo_id": "COCO-LM",
"token_count": 2709
}
| 175 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LogSumExpMoE(torch.autograd.Function):
"""Standard LogSumExp forward pass, but use *posterior* for the backward.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
@staticmethod
def forward(ctx, logp, posterior, dim=-1):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
(posterior,) = ctx.saved_tensors
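        # The exact gradient of logsumexp w.r.t. logp would be softmax(logp);
        # here the incoming gradient is instead scaled by the supplied posterior.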
grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
return grad_logp, None, None
|
COCO-LM/fairseq/examples/translation_moe/translation_moe_src/logsumexp_moe.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/translation_moe/translation_moe_src/logsumexp_moe.py",
"repo_id": "COCO-LM",
"token_count": 319
}
| 176 |
# Unsupervised Cross-lingual Representation Learning at Scale (XLM-RoBERTa)
https://arxiv.org/pdf/1911.02116.pdf
## Introduction
XLM-R (XLM-RoBERTa) is a generic cross-lingual sentence encoder that obtains state-of-the-art results on many cross-lingual understanding (XLU) benchmarks. It is trained on 2.5TB of filtered CommonCrawl data in 100 languages (list below).
Language | Language|Language |Language | Language
---|---|---|---|---
Afrikaans | Albanian | Amharic | Arabic | Armenian
Assamese | Azerbaijani | Basque | Belarusian | Bengali
Bengali Romanize | Bosnian | Breton | Bulgarian | Burmese
Burmese zawgyi font | Catalan | Chinese (Simplified) | Chinese (Traditional) | Croatian
Czech | Danish | Dutch | English | Esperanto
Estonian | Filipino | Finnish | French | Galician
Georgian | German | Greek | Gujarati | Hausa
Hebrew | Hindi | Hindi Romanize | Hungarian | Icelandic
Indonesian | Irish | Italian | Japanese | Javanese
Kannada | Kazakh | Khmer | Korean | Kurdish (Kurmanji)
Kyrgyz | Lao | Latin | Latvian | Lithuanian
Macedonian | Malagasy | Malay | Malayalam | Marathi
Mongolian | Nepali | Norwegian | Oriya | Oromo
Pashto | Persian | Polish | Portuguese | Punjabi
Romanian | Russian | Sanskrit | Scottish Gaelic | Serbian
Sindhi | Sinhala | Slovak | Slovenian | Somali
Spanish | Sundanese | Swahili | Swedish | Tamil
Tamil Romanize | Telugu | Telugu Romanize | Thai | Turkish
Ukrainian | Urdu | Urdu Romanize | Uyghur | Uzbek
Vietnamese | Welsh | Western Frisian | Xhosa | Yiddish
## Pre-trained models
Model | Description | #params | vocab size | Download
---|---|---|---|---
`xlmr.base` | XLM-R using the BERT-base architecture | 250M | 250k | [xlm.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz)
`xlmr.large` | XLM-R using the BERT-large architecture | 560M | 250k | [xlm.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz)
(Note: the above are the final model checkpoints. If you were using the previously released `v0` versions, we recommend switching to the above; they have the same architecture and dictionary.)
## Results
**[XNLI (Conneau et al., 2018)](https://arxiv.org/abs/1809.05053)**
Model | average | en | fr | es | de | el | bg | ru | tr | ar | vi | th | zh | hi | sw | ur
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
`roberta.large.mnli` _(TRANSLATE-TEST)_ | 77.8 | 91.3 | 82.9 | 84.3 | 81.2 | 81.7 | 83.1 | 78.3 | 76.8 | 76.6 | 74.2 | 74.1 | 77.5 | 70.9 | 66.7 | 66.8
`xlmr.large` _(TRANSLATE-TRAIN-ALL)_ | **83.6** | 89.1 | 85.1 | 86.6 | 85.7 | 85.3 | 85.9 | 83.5 | 83.2 | 83.1 | 83.7 | 81.5 | 83.7 | 81.6 | 78.0 | 78.1
**[MLQA (Lewis et al., 2018)](https://arxiv.org/abs/1910.07475)**
Model | average | en | es | de | ar | hi | vi | zh
---|---|---|---|---|---|---|---|---
`BERT-large` | - | 80.2/67.4 | - | - | - | - | - | -
`mBERT` | 57.7 / 41.6 | 77.7 / 65.2 | 64.3 / 46.6 | 57.9 / 44.3 | 45.7 / 29.8| 43.8 / 29.7 | 57.1 / 38.6 | 57.5 / 37.3
`xlmr.large` | **70.7 / 52.7** | 80.6 / 67.8 | 74.1 / 56.0 | 68.5 / 53.6 | 63.1 / 43.5 | 69.2 / 51.6 | 71.3 / 50.9 | 68.0 / 45.4
## Example usage
##### Load XLM-R from torch.hub (PyTorch >= 1.1):
```python
import torch
xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
xlmr.eval() # disable dropout (or leave in train mode to finetune)
```
##### Load XLM-R (for PyTorch 1.0 or custom models):
```python
# Download xlmr.large model
wget https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz
tar -xzvf xlmr.large.tar.gz
# Load the model in fairseq
from fairseq.models.roberta import XLMRModel
xlmr = XLMRModel.from_pretrained('/path/to/xlmr.large', checkpoint_file='model.pt')
xlmr.eval() # disable dropout (or leave in train mode to finetune)
```
##### Apply sentence-piece-model (SPM) encoding to input text:
```python
en_tokens = xlmr.encode('Hello world!')
assert en_tokens.tolist() == [0, 35378, 8999, 38, 2]
xlmr.decode(en_tokens) # 'Hello world!'
zh_tokens = xlmr.encode('你好,世界')
assert zh_tokens.tolist() == [0, 6, 124084, 4, 3221, 2]
xlmr.decode(zh_tokens) # '你好,世界'
hi_tokens = xlmr.encode('नमस्ते दुनिया')
assert hi_tokens.tolist() == [0, 68700, 97883, 29405, 2]
xlmr.decode(hi_tokens) # 'नमस्ते दुनिया'
ar_tokens = xlmr.encode('مرحبا بالعالم')
assert ar_tokens.tolist() == [0, 665, 193478, 258, 1705, 77796, 2]
xlmr.decode(ar_tokens) # 'مرحبا بالعالم'
fr_tokens = xlmr.encode('Bonjour le monde')
assert fr_tokens.tolist() == [0, 84602, 95, 11146, 2]
xlmr.decode(fr_tokens) # 'Bonjour le monde'
```
##### Extract features from XLM-R:
```python
# Extract the last layer's features
last_layer_features = xlmr.extract_features(zh_tokens)
assert last_layer_features.size() == torch.Size([1, 6, 1024])
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = xlmr.extract_features(zh_tokens, return_all_hiddens=True)
assert len(all_layers) == 25
assert torch.all(all_layers[-1] == last_layer_features)
```
## Citation
```bibtex
@article{conneau2019unsupervised,
title={Unsupervised Cross-lingual Representation Learning at Scale},
author={Conneau, Alexis and Khandelwal, Kartikay and Goyal, Naman and Chaudhary, Vishrav and Wenzek, Guillaume and Guzm{\'a}n, Francisco and Grave, Edouard and Ott, Myle and Zettlemoyer, Luke and Stoyanov, Veselin},
journal={arXiv preprint arXiv:1911.02116},
year={2019}
}
```
|
COCO-LM/fairseq/examples/xlmr/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/xlmr/README.md",
"repo_id": "COCO-LM",
"token_count": 2169
}
| 177 |
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <torch/extension.h>
torch::Tensor LevenshteinDistanceCuda(
torch::Tensor source,
torch::Tensor target,
torch::Tensor source_length,
torch::Tensor target_length);
torch::Tensor GenerateDeletionLabelCuda(
torch::Tensor source,
torch::Tensor operations);
std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
torch::Tensor source,
torch::Tensor operations);
|
COCO-LM/fairseq/fairseq/clib/libnat_cuda/edit_dist.h/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/clib/libnat_cuda/edit_dist.h",
"repo_id": "COCO-LM",
"token_count": 242
}
| 178 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.constants import DDP_BACKEND_CHOICES
from omegaconf import II
@dataclass
class AdaptiveLossConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
ddp_backend: DDP_BACKEND_CHOICES = II("distributed_training.ddp_backend")
@register_criterion("adaptive_loss", dataclass=AdaptiveLossConfig)
class AdaptiveLoss(FairseqCriterion):
"""This is an implementation of the loss function accompanying the adaptive softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
(http://arxiv.org/abs/1609.04309)."""
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
@classmethod
def build_criterion(cls, cfg: AdaptiveLossConfig, task):
if cfg.ddp_backend in {"c10d", "pytorch_ddp"}:
raise Exception(
"AdaptiveLoss is not compatible with the PyTorch "
"version of DistributedDataParallel. Please use "
"`--ddp-backend=legacy_ddp` instead."
)
return cls(task, cfg.sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model.decoder, "adaptive_softmax")
and model.decoder.adaptive_softmax is not None
)
adaptive_softmax = model.decoder.adaptive_softmax
net_output = model(**sample["net_input"])
orig_target = model.get_targets(sample, net_output)
nsentences = orig_target.size(0)
orig_target = orig_target.view(-1)
bsz = orig_target.size(0)
logits, target = adaptive_softmax(net_output[0], orig_target)
assert len(target) == len(logits)
loss = net_output[0].new(1 if reduce else bsz).zero_()
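        # adaptive_softmax returns one (logits, target) pair per cluster (head + tails);
        # accumulate the cross-entropy over the clusters that actually contain targets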
for i in range(len(target)):
if target[i] is not None:
assert target[i].min() >= 0 and target[i].max() <= logits[i].size(1)
loss += F.cross_entropy(
logits[i],
target[i],
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
orig = utils.strip_pad(orig_target, self.padding_idx)
ntokens = orig.numel()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/fairseq/criterions/adaptive_loss.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/adaptive_loss.py",
"repo_id": "COCO-LM",
"token_count": 1998
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .dictionary import Dictionary, TruncatedDictionary
from .fairseq_dataset import FairseqDataset, FairseqIterableDataset
from .base_wrapper_dataset import BaseWrapperDataset
from .add_target_dataset import AddTargetDataset
from .append_token_dataset import AppendTokenDataset
from .audio.raw_audio_dataset import FileAudioDataset
from .backtranslation_dataset import BacktranslationDataset
from .bucket_pad_length_dataset import BucketPadLengthDataset
from .colorize_dataset import ColorizeDataset
from .concat_dataset import ConcatDataset
from .concat_sentences_dataset import ConcatSentencesDataset
from .denoising_dataset import DenoisingDataset
from .id_dataset import IdDataset
from .indexed_dataset import (
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
MMapIndexedDataset,
)
from .language_pair_dataset import LanguagePairDataset
from .list_dataset import ListDataset
from .lm_context_window_dataset import LMContextWindowDataset
from .lru_cache_dataset import LRUCacheDataset
from .mask_tokens_dataset import MaskTokensDataset
from .monolingual_dataset import MonolingualDataset
from .multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from .nested_dictionary_dataset import NestedDictionaryDataset
from .noising import NoisingDataset
from .numel_dataset import NumelDataset
from .num_samples_dataset import NumSamplesDataset
from .offset_tokens_dataset import OffsetTokensDataset
from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset
from .prepend_dataset import PrependDataset
from .prepend_token_dataset import PrependTokenDataset
from .raw_label_dataset import RawLabelDataset, RawArrayDataset
from .replace_dataset import ReplaceDataset
from .resampling_dataset import ResamplingDataset
from .roll_dataset import RollDataset
from .round_robin_zip_datasets import RoundRobinZipDatasets
from .sort_dataset import SortDataset
from .strip_token_dataset import StripTokenDataset
from .subsample_dataset import SubsampleDataset
from .token_block_dataset import TokenBlockDataset
from .transform_eos_dataset import TransformEosDataset
from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset
from .shorten_dataset import TruncateDataset, RandomCropDataset
from .span_dataset import SpanDataset
from .multilingual.sampled_multi_dataset import SampledMultiDataset
from .multilingual.sampled_multi_epoch_dataset import SampledMultiEpochDataset
from .fasta_dataset import FastaDataset, EncodedFastaDataset
from .iterators import (
CountingIterator,
EpochBatchIterator,
GroupedIterator,
ShardedIterator,
)
__all__ = [
"AddTargetDataset",
"AppendTokenDataset",
"BacktranslationDataset",
"BaseWrapperDataset",
"BucketPadLengthDataset",
"ColorizeDataset",
"ConcatDataset",
"ConcatSentencesDataset",
"CountingIterator",
"DenoisingDataset",
"Dictionary",
"EncodedFastaDataset",
"EpochBatchIterator",
"FairseqDataset",
"FairseqIterableDataset",
"FastaDataset",
"GroupedIterator",
"IdDataset",
"IndexedCachedDataset",
"IndexedDataset",
"IndexedRawTextDataset",
"LanguagePairDataset",
"LeftPadDataset",
"ListDataset",
"LMContextWindowDataset",
"LRUCacheDataset",
"MaskTokensDataset",
"MMapIndexedDataset",
"MonolingualDataset",
"MultiCorpusSampledDataset",
"NestedDictionaryDataset",
"NoisingDataset",
"NumelDataset",
"NumSamplesDataset",
"OffsetTokensDataset",
"PadDataset",
"PrependDataset",
"PrependTokenDataset",
"ReplaceDataset",
"RollDataset",
"FileAudioDataset",
"RawLabelDataset",
"ResamplingDataset",
"RightPadDataset",
"RoundRobinZipDatasets",
"SampledMultiDataset",
"SampledMultiEpochDataset",
"ShardedIterator",
"SortDataset",
"StripTokenDataset",
"SubsampleDataset",
"TokenBlockDataset",
"TransformEosDataset",
"TransformEosLangPairDataset",
"TruncateDataset",
"TruncatedDictionary",
]
|
COCO-LM/fairseq/fairseq/data/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/__init__.py",
"repo_id": "COCO-LM",
"token_count": 1595
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class ConcatSentencesDataset(FairseqDataset):
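    """Item-wise concatenation of several datasets: sample i is the torch.cat of the i-th item from each dataset."""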
def __init__(self, *datasets):
super().__init__()
self.datasets = datasets
assert all(
len(ds) == len(datasets[0]) for ds in datasets
), "datasets must have the same length"
def __getitem__(self, index):
return torch.cat([ds[index] for ds in self.datasets])
def __len__(self):
return len(self.datasets[0])
def collater(self, samples):
return self.datasets[0].collater(samples)
@property
def sizes(self):
return sum(ds.sizes for ds in self.datasets)
def num_tokens(self, index):
return sum(ds.num_tokens(index) for ds in self.datasets)
def size(self, index):
return sum(ds.size(index) for ds in self.datasets)
def ordered_indices(self):
return self.datasets[0].ordered_indices()
@property
def supports_prefetch(self):
return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets)
def prefetch(self, indices):
for ds in self.datasets:
if getattr(ds, "supports_prefetch", False):
ds.prefetch(indices)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
|
COCO-LM/fairseq/fairseq/data/concat_sentences_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/concat_sentences_dataset.py",
"repo_id": "COCO-LM",
"token_count": 685
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass
@dataclass
class MosesTokenizerConfig(FairseqDataclass):
source_lang: str = field(default="en", metadata={"help": "source language"})
target_lang: str = field(default="en", metadata={"help": "target language"})
moses_no_dash_splits: bool = field(
default=False, metadata={"help": "don't apply dash split rules"}
)
moses_no_escape: bool = field(
default=False,
metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."},
)
@register_tokenizer("moses", dataclass=MosesTokenizerConfig)
class MosesTokenizer(object):
def __init__(self, cfg: MosesTokenizerConfig):
self.cfg = cfg
try:
from sacremoses import MosesTokenizer, MosesDetokenizer
self.tok = MosesTokenizer(cfg.source_lang)
self.detok = MosesDetokenizer(cfg.target_lang)
except ImportError:
raise ImportError(
"Please install Moses tokenizer with: pip install sacremoses"
)
def encode(self, x: str) -> str:
return self.tok.tokenize(
x,
aggressive_dash_splits=(not self.cfg.moses_no_dash_splits),
return_str=True,
escape=(not self.cfg.moses_no_escape),
)
def decode(self, x: str) -> str:
return self.detok.detokenize(x.split())
|
COCO-LM/fairseq/fairseq/data/encoders/moses_tokenizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/moses_tokenizer.py",
"repo_id": "COCO-LM",
"token_count": 660
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import BaseWrapperDataset
class ListDataset(BaseWrapperDataset):
def __init__(self, dataset, sizes=None):
super().__init__(dataset)
self._sizes = sizes
def __iter__(self):
for x in self.dataset:
yield x
def collater(self, samples):
return samples
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def set_epoch(self, epoch):
pass
|
COCO-LM/fairseq/fairseq/data/list_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/list_dataset.py",
"repo_id": "COCO-LM",
"token_count": 292
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class NumelDataset(BaseWrapperDataset):
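    """Wraps a dataset and returns the number of elements in each item (e.g. to report token counts)."""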
def __init__(self, dataset, reduce=False):
super().__init__(dataset)
self.reduce = reduce
def __getitem__(self, index):
item = self.dataset[index]
if torch.is_tensor(item):
return torch.numel(item)
else:
return np.size(item)
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if self.reduce:
return sum(samples)
else:
return torch.tensor(samples)
|
COCO-LM/fairseq/fairseq/data/numel_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/numel_dataset.py",
"repo_id": "COCO-LM",
"token_count": 332
}
| 184 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os, collections
import pickle
import logging
import numpy as np
import six
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False,
answers=[]):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.answers = answers
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (str(self.qas_id))
s += ", question_text: %s" % (
str(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        if self.start_position:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position:
            s += ", end_position: %d" % (self.end_position)
        if self.is_impossible:
            s += ", is_impossible: %r" % (self.is_impossible)
        if self.orig_answer_text:
            s += ", orig_answer_text: %s" % (self.orig_answer_text)
s += ", answer_text: %s" % (' '.join(self.doc_tokens[self.start_position: self.end_position + 1]))
return s
class SquadFeature(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
p_mask,
doc_offset,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.p_mask = p_mask
self.doc_offset = doc_offset
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = qa.get("is_impossible", False)
answers = []
if is_training:
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
print("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
elif not is_impossible:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible,
answers=answers)
examples.append(example)
return examples
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride,
max_query_length, is_training,
cls_token='[CLS]', sep_token='[SEP]', additional_seq=True):
features = []
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
p_mask = []
token_to_orig_map = {}
token_is_max_context = {}
tokens.append(cls_token)
p_mask.append(0)
for token in query_tokens:
tokens.append(token)
p_mask.append(1)
tokens.append(sep_token)
p_mask.append(1)
if additional_seq:
tokens.append(sep_token)
p_mask.append(1)
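            # doc_offset marks where document tokens start in the full sequence,
            # i.e. after [CLS] + query tokens + separator token(s)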
doc_offset = len(tokens)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
p_mask.append(0)
tokens.append(sep_token)
p_mask.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(p_mask) == len(input_ids)
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 10:
print("*** Example ***")
print("unique_id: %s" % (unique_id))
print("example_index: %s" % (example_index))
print("doc_span_index: %s" % (doc_span_index))
print("tokens: %s" % " ".join(
[x for x in tokens]))
print("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
print("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
print("input_ids: %s" % " ".join([str(x) for x in input_ids]))
if is_training and example.is_impossible:
print("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
print("start_position: %d" % (start_position))
print("end_position: %d" % (end_position))
print("answer: %s" % (answer_text))
feature = SquadFeature(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
p_mask=p_mask,
doc_offset=doc_offset,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
features.append(feature)
unique_id += 1
return features
|
COCO-LM/fairseq/fairseq/data/squad/squad_extractor.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/squad/squad_extractor.py",
"repo_id": "COCO-LM",
"token_count": 8408
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from typing import Optional
import torch
from fairseq.dataclass.configs import DistributedTrainingConfig
from fairseq.distributed import utils as dist_utils
try:
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
has_FSDP = True
except ImportError:
FSDP = torch.nn.Module
has_FSDP = False
class FullyShardedDataParallel(FSDP):
"""
A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some
fairseq-specific checkpoint saving/loading logic.
Args:
use_sharded_state (bool): if True, then ``state_dict`` will return
``FSDP.local_state_dict`` and ``load_state_dict`` will call
``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will
return the full model weights on data parallel rank 0 (empty on
other ranks) and ``load_state_dict`` will broadcast model weights
from rank 0 to other ranks.
"""
def __init__(self, *args, use_sharded_state: bool = False, **kwargs):
if not has_FSDP:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
super().__init__(*args, **kwargs)
self.use_sharded_state = use_sharded_state
@property
def unwrapped_module(self) -> torch.nn.Module:
if self.flatten_parameters:
return self.module.module
else:
return self.module
def state_dict(self, destination=None, prefix='', keep_vars=False):
if self.use_sharded_state:
return super().local_state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
else:
if self.rank == 0:
return super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
else:
# We must call state_dict() due to use of communication
# primitives. But we don't use the result.
super().state_dict()
return destination or {}
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
if self.use_sharded_state:
return super().load_local_state_dict(state_dict, strict=strict)
else:
state_dict = dist_utils.broadcast_object(
state_dict, src_rank=0, group=self.process_group
)
return super().load_state_dict(state_dict, strict=strict)
@contextlib.contextmanager
def fsdp_enable_wrap(cfg: DistributedTrainingConfig, use_sharded_state: bool = False):
try:
from fairscale.nn import enable_wrap
except ImportError:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
if cfg.memory_efficient_fp16:
assert cfg.fp16 # memory_efficient_fp16 should imply fp16
group = dist_utils.get_data_parallel_group()
if group is None and cfg.distributed_world_size == 1:
from fairscale.utils.testing import DummyProcessGroup
group = DummyProcessGroup(rank=0, size=1)
fsdp_config = {
"process_group": group,
"reshard_after_forward": not cfg.no_reshard_after_forward,
"mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16,
"fp32_reduce_scatter": cfg.fp32_reduce_scatter,
"flatten_parameters": True,
"cpu_offload": cfg.cpu_offload,
"compute_dtype": torch.float16 if cfg.fp16 else torch.float32,
"bucket_cap_mb": cfg.bucket_cap_mb,
}
with enable_wrap(
wrapper_cls=FullyShardedDataParallel,
use_sharded_state=use_sharded_state,
**fsdp_config,
):
yield
def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs):
"""
Helper to wrap layers/modules in FSDP. This falls back to a no-op if
fairscale is not available.
Args:
module (nn.Module): module to (maybe) wrap
min_num_params (int, Optional): minimum number of layer params to wrap
"""
try:
from fairscale.nn import wrap
if min_num_params is not None:
num_params = sum(p.numel() for p in module.parameters())
if num_params >= min_num_params:
return wrap(module, **kwargs)
else:
return module
else:
return wrap(module, **kwargs)
except ImportError:
return module
|
COCO-LM/fairseq/fairseq/distributed/fully_sharded_data_parallel.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/distributed/fully_sharded_data_parallel.py",
"repo_id": "COCO-LM",
"token_count": 2045
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
try:
from fairseq.model_parallel.megatron.mpu.cross_entropy import (
vocab_parallel_cross_entropy,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@register_criterion("vocab_parallel_cross_entropy")
class VocabParallelCrossEntropyCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
target = sample["target"]
loss = vocab_parallel_cross_entropy(net_output[0].float(), target)
loss = (loss * (target != self.padding_idx)).sum()
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py",
"repo_id": "COCO-LM",
"token_count": 1356
}
| 187 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
from typing import Optional
import logging
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
logger = logging.getLogger(__name__)
@register_model("bart")
class BARTModel(TransformerModel):
__jit_unused_properties__ = ["supported_targets"]
@classmethod
def hub_models(cls):
return {
"bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz",
"bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz",
"bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz",
"bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz",
"bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz",
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
if hasattr(self.encoder, "dictionary"):
self.eos: int = self.encoder.dictionary.eos()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--spectral-norm-classification-head",
action="store_true",
help="Apply spectral normalization on the classification head",
)
@property
def supported_targets(self):
return {"self"}
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
features_only: bool = False,
classification_head_name: Optional[str] = None,
token_embeddings: Optional[torch.Tensor] = None,
return_all_hiddens: bool = True,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
token_embeddings=token_embeddings,
return_all_hiddens=return_all_hiddens
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
eos: int = self.eos
if classification_head_name is not None:
sentence_representation = x[
src_tokens.eq(eos), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]
for k, head in self.classification_heads.items():
                # TorchScript only supports iterating over a ModuleDict, so find the requested head by key
if k == classification_head_name:
x = head(sentence_representation)
break
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
sample_break_mode="eos",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
sample_break_mode=sample_break_mode,
**kwargs,
)
return BARTHubInterface(x["args"], x["task"], x["models"][0])
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
logger.info("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
do_spectral_norm=getattr(
self.args, "spectral_norm_classification_head", False
),
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
def truncate_emb(key):
if key in state_dict:
state_dict[key] = state_dict[key][:-1, :]
# When finetuning on translation task, remove last row of
# embedding matrix that corresponds to mask_idx token.
loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0)
if (
loaded_dict_size == len(self.encoder.dictionary) + 1
and "<mask>" not in self.encoder.dictionary
):
truncate_emb("encoder.embed_tokens.weight")
truncate_emb("decoder.embed_tokens.weight")
truncate_emb("encoder.output_projection.weight")
truncate_emb("decoder.output_projection.weight")
# When continued pretraining on new set of languages for mbart,
# add extra lang embeddings at the end of embed_tokens.
# Note: newly added languages are assumed to have been added at the end.
if self.args.task == "multilingual_denoising" and loaded_dict_size < len(
self.encoder.dictionary
):
logger.info(
"Adding extra language embeddings not found in pretrained model for "
"continued pretraining of MBART on new set of languages."
)
loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][
-1, :
]
num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
embed_dim = state_dict["encoder.embed_tokens.weight"].size(1)
new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5)
new_lang_embed_to_add = new_lang_embed_to_add.to(
dtype=state_dict["encoder.embed_tokens.weight"].dtype,
)
state_dict["encoder.embed_tokens.weight"] = torch.cat(
[
state_dict["encoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
state_dict["decoder.embed_tokens.weight"] = torch.cat(
[
state_dict["decoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
do_spectral_norm=False,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
if do_spectral_norm:
self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
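# Illustrative usage sketch (not part of the original file; dimensions are assumptions):
#
#   head = BARTClassificationHead(
#       input_dim=1024, inner_dim=1024, num_classes=3,
#       activation_fn="tanh", pooler_dropout=0.0,
#   )
#   sentence_rep = torch.randn(8, 1024)  # e.g. the decoder state at the final <eos> token
#   logits = head(sentence_rep)          # -> shape (8, 3)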
@register_model_architecture("bart", "bart_large")
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.max_source_positions = getattr(args, "max_source_positions", 1024)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
@register_model_architecture("bart", "bart_base")
def bart_base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_large")
def mbart_large_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_base")
def mbart_base_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_base_architecture(args)
@register_model_architecture("bart", "mbart_base_wmt20")
def mbart_base_wmt20_architecture(args):
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
mbart_base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/bart/model.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/bart/model.py",
"repo_id": "COCO-LM",
"token_count": 7259
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import AdaptiveSoftmax, FairseqDropout
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1e5
DEFAULT_MAX_TARGET_POSITIONS = 1e5
@register_model("lstm")
class LSTMModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-freeze-embed', action='store_true',
help='freeze encoder embeddings')
parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
help='encoder hidden size')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='number of encoder layers')
parser.add_argument('--encoder-bidirectional', action='store_true',
help='make all layers of encoder bidirectional')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-freeze-embed', action='store_true',
help='freeze decoder embeddings')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
help='decoder attention')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--share-decoder-input-output-embed', default=False,
action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', default=False, action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
help='dropout probability for encoder input embedding')
parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
help='dropout probability for encoder output')
parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
help='dropout probability for decoder output')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
if args.encoder_layers != args.decoder_layers:
raise ValueError("--encoder-layers must match --decoder-layers")
max_source_positions = getattr(
args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
)
max_target_positions = getattr(
args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS
)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
)
else:
num_embeddings = len(task.source_dictionary)
pretrained_encoder_embed = Embedding(
num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
)
if args.share_all_embeddings:
# double check all parameters combinations are valid
if task.source_dictionary != task.target_dictionary:
raise ValueError("--share-all-embeddings requires a joint dictionary")
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embed not compatible with --decoder-embed-path"
)
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to "
"match --decoder-embed-dim"
)
pretrained_decoder_embed = pretrained_encoder_embed
args.share_decoder_input_output_embed = True
else:
# separate decoder input embeddings
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path,
task.target_dictionary,
args.decoder_embed_dim,
)
# one last double check of parameter combinations
if args.share_decoder_input_output_embed and (
args.decoder_embed_dim != args.decoder_out_embed_dim
):
raise ValueError(
"--share-decoder-input-output-embeddings requires "
"--decoder-embed-dim to match --decoder-out-embed-dim"
)
if args.encoder_freeze_embed:
pretrained_encoder_embed.weight.requires_grad = False
if args.decoder_freeze_embed:
pretrained_decoder_embed.weight.requires_grad = False
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
max_source_positions=max_source_positions,
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
attention=utils.eval_bool(args.decoder_attention),
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
share_input_output_embed=args.share_decoder_input_output_embed,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
max_target_positions=max_target_positions,
residuals=False,
)
return cls(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
)
return decoder_out
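    # Note: `decoder_out` is the pair returned by LSTMDecoder.forward — vocabulary logits of
    # shape `(batch, tgt_len, vocab)` (raw features when adaptive softmax is used) and the
    # attention scores of shape `(batch, tgt_len, src_len)`, or None when attention is off.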
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
bidirectional=False,
left_pad=True,
pretrained_embed=None,
padding_idx=None,
max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in_module = FairseqDropout(
dropout_in, module_name=self.__class__.__name__
)
self.dropout_out_module = FairseqDropout(
dropout_out, module_name=self.__class__.__name__
)
self.bidirectional = bidirectional
self.hidden_size = hidden_size
self.max_source_positions = max_source_positions
num_embeddings = len(dictionary)
self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out_module.p if num_layers > 1 else 0.0,
bidirectional=bidirectional,
)
self.left_pad = left_pad
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(
self,
src_tokens: Tensor,
src_lengths: Tensor,
enforce_sorted: bool = True,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of
shape `(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of
shape `(batch)`
enforce_sorted (bool, optional): if True, `src_tokens` is
expected to contain sequences sorted by length in a
decreasing order. If False, this condition is not
required. Default: True.
"""
if self.left_pad:
# nn.utils.rnn.pack_padded_sequence requires right-padding;
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
torch.zeros_like(src_tokens).fill_(self.padding_idx),
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = self.dropout_in_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
packed_x = nn.utils.rnn.pack_padded_sequence(
x, src_lengths.cpu(), enforce_sorted=enforce_sorted
)
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = x.new_zeros(*state_size)
c0 = x.new_zeros(*state_size)
packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_idx * 1.0
)
x = self.dropout_out_module(x)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
final_hiddens = self.combine_bidir(final_hiddens, bsz)
final_cells = self.combine_bidir(final_cells, bsz)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
return tuple(
(
x, # seq_len x batch x hidden
final_hiddens, # num_layers x batch x num_directions*hidden
final_cells, # num_layers x batch x num_directions*hidden
encoder_padding_mask, # seq_len x batch
)
)
def combine_bidir(self, outs, bsz: int):
out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()
return out.view(self.num_layers, bsz, -1)
def reorder_encoder_out(self, encoder_out, new_order):
return tuple(
(
encoder_out[0].index_select(1, new_order),
encoder_out[1].index_select(1, new_order),
encoder_out[2].index_select(1, new_order),
encoder_out[3].index_select(1, new_order),
)
)
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.max_source_positions
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False):
super().__init__()
self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
self.output_proj = Linear(
input_embed_dim + source_embed_dim, output_embed_dim, bias=bias
)
def forward(self, input, source_hids, encoder_padding_mask):
# input: bsz x input_embed_dim
# source_hids: srclen x bsz x source_embed_dim
# x: bsz x source_embed_dim
x = self.input_proj(input)
# compute attention
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
# don't attend over padding
if encoder_padding_mask is not None:
attn_scores = (
attn_scores.float()
.masked_fill_(encoder_padding_mask, float("-inf"))
.type_as(attn_scores)
) # FP16 support: cast to float and back
attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
# sum weighted sources
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
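    # Note: this is Luong-style global attention — `input` is projected into the encoder
    # space, dot-product scores over `source_hids` are softmax-normalized along the source
    # length, and the output is tanh(W [context; input]) together with the raw scores.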
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
out_embed_dim=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
attention=True,
encoder_output_units=512,
pretrained_embed=None,
share_input_output_embed=False,
adaptive_softmax_cutoff=None,
max_target_positions=DEFAULT_MAX_TARGET_POSITIONS,
residuals=False,
):
super().__init__(dictionary)
self.dropout_in_module = FairseqDropout(
dropout_in, module_name=self.__class__.__name__
)
self.dropout_out_module = FairseqDropout(
dropout_out, module_name=self.__class__.__name__
)
self.hidden_size = hidden_size
self.share_input_output_embed = share_input_output_embed
self.need_attn = True
self.max_target_positions = max_target_positions
self.residuals = residuals
self.num_layers = num_layers
self.adaptive_softmax = None
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.encoder_output_units = encoder_output_units
if encoder_output_units != hidden_size and encoder_output_units != 0:
self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)
self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)
else:
self.encoder_hidden_proj = self.encoder_cell_proj = None
# disable input feeding if there is no encoder
# input feeding is described in arxiv.org/abs/1508.04025
input_feed_size = 0 if encoder_output_units == 0 else hidden_size
self.layers = nn.ModuleList(
[
LSTMCell(
input_size=input_feed_size + embed_dim
if layer == 0
else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
]
)
if attention:
# TODO make bias configurable
self.attention = AttentionLayer(
hidden_size, encoder_output_units, hidden_size, bias=False
)
else:
self.attention = None
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
if adaptive_softmax_cutoff is not None:
# setting adaptive_softmax dropout to dropout_out for now but can be redefined
self.adaptive_softmax = AdaptiveSoftmax(
num_embeddings,
hidden_size,
adaptive_softmax_cutoff,
dropout=dropout_out,
)
elif not self.share_input_output_embed:
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
src_lengths: Optional[Tensor] = None,
):
x, attn_scores = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
return self.output_layer(x), attn_scores
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
"""
Similar to *forward* but only return features.
"""
# get outputs from encoder
if encoder_out is not None:
encoder_outs = encoder_out[0]
encoder_hiddens = encoder_out[1]
encoder_cells = encoder_out[2]
encoder_padding_mask = encoder_out[3]
else:
encoder_outs = torch.empty(0)
encoder_hiddens = torch.empty(0)
encoder_cells = torch.empty(0)
encoder_padding_mask = torch.empty(0)
srclen = encoder_outs.size(0)
if incremental_state is not None and len(incremental_state) > 0:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = self.dropout_in_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
if incremental_state is not None and len(incremental_state) > 0:
prev_hiddens, prev_cells, input_feed = self.get_cached_state(
incremental_state
)
elif encoder_out is not None:
# setup recurrent cells
prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)]
prev_cells = [encoder_cells[i] for i in range(self.num_layers)]
if self.encoder_hidden_proj is not None:
prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens]
prev_cells = [self.encoder_cell_proj(y) for y in prev_cells]
input_feed = x.new_zeros(bsz, self.hidden_size)
else:
# setup zero cells, since there is no encoder
zero_state = x.new_zeros(bsz, self.hidden_size)
prev_hiddens = [zero_state for i in range(self.num_layers)]
prev_cells = [zero_state for i in range(self.num_layers)]
input_feed = None
assert (
srclen > 0 or self.attention is None
), "attention is not supported if there are no encoder outputs"
attn_scores: Optional[Tensor] = (
x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None
)
outs = []
for j in range(seqlen):
# input feeding: concatenate context vector from previous time step
if input_feed is not None:
input = torch.cat((x[j, :, :], input_feed), dim=1)
else:
input = x[j]
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = self.dropout_out_module(hidden)
if self.residuals:
input = input + prev_hiddens[i]
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
# apply attention using the last layer's hidden state
if self.attention is not None:
assert attn_scores is not None
out, attn_scores[:, j, :] = self.attention(
hidden, encoder_outs, encoder_padding_mask
)
else:
out = hidden
out = self.dropout_out_module(out)
# input feeding
if input_feed is not None:
input_feed = out
# save final output
outs.append(out)
# Stack all the necessary tensors together and store
prev_hiddens_tensor = torch.stack(prev_hiddens)
prev_cells_tensor = torch.stack(prev_cells)
cache_state = torch.jit.annotate(
Dict[str, Optional[Tensor]],
{
"prev_hiddens": prev_hiddens_tensor,
"prev_cells": prev_cells_tensor,
"input_feed": input_feed,
},
)
self.set_incremental_state(incremental_state, "cached_state", cache_state)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
if hasattr(self, "additional_fc") and self.adaptive_softmax is None:
x = self.additional_fc(x)
x = self.dropout_out_module(x)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
if not self.training and self.need_attn and self.attention is not None:
assert attn_scores is not None
attn_scores = attn_scores.transpose(0, 2)
else:
attn_scores = None
return x, attn_scores
def output_layer(self, x):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = self.fc_out(x)
return x
def get_cached_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]:
cached_state = self.get_incremental_state(incremental_state, "cached_state")
assert cached_state is not None
prev_hiddens_ = cached_state["prev_hiddens"]
assert prev_hiddens_ is not None
prev_cells_ = cached_state["prev_cells"]
assert prev_cells_ is not None
prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)]
prev_cells = [prev_cells_[j] for j in range(self.num_layers)]
input_feed = cached_state[
"input_feed"
] # can be None for decoder-only language models
return prev_hiddens, prev_cells, input_feed
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
if incremental_state is None or len(incremental_state) == 0:
return
prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state)
prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens]
prev_cells = [p.index_select(0, new_order) for p in prev_cells]
if input_feed is not None:
input_feed = input_feed.index_select(0, new_order)
cached_state_new = torch.jit.annotate(
Dict[str, Optional[Tensor]],
{
"prev_hiddens": torch.stack(prev_hiddens),
"prev_cells": torch.stack(prev_cells),
"input_feed": input_feed,
},
)
        self.set_incremental_state(incremental_state, "cached_state", cached_state_new)
return
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.max_target_positions
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0.0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture("lstm", "lstm")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
args.encoder_hidden_size = getattr(
args, "encoder_hidden_size", args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 1)
args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
args.decoder_hidden_size = getattr(
args, "decoder_hidden_size", args.decoder_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 1)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
args.decoder_attention = getattr(args, "decoder_attention", "1")
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en")
def lstm_wiseman_iwslt_de_en(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
base_architecture(args)
@register_model_architecture("lstm", "lstm_luong_wmt_en_de")
def lstm_luong_wmt_en_de(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0)
base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/lstm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/lstm.py",
"repo_id": "COCO-LM",
"token_count": 14643
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
"""
Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).
Args:
roberta (RobertaHubInterface): RoBERTa instance
bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
other_tokens (List[str]): other tokens of shape `(T_words)`
Returns:
        List[List[int]]: for each of *other_tokens*, the indices of the corresponding *bpe_tokens*.
"""
assert bpe_tokens.dim() == 1
assert bpe_tokens[0] == 0
def clean(text):
return text.strip()
# remove whitespaces to simplify alignment
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [
clean(roberta.bpe.decode(x) if x not in {"<s>", ""} else x) for x in bpe_tokens
]
other_tokens = [clean(str(o)) for o in other_tokens]
# strip leading <s>
bpe_tokens = bpe_tokens[1:]
assert "".join(bpe_tokens) == "".join(other_tokens)
# create alignment from every word to a list of BPE tokens
alignment = []
bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1))
j, bpe_tok = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok) :]
try:
j, bpe_tok = next(bpe_toks)
except StopIteration:
j, bpe_tok = None, None
elif bpe_tok.startswith(other_tok):
                # this BPE token spans (part of) multiple words
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok) :]
other_tok = ""
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if other_tok == "":
break
assert len(bpe_indices) > 0
alignment.append(bpe_indices)
assert len(alignment) == len(other_tokens)
return alignment
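# Illustrative usage sketch (not part of the original file; assumes a loaded RoBERTa hub model):
#
#   roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
#   sent = "Hello world"
#   bpe_tokens = roberta.encode(sent)        # LongTensor starting with <s> (index 0)
#   other_tokens = sent.split()              # any word-level tokenization, e.g. spaCy
#   alignment = align_bpe_to_words(roberta, bpe_tokens, other_tokens)
#   # alignment[i] lists the positions of the BPE tokens covering word i
#   # (positions are 1-based because position 0 is the stripped leading <s>).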
def align_features_to_words(roberta, features, alignment):
"""
Align given features to words.
Args:
roberta (RobertaHubInterface): RoBERTa instance
features (torch.Tensor): features to align of shape `(T_bpe x C)`
alignment: alignment between BPE tokens and words returned by
func:`align_bpe_to_words`.
"""
assert features.dim() == 2
bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices)
assert bpe_counts[0] == 0 # <s> shouldn't be aligned
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = features / denom.unsqueeze(-1)
output = [weighted_features[0]]
largest_j = -1
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range(largest_j + 1, len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4)
return output
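# Note: each BPE feature row is divided by the number of words it is assigned to, so summing
# the word-level rows (plus the untouched <s> and trailing special-token rows) reproduces the
# sum of the original features — which is exactly what the assert above verifies.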
def spacy_nlp():
if getattr(spacy_nlp, "_nlp", None) is None:
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_nlp._nlp
def spacy_tokenizer():
if getattr(spacy_tokenizer, "_tokenizer", None) is None:
try:
nlp = spacy_nlp()
spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_tokenizer._tokenizer
|
COCO-LM/fairseq/fairseq/models/roberta/alignment_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/roberta/alignment_utils.py",
"repo_id": "COCO-LM",
"token_count": 1862
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerModel,
base_architecture,
transformer_wmt_en_de_big,
)
@register_model("transformer_align")
class TransformerAlignModel(TransformerModel):
"""
See "Jointly Learning to Align and Translate with Transformer
Models" (Garg et al., EMNLP 2019).
"""
def __init__(self, encoder, decoder, args):
super().__init__(args, encoder, decoder)
self.alignment_heads = args.alignment_heads
self.alignment_layer = args.alignment_layer
self.full_context_alignment = args.full_context_alignment
@staticmethod
def add_args(parser):
# fmt: off
super(TransformerAlignModel, TransformerAlignModel).add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='D',
                            help='Number of cross attention heads per layer to be supervised with alignments')
parser.add_argument('--alignment-layer', type=int, metavar='D',
                            help='Layer number which has to be supervised. 0 corresponds to the bottommost layer.')
parser.add_argument('--full-context-alignment', action='store_true',
help='Whether or not alignment is supervised conditioned on the full target context.')
# fmt: on
@classmethod
def build_model(cls, args, task):
# set any default arguments
transformer_align(args)
transformer_model = TransformerModel.build_model(args, task)
return TransformerAlignModel(
transformer_model.encoder, transformer_model.decoder, args
)
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens, src_lengths)
return self.forward_decoder(prev_output_tokens, encoder_out)
def forward_decoder(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
features_only=False,
**extra_args,
):
attn_args = {
"alignment_layer": self.alignment_layer,
"alignment_heads": self.alignment_heads,
}
decoder_out = self.decoder(prev_output_tokens, encoder_out, **attn_args)
if self.full_context_alignment:
attn_args["full_context_alignment"] = self.full_context_alignment
_, alignment_out = self.decoder(
prev_output_tokens,
encoder_out,
features_only=True,
**attn_args,
**extra_args,
)
decoder_out[1]["attn"] = alignment_out["attn"]
return decoder_out
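    # Note: the first decoder pass above produces the translation output; when
    # `full_context_alignment` is set, a second, features-only pass is run without the
    # causal self-attention mask so the supervised alignment heads can attend to the full
    # target context (Garg et al., 2019), and its `attn` replaces the attention of the
    # first pass in `decoder_out[1]`.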
@register_model_architecture("transformer_align", "transformer_align")
def transformer_align(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", 4)
args.full_context_alignment = getattr(args, "full_context_alignment", False)
base_architecture(args)
@register_model_architecture("transformer_align", "transformer_wmt_en_de_big_align")
def transformer_wmt_en_de_big_align(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", 4)
transformer_wmt_en_de_big(args)
|
COCO-LM/fairseq/fairseq/models/transformer_align.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/transformer_align.py",
"repo_id": "COCO-LM",
"token_count": 1469
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.scalar_bias import scalar_bias
class SingleHeadAttention(nn.Module):
"""
Single-head attention that supports Gating and Downsampling
"""
def __init__(
self,
out_channels,
embed_dim,
head_dim,
head_index,
dropout=0.0,
bias=True,
project_input=True,
gated=False,
downsample=False,
num_heads=1,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_index = head_index
self.head_dim = head_dim
self.project_input = project_input
self.gated = gated
self.downsample = downsample
self.num_heads = num_heads
self.projection = None
k_layers = []
v_layers = []
if self.downsample:
k_layers.append(Downsample(self.head_index))
v_layers.append(Downsample(self.head_index))
out_proj_size = self.head_dim
else:
out_proj_size = self.head_dim * self.num_heads
if self.gated:
k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
else:
k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_k = nn.Sequential(*k_layers)
self.in_proj_v = nn.Sequential(*v_layers)
if self.downsample:
self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias)
else:
self.out_proj = Linear(out_proj_size, out_channels, bias=bias)
self.scaling = self.head_dim ** -0.5
def forward(
self,
query,
key,
value,
mask_future_timesteps=False,
key_padding_mask=None,
use_scalar_bias=False,
):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
src_len, bsz, out_channels = key.size()
tgt_len = query.size(0)
assert list(query.size()) == [tgt_len, bsz, out_channels]
assert key.size() == value.size()
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.downsample:
size = bsz
else:
size = bsz * self.num_heads
k = key
v = value
q = query
if self.project_input:
q = self.in_proj_q(q)
k = self.in_proj_k(k)
v = self.in_proj_v(v)
src_len = k.size()[0]
q *= self.scaling
if not self.downsample:
q = q.view(tgt_len, size, self.head_dim)
k = k.view(src_len, size, self.head_dim)
v = v.view(src_len, size, self.head_dim)
q = q.transpose(0, 1)
k = k.transpose(0, 1)
v = v.transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if mask_future_timesteps:
assert (
query.size() == key.size()
), "mask_future_timesteps only applies to self-attention"
attn_weights *= torch.tril(
attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(),
diagonal=-1,
)[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0)
attn_weights += torch.triu(
attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(),
diagonal=0,
)[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0)
tgt_size = tgt_len
if use_scalar_bias:
attn_weights = scalar_bias(attn_weights, 2)
v = scalar_bias(v, 1)
tgt_size += 1
if key_padding_mask is not None:
# don't attend to padding symbols
if key_padding_mask.max() > 0:
if self.downsample:
attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len)
else:
attn_weights = attn_weights.view(
size, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-math.inf,
)
attn_weights = attn_weights.view(size, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout_module(attn_weights)
attn = torch.bmm(attn_weights, v)
if self.downsample:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
attn = self.out_proj(attn)
return attn, attn_weights
class DownsampledMultiHeadAttention(nn.ModuleList):
"""
Multi-headed attention with Gating and Downsampling
"""
def __init__(
self,
out_channels,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
project_input=True,
gated=False,
downsample=False,
):
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.downsample = downsample
self.gated = gated
self.project_input = project_input
assert self.head_dim * num_heads == embed_dim
if self.downsample:
attention_heads = []
for index in range(self.num_heads):
attention_heads.append(
SingleHeadAttention(
out_channels,
self.embed_dim,
self.head_dim,
index,
dropout,
bias,
self.project_input,
self.gated,
self.downsample,
self.num_heads,
)
)
super().__init__(modules=attention_heads)
self.out_proj = Linear(embed_dim, out_channels, bias=bias)
else:
# either we have a list of attention heads, or just one attention head
# if not being downsampled, we can do the heads with one linear layer instead of separate ones
super().__init__()
self.attention_module = SingleHeadAttention(
out_channels,
self.embed_dim,
self.head_dim,
1,
dropout,
bias,
self.project_input,
self.gated,
self.downsample,
self.num_heads,
)
def forward(
self,
query,
key,
value,
mask_future_timesteps=False,
key_padding_mask=None,
use_scalar_bias=False,
):
src_len, bsz, embed_dim = key.size()
tgt_len = query.size(0)
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
tgt_size = tgt_len
if use_scalar_bias:
tgt_size += 1
attn = []
attn_weights = []
if self.downsample:
for attention_head_number in range(self.num_heads):
# call the forward of each attention head
_attn, _attn_weight = self[attention_head_number](
query,
key,
value,
mask_future_timesteps,
key_padding_mask,
use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn = self.out_proj(full_attn)
return full_attn, attn_weights[0].clone()
else:
_attn, _attn_weight = self.attention_module(
query,
key,
value,
mask_future_timesteps,
key_padding_mask,
use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn_weights = torch.cat(attn_weights)
full_attn_weights = full_attn_weights.view(
bsz, self.num_heads, tgt_size, src_len
)
full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads
return full_attn, full_attn_weights
class Downsample(nn.Module):
"""
Selects every nth element, where n is the index
"""
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x[:: self.index + 1]
def Linear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
    if bias:
        m.bias.data.zero_()
return nn.utils.weight_norm(m)
def GatedLinear(in_features, out_features, dropout=0.0, bias=True):
"""Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units"""
return nn.Sequential(
Linear(in_features, out_features * 4, dropout, bias),
nn.GLU(),
Linear(out_features * 2, out_features * 2, dropout, bias),
nn.GLU(),
Linear(out_features, out_features, dropout, bias),
)
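# Note: nn.GLU halves the last dimension, so the stack above goes
# in_features -> 4*out_features -(GLU)-> 2*out_features -> 2*out_features
# -(GLU)-> out_features -> out_features, i.e. it ends at `out_features` as intended.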
|
COCO-LM/fairseq/fairseq/modules/downsampled_multihead_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/downsampled_multihead_attention.py",
"repo_id": "COCO-LM",
"token_count": 5639
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from fairseq.modules import Fp32GroupNorm
class KmeansVectorQuantizer(nn.Module):
def __init__(
self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25
):
"""Vector quantization using straight pass-through estimator (i.e. kmeans)
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
gamma: commitment loss coefficient
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.vq_dim = vq_dim
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
self.var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.embedding = nn.Parameter(
0.01 * torch.randn(num_vars, num_groups, self.var_dim)
)
self.projection = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False),
Fp32GroupNorm(groups, dim),
)
self.gamma = gamma
self.mse_mean = nn.MSELoss(reduction="mean")
def _pass_grad(self, x, y):
"""Manually set gradient for backward pass.
for y = f(x), ensure that during the backward pass,
dL/dy = dL/dx regardless of f(x).
Returns:
y, with the gradient forced to be dL/dy = dL/dx.
"""
return y.detach() + (x - x.detach())
@property
def expand_embedding(self):
if self.combine_groups:
return self.embedding.expand(self.num_vars, self.groups, self.var_dim)
return self.embedding
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars}
if self.time_first:
x = x.transpose(1, 2)
bsz, fsz, tsz = x.shape
ze = self.projection(x)
ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2)
d = (
(ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1))
.view(self.num_vars, bsz, tsz, self.groups, -1)
.norm(dim=-1, p=2)
)
idx = d.argmin(dim=0)
zq = (
torch.stack(
[
self.expand_embedding[idx[..., group], group]
for group in range(self.groups)
],
dim=-2,
)
.view(bsz, tsz, self.groups * self.var_dim)
.permute(0, 2, 1)
)
assert ze.shape == zq.shape, (ze.shape, zq.shape)
x = self._pass_grad(ze, zq)
hard_x = (
idx.new_zeros(bsz * tsz * self.groups, self.num_vars)
.scatter_(-1, idx.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
if produce_targets:
result["targets"] = idx
if self.time_first:
x = x.transpose(1, 2) # BCT -> BTC
result["x"] = x
ze = ze.float()
zq = zq.float()
latent_loss = self.mse_mean(zq, ze.detach())
commitment_loss = self.mse_mean(ze, zq.detach())
result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss
return result
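# Illustrative usage sketch (not part of the original file; shapes are assumptions):
#
#   vq = KmeansVectorQuantizer(dim=512, num_vars=320, groups=2, combine_groups=False,
#                              vq_dim=512, time_first=True)
#   out = vq(torch.randn(8, 100, 512))   # B x T x C because time_first=True
#   out["x"].shape                       # -> (8, 100, 512), quantized features
#   out["kmeans_loss"]                   # -> scalar latent + commitment loss
#   out["code_perplexity"]               # -> how many codes are effectively used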
|
COCO-LM/fairseq/fairseq/modules/kmeans_vector_quantizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/kmeans_vector_quantizer.py",
"repo_id": "COCO-LM",
"token_count": 2060
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def emulate_int(w, bits, method, scale=None, zero_point=None):
q = globals()[f"emulate_int{bits}_{method}"]
return q(w, scale=scale, zero_point=zero_point)
def quantize(w, scale, zero_point):
return (
torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point
) * scale
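# Note: `quantize` fake-quantizes to 8 bits — e.g. with scale=0.1 and zero_point=0,
# w=0.37 maps to round(0.37 / 0.1) = 4, clamped to [0, 255], then back to 4 * 0.1 = 0.4.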
def emulate_int8_histogram(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.HistogramObserver()
_ = obs(w.float())
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
def emulate_int8_channel(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.PerChannelMinMaxObserver(
ch_axis=-1, qscheme=torch.per_channel_symmetric
)
_ = obs(w)
scale, zero_point, ch_axis = obs.get_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
def emulate_int8_tensor(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.MinMaxObserver()
_ = obs(w)
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
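# Illustrative usage sketch (not part of the original file; assumes a CUDA tensor, since the
# observers above move the computed qparams to the GPU with `.cuda()`):
#
#   w = torch.randn(512, 512, device="cuda")
#   w_q, scale, zero_point = emulate_int(w, bits=8, method="histogram")
#   # w_q is a fake-quantized copy of w; pass scale/zero_point back in to reuse them.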
|
COCO-LM/fairseq/fairseq/modules/quantization/scalar/ops.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/ops.py",
"repo_id": "COCO-LM",
"token_count": 699
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.optim.bmuf import FairseqBMUF # noqa
from fairseq.optim.fairseq_optimizer import ( # noqa
FairseqOptimizer,
LegacyFairseqOptimizer,
)
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from fairseq.optim.shard import shard_
from omegaconf import DictConfig
__all__ = [
"FairseqOptimizer",
"FP16Optimizer",
"MemoryEfficientFP16Optimizer",
"shard_",
]
(
_build_optimizer,
register_optimizer,
OPTIMIZER_REGISTRY,
OPTIMIZER_DATACLASS_REGISTRY,
) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True)
def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
if all(isinstance(p, dict) for p in params):
params = [t for p in params for t in p.values()]
params = list(filter(lambda p: p.requires_grad, params))
return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)
# automatically import any Python files in the optim/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.optim." + file_name)
|
COCO-LM/fairseq/fairseq/optim/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/__init__.py",
"repo_id": "COCO-LM",
"token_count": 537
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim import FairseqOptimizer
class FairseqLRScheduler(object):
def __init__(self, cfg, optimizer):
super().__init__()
if optimizer is not None and not isinstance(optimizer, FairseqOptimizer):
raise ValueError("optimizer must be an instance of FairseqOptimizer")
self.cfg = cfg
self.optimizer = optimizer
self.best = None
@classmethod
def add_args(cls, parser):
"""Add arguments to the parser for this LR scheduler."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
def state_dict(self):
"""Return the LR scheduler state dict."""
return {"best": self.best}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.best = state_dict["best"]
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
pass
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
if self.best is None:
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.get_lr()
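    # Minimal subclass sketch (illustrative only; names are assumptions, not part of this file):
    #
    #   @register_lr_scheduler("constant_example")
    #   class ConstantSchedule(FairseqLRScheduler):
    #       def __init__(self, cfg, optimizer):
    #           super().__init__(cfg, optimizer)
    #           self.lr = cfg.lr[0]
    #       def step_update(self, num_updates):
    #           self.optimizer.set_lr(self.lr)
    #           return self.optimizer.get_lr()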
class LegacyFairseqLRScheduler(FairseqLRScheduler):
def __init__(self, args: Namespace, optimizer):
if not isinstance(optimizer, FairseqOptimizer):
raise ValueError("optimizer must be an instance of FairseqOptimizer")
self.args = args
self.optimizer = optimizer
self.best = None
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py",
"repo_id": "COCO-LM",
"token_count": 800
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
_choice = choice._name if isinstance(choice, DictConfig) else choice
if _choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
|
COCO-LM/fairseq/fairseq/scoring/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/scoring/__init__.py",
"repo_id": "COCO-LM",
"token_count": 546
}
| 197 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
Dictionary,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
from .denoising import DenoisingTask
logger = logging.getLogger(__name__)
@register_task("multilingual_denoising")
class MultilingualDenoisingTask(DenoisingTask):
@staticmethod
def add_args(parser):
DenoisingTask.add_args(parser)
parser.add_argument(
"--multilang-sampling-alpha",
type=float,
default=1.0,
help="smoothing alpha for sample ratios across multiple datasets",
)
parser.add_argument("--add-lang-token", default=False, action="store_true")
parser.add_argument(
"--langs", type=str, help="language ids we are considering", default=None
)
parser.add_argument(
"--no-whole-word-mask-langs",
type=str,
default="",
metavar="N",
help="languages without spacing between words dont support whole word masking",
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = args.data.split(":")
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
data_path = paths[0]
if args.langs is None:
languages = sorted(
[
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
]
)
else:
languages = args.langs.split(",")
if args.add_lang_token:
for lang in languages:
dictionary.add_symbol("[{}]".format(lang))
logger.info("dictionary: {} types".format(len(dictionary)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol("<mask>")
self.langs = args.langs
self.args = args
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by language. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
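    # Note: a worked example of the smoothing — with dataset_lens = [100, 900] and
    # multilang_sampling_alpha = 0.5, raw prob = [0.1, 0.9], prob ** 0.5 ≈ [0.316, 0.949],
    # and renormalizing gives [0.25, 0.75], so the small language is sampled more often
    # than its raw share of the data.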
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(":")
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
if self.langs is None:
languages = sorted(
[
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
]
)
else:
languages = self.langs.split(",")
for name in languages:
p = os.path.join(data_path, name)
assert os.path.exists(p), "data not found: {}".format(p)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
        logger.info(
            "Language to id mapping: {}".format(
                {lang: id for id, lang in enumerate(languages)}
            )
        )
mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
lang_datasets = []
for language in languages:
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
end_token = (
self.source_dictionary.index("[{}]".format(language))
if self.args.add_lang_token
else self.source_dictionary.eos()
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s>
pad=self.source_dictionary.pad(),
eos=end_token,
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, end_token)
lang_mask_whole_words = (
mask_whole_words
if language not in language_without_segmentations
else None
)
lang_dataset = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
lang_mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
eos=None
if not self.args.add_lang_token
else self.source_dictionary.index("[{}]".format(language)),
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
int(dataset_lengths.sum()),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language: {}".format(
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
}
)
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: {}".format(
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
}
)
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
|
COCO-LM/fairseq/fairseq/tasks/multilingual_denoising.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/multilingual_denoising.py",
"repo_id": "COCO-LM",
"token_count": 4582
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
from fairseq.file_io import PathManager
if PathManager.isfile(values):
with PathManager.open(values) as f:
argument = f.read().strip()
else:
argument = values
setattr(namespace, self.dest, argument)
def split_paths(paths: str) -> List[str]:
return (
paths.split(os.pathsep)
if "://" not in paths
else paths.split(MANIFOLD_PATH_SEP)
)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
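# Example for apply_to_sample (added for illustration): it walks nested
# dicts/lists/tuples/sets and applies f to every tensor leaf, e.g.
#   apply_to_sample(lambda t: t.half(), {"net_input": {"src_tokens": x}})
# returns the same nested structure with the tensor x cast to half precision.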
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def move_to_tpu(sample):
import torch_xla.core.xla_model as xm
device = xm.xla_device()
def _move_to_tpu(tensor):
return tensor.to(device)
return apply_to_sample(_move_to_tpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens,
src_str,
alignment,
align_dict,
tgt_dict,
remove_bpe=None,
extra_symbols_to_ignore=None,
):
hypo_str = tgt_dict.string(
hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
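# Worked example for make_positions (added for illustration), with padding_idx = 1:
#   tensor = [[5, 6, 1], [7, 1, 1]]  ->  mask = [[1, 1, 0], [1, 0, 0]]
#   result = [[2, 3, 1], [2, 1, 1]]
# i.e. positions start at padding_idx + 1 and padding positions keep padding_idx.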
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
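    # Worked example (added for illustration): with padding_idx = 1 and
    # left_to_right=True, the left-padded batch [[1, 1, 5, 6]] becomes the
    # right-padded batch [[5, 6, 1, 1]]; each row is rotated by its pad count.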
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
def item(tensor):
# tpu-comment: making this a no-op for xla devices.
if torch.is_tensor(tensor) and tensor.device.type == 'xla':
return tensor.detach()
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(
chunk_size, has_inf, [cur_device_grads], False
)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
from fairseq.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base ** loss, round)
except OverflowError:
return float("inf")
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
from fairseq.modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
@contextlib.contextmanager
def model_eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if xm is not None:
state["xla_rng_state"] = xm.get_rng_state()
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if xm is not None:
xm.set_rng_state(state["xla_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = get_rng_state()
torch.manual_seed(seed)
if xm is not None:
xm.set_rng_state(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def __enter__(self):
return self
def __exit__(self, *exc):
set_rng_state(self.rng_state)
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
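# Example (added for illustration): parse_alignment("0-0 1-2") returns
# torch.IntTensor([0, 0, 1, 2]), i.e. the m = 2 pairs packed as
# [src_0, tgt_0, src_1, tgt_1].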
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_invalid = (
((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad)).nonzero(as_tuple=False)
)
src_valid = (
((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1)
)
alignment = []
if len(tgt_valid) != 0 and len(src_valid) != 0:
attn_valid = attn[tgt_valid, src_valid]
alignment = [
["{:.6f}".format(p) for p in src_probs.tolist()]
for src_probs in attn_valid
]
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, use the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
def get_tpu_device():
return xm.xla_device()
def tpu_data_loader(itr):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
from fairseq.data import iterators
xm.rendezvous("tpu_data_loader") # wait for all workers
xm.mark_step()
device = xm.xla_device()
return iterators.CountingIterator(
pl.ParallelLoader(itr, [device]).per_device_loader(device),
start=getattr(itr, "n", 0),
total=len(itr),
)
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == 'xla'
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
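        # Added note: XLA tensors are updated here without in-place advanced
        # indexing; the line below emulates tensor[indices] = value as a
        # masked blend, tensor * (~mask) + value * mask.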
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor
def xla_device_to_cpu(dat):
import torch_xla.core.xla_model as xm
return xm._maybe_convert_to_cpu(dat)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironments, pretty-print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(",")
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
|
COCO-LM/fairseq/fairseq/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/utils.py",
"repo_id": "COCO-LM",
"token_count": 11060
}
| 199 |
#include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/cuda/CUDAContext.h"
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_bf16.h>
#include "type_shim.h"
template<typename U> __device__
void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count)
{
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
template<typename U> __device__
void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count)
{
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA*mu + nB*muB;
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
} else {
mu = U(0);
sigma2 = U(0);
}
}
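// Note (added for clarity): cuChanOnlineSum above merges two partial
// (mean, M2, count) statistics with Chan et al.'s parallel update,
//   mu = (nA*muA + nB*muB) / (nA + nB)
//   M2 = M2A + M2B + delta*delta * nA*nB / (nA + nB),   delta = muB - muA,
// which is exactly what the normalized nA/nX and nB/nX terms compute.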
template<typename T, typename U> __device__
void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf)
{
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu= U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1*n2;
int l = 4*thrx;
for (; l+3 < n2; l+=4*numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l+k]);
cuWelfordOnlineSum<U>(curr,mu,sigma2,count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U>(curr,mu,sigma2,count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x+(1<<l))&31;
U muB = WARP_SHFL(mu, srcLaneB);
U countB = WARP_SHFL(count, srcLaneB);
U sigma2B = WARP_SHFL(sigma2, srcLaneB);
cuChanOnlineSum<U>(muB,sigma2B,countB,mu,sigma2,count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y/2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2*wrt_y] = mu;
ubuf[2*wrt_y+1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2*threadIdx.y];
U sigma2B = ubuf[2*threadIdx.y+1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U>(muB,sigma2B,countB,mu,sigma2,count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1]/U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2/U(n2), 0);
}
}
}
template<> __device__
void cuWelfordMuSigma2(
const at::Half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf)
{
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu= float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const at::Half* lvals = vals + i1*n2;
int l = 8*thrx;
if ((((size_t)lvals)&3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum(curr,mu,sigma2,count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l+7 < n2; l+=8*numx) {
for (int k = 0; k < 8; k+=2) {
float2 curr = __half22float2(*((__half2*)(lvals+l+k)));
cuWelfordOnlineSum(curr.x,mu,sigma2,count);
cuWelfordOnlineSum(curr.y,mu,sigma2,count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum(curr,mu,sigma2,count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x+(1<<l))&31;
float muB = WARP_SHFL(mu, srcLaneB);
float countB = WARP_SHFL(count, srcLaneB);
float sigma2B = WARP_SHFL(sigma2, srcLaneB);
cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y/2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2*wrt_y] = mu;
ubuf[2*wrt_y+1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2*threadIdx.y];
float sigma2B = ubuf[2*threadIdx.y+1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1]/float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2/float(n2), 0);
}
}
}
#if __CUDA_ARCH__ >= 800
template<> __device__
void cuWelfordMuSigma2(
const at::BFloat16* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf)
{
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu= float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const at::BFloat16* lvals = vals + i1*n2;
int l = 8*thrx;
if ((((size_t)lvals)&3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum(curr,mu,sigma2,count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l+7 < n2; l+=8*numx) {
for (int k = 0; k < 8; k+=2) {
float2 curr = __bfloat1622float2(*((__nv_bfloat162*)(lvals+l+k)));
cuWelfordOnlineSum(curr.x,mu,sigma2,count);
cuWelfordOnlineSum(curr.y,mu,sigma2,count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum(curr,mu,sigma2,count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x+(1<<l))&31;
float muB = WARP_SHFL(mu, srcLaneB);
float countB = WARP_SHFL(count, srcLaneB);
float sigma2B = WARP_SHFL(sigma2, srcLaneB);
cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y/2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2*wrt_y] = mu;
ubuf[2*wrt_y+1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2*threadIdx.y];
float sigma2B = ubuf[2*threadIdx.y+1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum(muB,sigma2B,countB,mu,sigma2,count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1]/float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2/float(n2), 0);
}
}
}
#endif
template<typename U> U rsqrt(U v) {
return U(1) / sqrt(v);
}
template<> float rsqrt(float v) {
return rsqrtf(v);
}
template<> double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory <float>
{
__device__ float *getPointer()
{
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory <double>
{
__device__ double *getPointer()
{
extern __shared__ double s_double[];
return s_double;
}
};
}
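// Note (added for clarity): cuApplyLayerNorm below computes, per row i1 of
// length n2,
//   mu = mean(x), sigma2 = biased variance of x (divided by n2),
//   y  = gamma * (x - mu) * rsqrt(sigma2 + epsilon) + beta
// and stores mu and the inverse standard deviation (invvar) for the backward pass.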
template<typename T, typename U> __global__
void cuApplyLayerNorm(
T* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ invvar,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const T* __restrict__ gamma,
const T* __restrict__ beta
)
{
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensors are contiguous
//
for (auto i1=blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu,sigma2;
cuWelfordMuSigma2(vals,n1,n2,i1,mu,sigma2,buf);
const T* lvals = vals + i1*n2;
T* ovals = output_vals + i1*n2;
U c_invvar = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
if (gamma != NULL && beta != NULL) {
for (int i = thrx; i < n2; i+=numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = gamma[i] * static_cast<T>(c_invvar * (curr - mu)) + beta[i];
}
} else {
for (int i = thrx; i < n2; i+=numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = static_cast<T>(c_invvar * (curr - mu));
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
mean[i1] = mu;
invvar[i1] = c_invvar;
}
}
}
template<typename T, typename U> __device__
void cuLoadWriteStridedInputs(
const int i1_block,
const int thr_load_row_off,
const int thr_load_col_off,
const int i2_off,
const int row_stride,
U* warp_buf1,
U* warp_buf2,
const T* input,
const T* dout,
const int i1_end,
const int n2,
const U* __restrict__ mean,
const U* __restrict__ invvar
)
{
int i1 = i1_block+thr_load_row_off;
if (i1 < i1_end) {
U curr_mean = mean[i1];
U curr_invvar = invvar[i1];
for (int k = 0; k < blockDim.y; ++k) {
int i2 = i2_off + k;
int load_idx = i1*n2+i2;
int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k;
if (i2<n2) {
U curr_input = static_cast<U>(input[load_idx]);
U curr_dout = static_cast<U>(dout[load_idx]);
warp_buf1[write_idx] = curr_dout;
warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar;
} else {
warp_buf1[write_idx] = U(0);
warp_buf2[write_idx] = U(0);
}
}
} else {
for (int k = 0; k < blockDim.y; ++k) {
int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k;
warp_buf1[write_idx] = U(0);
warp_buf2[write_idx] = U(0);
}
}
}
template<typename T, typename U> __device__
void cuLoadAddStridedInputs(
const int i1_block,
const int thr_load_row_off,
const int thr_load_col_off,
const int i2_off,
const int row_stride,
U* warp_buf1,
U* warp_buf2,
const T* input,
const T* dout,
const int i1_end,
const int n2,
const U* __restrict__ mean,
const U* __restrict__ invvar
)
{
int i1 = i1_block+thr_load_row_off;
if (i1 < i1_end) {
U curr_mean = mean[i1];
U curr_invvar = invvar[i1];
for (int k = 0; k < blockDim.y; ++k) {
int i2 = i2_off + k;
int load_idx = i1*n2+i2;
int write_idx = thr_load_row_off*row_stride+thr_load_col_off+k;
if (i2<n2) {
U curr_input = static_cast<U>(input[load_idx]);
U curr_dout = static_cast<U>(dout[load_idx]);
warp_buf1[write_idx] += curr_dout;
warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar;
}
}
}
}
template<typename T, typename U> __global__
void cuComputePartGradGammaBeta(
const T* __restrict__ dout,
const T* __restrict__ input,
const int n1,
const int n2,
const U* __restrict__ mean,
const U* __restrict__ invvar,
U epsilon,
U* part_grad_gamma,
U* part_grad_beta)
{
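  // Note (added for clarity): over its slab of rows this kernel accumulates
  //   partial grad_beta(j)  = sum_i dout[i][j]
  //   partial grad_gamma(j) = sum_i dout[i][j] * (input[i][j] - mean[i]) * invvar[i];
  // the final reduction over slabs is done by cuComputeGradGammaBeta.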
const int numsegs_n1 = (n1+blockDim.y*blockDim.y-1) / (blockDim.y*blockDim.y);
const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y;
const int i1_beg = blockIdx.y * segs_per_block * blockDim.y*blockDim.y;
const int i1_beg_plus_one = (blockIdx.y+1) * segs_per_block * blockDim.y*blockDim.y;
const int i1_end = i1_beg_plus_one < n1 ? i1_beg_plus_one : n1;
const int row_stride = blockDim.x+1;
const int thr_load_col_off = (threadIdx.x*blockDim.y)&(blockDim.x-1);
const int thr_load_row_off = (threadIdx.x*blockDim.y)/blockDim.x + threadIdx.y*blockDim.y;
const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off;
SharedMemory<U> shared;
U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements
U* warp_buf1 = (U*)buf;
U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride;
// compute partial sums from strided inputs
// do this to increase number of loads in flight
cuLoadWriteStridedInputs(i1_beg,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar);
for (int i1_block = i1_beg+blockDim.y*blockDim.y; i1_block < i1_end; i1_block+=blockDim.y*blockDim.y) {
cuLoadAddStridedInputs(i1_block,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar);
}
__syncthreads();
// inter-warp reductions
// sum within each warp
U acc1 = U(0);
U acc2 = U(0);
for (int k = 0; k < blockDim.y; ++k) {
int row1 = threadIdx.y + k*blockDim.y;
int idx1 = row1*row_stride + threadIdx.x;
acc1 += warp_buf1[idx1];
acc2 += warp_buf2[idx1];
}
warp_buf1[threadIdx.y*row_stride+threadIdx.x] = acc1;
warp_buf2[threadIdx.y*row_stride+threadIdx.x] = acc2;
__syncthreads();
// sum all warps
for (int offset = blockDim.y/2; offset > 1; offset /= 2) {
if (threadIdx.y < offset) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + offset;
int idx1 = row1*row_stride + threadIdx.x;
int idx2 = row2*row_stride + threadIdx.x;
warp_buf1[idx1] += warp_buf1[idx2];
warp_buf2[idx1] += warp_buf2[idx2];
}
__syncthreads();
}
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.y == 0 && i2 < n2) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + 1;
int idx1 = row1*row_stride + threadIdx.x;
int idx2 = row2*row_stride + threadIdx.x;
part_grad_beta[blockIdx.y*n2+i2] = warp_buf1[idx1] + warp_buf1[idx2];
part_grad_gamma[blockIdx.y*n2+i2] = warp_buf2[idx1] + warp_buf2[idx2];
}
}
template<typename T, typename U> __global__
void cuComputeGradGammaBeta(
const U* part_grad_gamma,
const U* part_grad_beta,
const int part_size,
const int n1,
const int n2,
T* grad_gamma,
T* grad_beta)
{
// sum partial gradients for gamma and beta
SharedMemory<U> shared;
U* buf = shared.getPointer();
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if (i2 < n2) {
// each warp does sequential reductions until reduced part_size is num_warps
int num_warp_reductions = part_size / blockDim.y;
U sum_gamma = U(0);
U sum_beta = U(0);
const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2;
const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2;
for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) {
sum_gamma += part_grad_gamma_ptr[warp_offset*n2];
sum_beta += part_grad_beta_ptr[warp_offset*n2];
}
// inter-warp reductions
const int nbsize3 = blockDim.x * blockDim.y / 2;
for (int offset = blockDim.y/2; offset >= 1; offset /= 2) {
// top half write to shared memory
if (threadIdx.y >= offset && threadIdx.y < 2*offset) {
const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
buf[write_idx] = sum_gamma;
buf[write_idx+nbsize3] = sum_beta;
}
__syncthreads();
// bottom half sums
if (threadIdx.y < offset) {
const int read_idx = threadIdx.y * blockDim.x + threadIdx.x;
sum_gamma += buf[read_idx];
sum_beta += buf[read_idx+nbsize3];
}
__syncthreads();
}
// write out fully summed gradients
if (threadIdx.y == 0) {
grad_gamma[i2] = sum_gamma;
grad_beta[i2] = sum_beta;
}
}
}
template<typename T, typename U> __global__
void cuComputeGradInput(
const T* __restrict__ dout,
const T* __restrict__ input,
const int n1,
const int n2,
const U* __restrict__ mean,
const U* __restrict__ invvar,
U epsilon,
const T* gamma,
T* grad_input)
{
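  // Note (added for clarity): per row this computes the standard LayerNorm
  // input gradient
  //   dx = (1/n2) * invvar * ( n2 * dy * gamma
  //                            - sum(dy * gamma)
  //                            - (x - mu) * invvar * sum(dy * gamma * (x - mu) * invvar) ),
  // with the gamma factors dropped when gamma == NULL; sum_loss1 and
  // sum_loss2 below are the two row-wise sums.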
for (auto i1=blockIdx.y; i1 < n1; i1 += gridDim.y) {
U sum_loss1 = U(0);
U sum_loss2 = U(0);
const U c_mean = mean[i1];
const U c_invvar = invvar[i1];
const T* k_input = input + i1*n2;
const T* k_dout = dout + i1*n2;
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
if (gamma != NULL) {
int l = 4*thrx;
for (; l+3 < n2; l+=4*numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l+k]);
const U c_loss = static_cast<U>(k_dout[l+k]);
sum_loss1 += c_loss * gamma[l+k];
sum_loss2 += c_loss * gamma[l+k] * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss * gamma[l];
sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar;
}
} else {
int l = 4*thrx;
for (; l+3 < n2; l+=4*numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l+k]);
const U c_loss = static_cast<U>(k_dout[l+k]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
// intra-warp reductions
for (int mask = blockDim.x/2; mask > 0; mask /= 2) {
sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask);
sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask);
}
// inter-warp reductions
if (blockDim.y > 1) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
for (int offset = blockDim.y/2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.y >= offset && threadIdx.y < 2*offset) {
const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
buf[2*wrt_i] = sum_loss1;
buf[2*wrt_i+1] = sum_loss2;
}
__syncthreads();
// lower half merges
if (threadIdx.y < offset) {
const int read_i = threadIdx.y * blockDim.x + threadIdx.x;
sum_loss1 += buf[2*read_i];
sum_loss2 += buf[2*read_i+1];
}
__syncthreads();
}
if (threadIdx.y == 0) {
buf[2*threadIdx.x] = sum_loss1;
buf[2*threadIdx.x+1] = sum_loss2;
}
__syncthreads();
if (threadIdx.y !=0) {
sum_loss1 = buf[2*threadIdx.x];
sum_loss2 = buf[2*threadIdx.x+1];
}
}
// all threads now have the two sums over l
U fH = (U)n2;
U term1 = (U(1) / fH) * c_invvar;
T* k_grad_input = grad_input + i1*n2;
if (gamma != NULL) {
for (int l = thrx; l < n2; l+=numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss * gamma[l];
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
} else {
for (int l = thrx; l < n2; l+=numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss;
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
}
}
}
template<typename T, typename U>
void HostApplyLayerNorm(
T* output,
U* mean,
U* invvar,
const T* input,
int n1,
int n2,
double epsilon,
const T* gamma,
const T* beta
)
{
auto stream = at::cuda::getCurrentCUDAStream().stream();
const dim3 threads(32,4,1);
const uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
const dim3 blocks(1, std::min((uint64_t)n1, maxGridY), 1);
int nshared =
threads.y > 1 ?
threads.y*sizeof(U)+(threads.y/2)*sizeof(U) :
0;
cuApplyLayerNorm<<<blocks, threads, nshared, stream>>>(
output,
mean,
invvar,
input,
n1,n2,
U(epsilon),
gamma,beta);
}
void cuda_layer_norm(
at::Tensor* output,
at::Tensor* mean,
at::Tensor* invvar,
at::Tensor* input,
int n1,
int n2,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor* gamma,
at::Tensor* beta,
double epsilon)
{
using namespace at;
DISPATCH_DOUBLE_FLOAT_AND_HALF_AND_BF16(input->scalar_type(), 0, "layer_norm_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
HostApplyLayerNorm(
output->DATA_PTR<scalar_t_0>(),
mean->DATA_PTR<accscalar_t>(),
invvar->DATA_PTR<accscalar_t>(),
input->DATA_PTR<scalar_t_0>(),
n1,n2,
epsilon,
gamma != NULL ? gamma->DATA_PTR<scalar_t_0>() : NULL,
beta != NULL ? beta->DATA_PTR<scalar_t_0>() : NULL);
)
}
template<typename T, typename U>
void HostLayerNormGradient(
const T* dout,
const U* mean,
const U* invvar,
at::Tensor* input,
int n1,
int n2,
const T* gamma,
const T* beta,
double epsilon,
T* grad_input,
T* grad_gamma,
T* grad_beta
)
{
auto stream = at::cuda::getCurrentCUDAStream().stream();
if (gamma != NULL && beta != NULL) {
// compute grad_gamma(j) and grad_beta(j)
const int part_size = 16;
const dim3 threads2(32,4,1);
const dim3 blocks2((n2+threads2.x-1)/threads2.x,part_size,1);
const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1);
const int nshared2_b = threads2.x * threads2.y * sizeof(U);
const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b;
at::Tensor part_grad_gamma = at::empty({part_size,n2}, input->options().dtype((input->scalar_type()==at::ScalarType::Half || input->scalar_type()==at::ScalarType::BFloat16) ? at::ScalarType::Float : input->scalar_type()));
at::Tensor part_grad_beta = at::empty_like(part_grad_gamma);
cuComputePartGradGammaBeta<<<blocks2, threads2, nshared2, stream>>>(
dout,
input->DATA_PTR<T>(),
n1,n2,
mean,
invvar,
U(epsilon),
part_grad_gamma.DATA_PTR<U>(),
part_grad_beta.DATA_PTR<U>());
const dim3 threads3(32,8,1);
const dim3 blocks3((n2+threads2.x-1)/threads2.x,1,1);
const int nshared3 = threads3.x * threads3.y * sizeof(U);
cuComputeGradGammaBeta<<<blocks3, threads3, nshared3, stream>>>(
part_grad_gamma.DATA_PTR<U>(),
part_grad_beta.DATA_PTR<U>(),
part_size,
n1,n2,
grad_gamma,
grad_beta);
}
// compute grad_input
const uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
const dim3 blocks1(1, std::min((uint64_t)n1, maxGridY), 1);
const dim3 threads1(32,4,1);
int nshared =
threads1.y > 1 ?
threads1.y*threads1.x*sizeof(U) :
0;
cuComputeGradInput<<<blocks1, threads1, nshared, stream>>>(
dout,
input->DATA_PTR<T>(),
n1,n2,
mean,
invvar,
U(epsilon),
gamma,
grad_input);
}
void cuda_layer_norm_gradient(
at::Tensor* dout,
at::Tensor* mean,
at::Tensor* invvar,
at::Tensor* input,
int n1,
int n2,
#ifdef VERSION_GE_1_1
at::IntArrayRef normalized_shape,
#else
at::IntList normalized_shape,
#endif
at::Tensor* gamma,
at::Tensor* beta,
double epsilon,
at::Tensor* grad_input,
at::Tensor* grad_gamma,
at::Tensor* grad_beta)
{
using namespace at;
DISPATCH_DOUBLE_FLOAT_AND_HALF_AND_BF16(input->scalar_type(), 0, "cuComputeGradInput",
using accscalar_t = at::acc_type<scalar_t_0, true>;
HostLayerNormGradient(
dout->DATA_PTR<scalar_t_0>(),
mean->DATA_PTR<accscalar_t>(),
invvar->DATA_PTR<accscalar_t>(),
input,
n1,n2,
// TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta
// if gamma Tensor is NULL on input.
gamma != NULL ? gamma->DATA_PTR<scalar_t_0>() : NULL,
gamma != NULL ? beta->DATA_PTR<scalar_t_0>() : NULL,
epsilon,
grad_input->DATA_PTR<scalar_t_0>(),
gamma != NULL ? grad_gamma->DATA_PTR<scalar_t_0>() : NULL,
gamma != NULL ? grad_beta->DATA_PTR<scalar_t_0>() : NULL);
)
}
|
COCO-LM/fairseq/fused_ops/csrc/layernorm/layernorm_kernel.cu/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/layernorm/layernorm_kernel.cu",
"repo_id": "COCO-LM",
"token_count": 14286
}
| 200 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# fail fast
set -e
# python get_glue_data.py --data_dir $1
# raw glue data as downloaded by glue download script (https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
if [[ $# -ne 4 ]]; then
echo "Run as following:"
echo "process.sh <glue_data_folder> <task_name> <dict_dir> <output>"
exit 1
fi
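# Example invocation (illustrative; paths are placeholders, not part of the script):
#   bash process.sh /path/to/glue_data QNLI /path/to/cocolm_dict ./glue-bin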
GLUE_DATA_FOLDER=$1
TASKS=$2 # QQP
DICT=$3
OUTPUT=$4
mkdir -p $OUTPUT
if [ "$TASKS" = "ALL" ]
then
TASKS="QQP MNLI QNLI MRPC RTE STS-B SST-2 CoLA"
fi
for TASK in $TASKS
do
echo "Preprocessing $TASK"
TASK_DATA_FOLDER="$GLUE_DATA_FOLDER/$TASK"
echo "Raw data as downloaded from glue website: $TASK_DATA_FOLDER"
SPLITS="train dev test"
INPUT_COUNT=2
if [ "$TASK" = "QQP" ]
then
INPUT_COLUMNS=( 4 5 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=6
elif [ "$TASK" = "MNLI" ]
then
SPLITS="train dev_matched dev_mismatched test_matched test_mismatched"
INPUT_COLUMNS=( 9 10 )
TEST_INPUT_COLUMNS=( 9 10 )
DEV_LABEL_COLUMN=16
LABEL_COLUMN=12
elif [ "$TASK" = "QNLI" ]
then
INPUT_COLUMNS=( 2 3 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=4
elif [ "$TASK" = "MRPC" ]
then
INPUT_COLUMNS=( 4 5 )
TEST_INPUT_COLUMNS=( 4 5 )
LABEL_COLUMN=1
elif [ "$TASK" = "RTE" ]
then
INPUT_COLUMNS=( 2 3 )
TEST_INPUT_COLUMNS=( 2 3 )
LABEL_COLUMN=4
elif [ "$TASK" = "STS-B" ]
then
INPUT_COLUMNS=( 8 9 )
TEST_INPUT_COLUMNS=( 8 9 )
LABEL_COLUMN=10
# Following are single sentence tasks.
elif [ "$TASK" = "SST-2" ]
then
INPUT_COLUMNS=( 1 )
TEST_INPUT_COLUMNS=( 2 )
LABEL_COLUMN=2
INPUT_COUNT=1
elif [ "$TASK" = "CoLA" ]
then
INPUT_COLUMNS=( 4 )
TEST_INPUT_COLUMNS=( 2 )
LABEL_COLUMN=2
INPUT_COUNT=1
fi
  # Strip out the header and filter out lines that don't have the expected number of fields.
rm -rf "$TASK_DATA_FOLDER/processed" ||:
mkdir -p "$TASK_DATA_FOLDER/processed"
for SPLIT in $SPLITS
do
    # CoLA train and dev don't have a header.
if [[ ( "$TASK" = "CoLA") && ( "$SPLIT" != "test" ) ]]
then
cp "$TASK_DATA_FOLDER/$SPLIT.tsv" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp";
else
tail -n +2 "$TASK_DATA_FOLDER/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp";
fi
# Remove unformatted lines from train and dev files for QQP dataset.
if [[ ( "$TASK" = "QQP") && ( "$SPLIT" != "test" ) ]]
then
awk -F '\t' -v NUM_FIELDS=6 'NF==NUM_FIELDS{print}{}' "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp" > "$TASK_DATA_FOLDER/processed/$SPLIT.tsv";
else
cp "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv";
fi
rm "$TASK_DATA_FOLDER/processed/$SPLIT.tsv.temp" ||: ;
done
# Split into input0, input1 and label
for SPLIT in $SPLITS
do
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
if [[ "$SPLIT" != test* ]]
then
COLUMN_NUMBER=${INPUT_COLUMNS[$INPUT_TYPE]}
else
COLUMN_NUMBER=${TEST_INPUT_COLUMNS[$INPUT_TYPE]}
fi
cut -f"$COLUMN_NUMBER" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.raw.input$INPUT_TYPE";
done
if [[ "$SPLIT" != test* ]]
then
if [ "$TASK" = "MNLI" ] && [ "$SPLIT" != "train" ]
then
cut -f"$DEV_LABEL_COLUMN" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.label";
else
cut -f"$LABEL_COLUMN" "$TASK_DATA_FOLDER/processed/$SPLIT.tsv" > "$TASK_DATA_FOLDER/processed/$SPLIT.label";
fi
fi
# BPE encode.
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
MYLANG="input$INPUT_TYPE"
echo "BPE encoding $SPLIT/$MYLANG"
cat $TASK_DATA_FOLDER/processed/$SPLIT.raw.$MYLANG | \
python multiprocessing_sp_encoder.py \
--sentencepiece-model $DICT/sp.model \
--vocab $DICT/dict.txt \
> $TASK_DATA_FOLDER/processed/$SPLIT.$MYLANG
done
done
# Remove output directory.
rm -rf "$TASK-bin" ||:
DEVPREF="$TASK_DATA_FOLDER/processed/dev.LANG"
TESTPREF="$TASK_DATA_FOLDER/processed/test.LANG"
if [ "$TASK" = "MNLI" ]
then
DEVPREF="$TASK_DATA_FOLDER/processed/dev_matched.LANG,$TASK_DATA_FOLDER/processed/dev_mismatched.LANG"
TESTPREF="$TASK_DATA_FOLDER/processed/test_matched.LANG,$TASK_DATA_FOLDER/processed/test_mismatched.LANG"
fi
# Run fairseq preprocessing:
for INPUT_TYPE in $(seq 0 $((INPUT_COUNT-1)))
do
MYLANG="input$INPUT_TYPE"
python ../../fairseq_cli/preprocess.py \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.$MYLANG" \
--validpref "${DEVPREF//LANG/$MYLANG}" \
--testpref "${TESTPREF//LANG/$MYLANG}" \
--destdir "${OUTPUT}/$TASK-bin/$MYLANG" \
--workers 8 \
--srcdict $DICT/dict.txt;
done
if [[ "$TASK" != "STS-B" ]]
then
python ../../fairseq_cli/preprocess.py \
--only-source \
--trainpref "$TASK_DATA_FOLDER/processed/train.label" \
--validpref "${DEVPREF//LANG/'label'}" \
--destdir "${OUTPUT}/$TASK-bin/label" \
--workers 8;
else
    # For STS-B, the output (label) range is converted to [0.0, 1.0]
mkdir -p "${OUTPUT}/$TASK-bin/label"
awk '{print $1 / 5.0 }' "$TASK_DATA_FOLDER/processed/train.label" > "${OUTPUT}/$TASK-bin/label/train.label"
awk '{print $1 / 5.0 }' "$TASK_DATA_FOLDER/processed/dev.label" > "${OUTPUT}/$TASK-bin/label/valid.label"
fi
done
|
COCO-LM/fairseq/preprocess/glue/process.sh/0
|
{
"file_path": "COCO-LM/fairseq/preprocess/glue/process.sh",
"repo_id": "COCO-LM",
"token_count": 2776
}
| 201 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import Dictionary, data_utils, indexed_dataset
def get_parser():
parser = argparse.ArgumentParser(
description="writes text from binarized file to stdout"
)
# fmt: off
parser.add_argument('--dataset-impl', help='dataset implementation',
choices=indexed_dataset.get_available_dataset_impl())
parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
dictionary = Dictionary.load(args.dict) if args.dict is not None else None
dataset = data_utils.load_indexed_dataset(
args.input,
dictionary,
dataset_impl=args.dataset_impl,
default="lazy",
)
for tensor_line in dataset:
if dictionary is None:
line = " ".join([str(int(x)) for x in tensor_line])
else:
line = dictionary.string(tensor_line)
print(line)
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/scripts/read_binarized.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/read_binarized.py",
"repo_id": "COCO-LM",
"token_count": 526
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import tempfile
import torch
def spawn_and_init(fn, world_size, args=None):
if args is None:
args = ()
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
torch.multiprocessing.spawn(
fn=functools.partial(init_and_run, fn, args),
args=(world_size, tmp_file.name,),
nprocs=world_size,
join=True,
)
def distributed_init(rank, world_size, tmp_file):
torch.distributed.init_process_group(
backend="nccl",
init_method="file://{}".format(tmp_file),
world_size=world_size,
rank=rank,
)
torch.cuda.set_device(rank)
def init_and_run(fn, args, rank, world_size, tmp_file):
distributed_init(rank, world_size, tmp_file)
group = torch.distributed.new_group()
fn(rank, group, *args)
def objects_are_equal(a, b) -> bool:
if type(a) is not type(b):
return False
if isinstance(a, dict):
if set(a.keys()) != set(b.keys()):
return False
for k in a.keys():
if not objects_are_equal(a[k], b[k]):
return False
return True
elif isinstance(a, (list, tuple, set)):
if len(a) != len(b):
return False
return all(objects_are_equal(x, y) for x, y in zip(a, b))
elif torch.is_tensor(a):
return (
a.size() == b.size()
and a.dtype == b.dtype
and a.device == b.device
and torch.all(a == b)
)
else:
return a == b
|
COCO-LM/fairseq/tests/distributed/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/distributed/utils.py",
"repo_id": "COCO-LM",
"token_count": 805
}
| 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict
class TestConcatDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def test_concat_dataset_basics(self):
d = ConcatDataset([self.dataset_1, self.dataset_2])
assert len(d) == 2
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
assert d[2]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 1
assert d[2]["source"][0] == 2
|
COCO-LM/fairseq/tests/test_concat_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_concat_dataset.py",
"repo_id": "COCO-LM",
"token_count": 948
}
| 204 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import uuid
from fairseq import metrics
class TestMetrics(unittest.TestCase):
def test_nesting(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate() as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1.5)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_new_root(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_nested_new_root(self):
with metrics.aggregate() as layer1:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as layer2:
metrics.log_scalar("loss", 2)
with metrics.aggregate() as layer3:
metrics.log_scalar("loss", 3)
with metrics.aggregate(new_root=True) as layer4:
metrics.log_scalar("loss", 4)
metrics.log_scalar("loss", 1.5)
self.assertEqual(layer4.get_smoothed_values()["loss"], 4)
self.assertEqual(layer3.get_smoothed_values()["loss"], 3)
self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5)
self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25)
def test_named(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
metrics.log_scalar("loss", 3)
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5)
def test_nested_duplicate_names(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
with metrics.aggregate() as other:
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
metrics.log_scalar("loss", 6)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3)
self.assertEqual(other.get_smoothed_values()["loss"], 2)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_metrics.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_metrics.py",
"repo_id": "COCO-LM",
"token_count": 1257
}
| 205 |
# COCO-LM (Huggingface)
This repository contains the Huggingface version of scripts for fine-tuning COCO-LM pretrained models on GLUE and SQuAD benchmarks. The scripts are based on the [Huggingface Transformers Library](https://github.com/huggingface/transformers).
Paper: [COCO-LM: Correcting and Contrasting Text Sequences for Language Model Pretraining](https://arxiv.org/abs/2102.08473)
## Requirements
The scripts require Python 3.6+; the required Python packages can be installed via pip (running in a virtual environment is recommended):
```
pip3 install -r requirements.txt
```
In addition, if you would like to utilize `fp16` training, you need to install [apex](https://github.com/NVIDIA/apex).
## Pretrained Models
We release two COCO-LM pretrained models, [`cocolm-base`](https://huggingface.co/microsoft/cocolm-base) and [`cocolm-large`](https://huggingface.co/microsoft/cocolm-large), which correspond to the `base++` and `large++` models mentioned in the paper, respectively. You do not need to download them manually as they will be automatically downloaded upon running the training scripts.
## Usage
```python
>>> import torch
>>> from cocolm.modeling_cocolm import COCOLMModel
>>> from cocolm.configuration_cocolm import COCOLMConfig
>>> from cocolm.tokenization_cocolm import COCOLMTokenizer
>>> config = COCOLMConfig.from_pretrained("microsoft/cocolm-base")
>>> model = COCOLMModel.from_pretrained("microsoft/cocolm-base", config=config)
>>> tokenizer = COCOLMTokenizer.from_pretrained("microsoft/cocolm-base")
>>> inputs = tokenizer.encode("Hello world!")
>>> outputs = model(torch.tensor([inputs]))
```
## GLUE Fine-tuning
The [General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) benchmark is a collection of sentence- or sentence-pair language understanding tasks for evaluating and analyzing natural language understanding systems.
**Download GLUE Data**: You can download the [GLUE data](https://gluebenchmark.com/tasks) by running [this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e) and unpacking the downloaded data to some directory.
**Fine-Tuning**: You can run the [`run_glue.sh`](run_glue.sh) script for fine-tuning on each GLUE task. An example for using the script for fine-tuning on MNLI is shown below:
```
MODEL=microsoft/cocolm-base
TASK=MNLI
GLUE_DATASET_PATH=/path/to/downloaded/glue_data
OUT_PATH=./glue_finetune/cocolm_base
BSZ=32
LR=1e-5
EPOCH=2
WARMUP=0.0625
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_glue.sh $MODEL $TASK $GLUE_DATASET_PATH $OUT_PATH $BSZ $LR $EPOCH $WARMUP $SEED
```
**Optimal Hyperparameters**: The fine-tuning hyperparameters leading to the best dev set performance in our experiments are shown below (please note that the results and optimal hyperparameters might slightly differ in your runs due to different computation environments):
* COCO-LM base++
| | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| BSZ | 32/32 | 32 | 32 | 32 | 16 | 16 | 16 | 16 |
| LR | 1e-5/2e-5 | 2e-5 | 1e-5 | 1e-5 | 2e-5 | 3e-5 | 2e-5 | 4e-5 |
| EPOCH | 2/2 | 5 | 5 | 5 | 10 | 10 | 10 | 10 |
| WARMUP | 0.0625/0.0625 | 0.0625 | 0.0625 | 0.0625 | 0.1 | 0.1 | 0.1 | 0.1 |
| Result | 90.1/90.0 | 92.3 | 94.2 | 95.1 | 69.9 | 87.4 | 90.9 | 91.8 |
* COCO-LM large++
| | MNLI-m/mm | QQP | QNLI | SST-2 | CoLA | RTE | MRPC | STS-B |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| BSZ | 32/32 | 32 | 32 | 32 | 32 | 32 | 16 | 16 |
| LR | 5e-6/5e-6 | 2e-5 | 5e-6 | 1e-5 | 2e-5 | 3e-5 | 2e-5 | 2e-5 |
| EPOCH | 2/2 | 5 | 2 | 5 | 10 | 10 | 10 | 10 |
| WARMUP | 0.0625/0.0625 | 0.0625 | 0.0625 | 0.0625 | 0.0625 | 0.1 | 0.1 | 0.1 |
| Result | 91.3/91.6 | 92.8 | 95.8 | 96.8 | 73.1 | 91.3 | 91.9 | 92.8 |
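For example, the base++ RTE column above maps onto the same `run_glue.sh` call shown earlier (paths are placeholders; all hyperparameter values are taken directly from the table):
```
MODEL=microsoft/cocolm-base
TASK=RTE
GLUE_DATASET_PATH=/path/to/downloaded/glue_data
OUT_PATH=./glue_finetune/cocolm_base
BSZ=16
LR=3e-5
EPOCH=10
WARMUP=0.1
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_glue.sh $MODEL $TASK $GLUE_DATASET_PATH $OUT_PATH $BSZ $LR $EPOCH $WARMUP $SEED
```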
## SQuAD 2.0 Fine-tuning
[Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
The SQuAD 2.0 dataset will be automatically downloaded upon running the training script.
**Fine-Tuning**: You can run the [`run_squad.sh`](run_squad.sh) script for fine-tuning on SQuAD 2.0. An example for using the script is shown below:
```
MODEL=microsoft/cocolm-base
SQUAD_DATASET_PATH=/path/to/squad2_data/
OUT_PATH=./squad2_finetune/cocolm_base
BSZ=32
LR=3e-5
EPOCH=3
WARMUP=0.0625
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_squad.sh $MODEL $SQUAD_DATASET_PATH $OUT_PATH $BSZ $LR $EPOCH $WARMUP $SEED
```
**Optimal Hyperparameters**: The fine-tuning hyperparameters leading to the best dev set performance in our experiments are shown below (please note that the results and optimal hyperparameters might slightly differ in your runs due to different computation environments):
* COCO-LM base++
| | EM | F1 |
| ------ | ------ | ------ |
| BSZ | 32 | 32 |
| LR | 3e-5 | 3e-5 |
| EPOCH | 3 | 3 |
| WARMUP | 0.0625 | 0.0625 |
| Result | 84.9 | 87.7 |
* COCO-LM large++
| | EM | F1 |
| ------ | ------ | ------ |
| BSZ | 16 | 16 |
| LR | 1e-5 | 1e-5 |
| EPOCH | 2 | 2 |
| WARMUP | 0.0625 | 0.0625 |
| Result | 88.3 | 91.0 |
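As an illustration, the large++ row above corresponds to the following `run_squad.sh` call (the output path is a placeholder; hyperparameters are taken from the table):
```
MODEL=microsoft/cocolm-large
SQUAD_DATASET_PATH=/path/to/squad2_data/
OUT_PATH=./squad2_finetune/cocolm_large
BSZ=16
LR=1e-5
EPOCH=2
WARMUP=0.0625
SEED=1
export CUDA_VISIBLE_DEVICES=0
bash run_squad.sh $MODEL $SQUAD_DATASET_PATH $OUT_PATH $BSZ $LR $EPOCH $WARMUP $SEED
```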
|
COCO-LM/huggingface/README.md/0
|
{
"file_path": "COCO-LM/huggingface/README.md",
"repo_id": "COCO-LM",
"token_count": 1951
}
| 206 |
datadir: /data/CMIP6/AWI-ESM
name: geopotential
cmip_name: zg
era_name: z
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/AWI-ESM/config_geopotential.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/AWI-ESM/config_geopotential.yml",
"repo_id": "ClimaX",
"token_count": 64
}
| 207 |
datadir: /data/CMIP6/HAMMOZ
name: temperature
cmip_name: ta
era_name: t
run: r1i1p1f1
version: v20190628
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/HAMMOZ/config_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/HAMMOZ/config_temperature.yml",
"repo_id": "ClimaX",
"token_count": 67
}
| 208 |
datadir: /data/CMIP6/TaiESM1
server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP
name: temperature
cmip_name: ta
era_name: t
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/TaiESM1/config_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/TaiESM1/config_temperature.yml",
"repo_id": "ClimaX",
"token_count": 99
}
| 209 |
import argparse
import xarray as xr
import numpy as np
import xesmf as xe
from glob import glob
import os
def regrid(
ds_in,
ddeg_out,
method='bilinear',
reuse_weights=True,
cmip=False,
rename=None
):
"""
Regrid horizontally.
:param ds_in: Input xarray dataset
:param ddeg_out: Output resolution
:param method: Regridding method
    :param reuse_weights: Reuse weights for regridding
    :param cmip: If True, drop CMIP-style bounds variables (lat/lon/plev/time bounds)
    :param rename: Optional (old_name, new_name) pair used to rename a variable
    :return: ds_out: Regridded dataset
"""
# import pdb; pdb.set_trace()
# Rename to ESMF compatible coordinates
if 'latitude' in ds_in.coords:
ds_in = ds_in.rename({'latitude': 'lat', 'longitude': 'lon'})
if cmip:
ds_in = ds_in.drop(('lat_bnds', 'lon_bnds'))
if hasattr(ds_in, 'plev_bnds'):
ds_in = ds_in.drop(('plev_bnds'))
if hasattr(ds_in, 'time_bnds'):
ds_in = ds_in.drop(('time_bnds'))
if rename is not None:
ds_in = ds_in.rename({rename[0]: rename[1]})
# Create output grid
grid_out = xr.Dataset(
{
'lat': (['lat'], np.arange(-90+ddeg_out/2, 90, ddeg_out)),
'lon': (['lon'], np.arange(0, 360, ddeg_out)),
}
)
# Create regridder
regridder = xe.Regridder(
ds_in, grid_out, method, periodic=True, reuse_weights=reuse_weights
)
ds_out = regridder(ds_in, keep_attrs=True).astype('float32')
# # Set attributes since they get lost during regridding
# for var in ds_out:
# ds_out[var].attrs = ds_in[var].attrs
# ds_out.attrs.update(ds_in.attrs)
if rename is not None:
if rename[0] == 'zg':
ds_out['z'] *= 9.807
if rename[0] == 'rsdt':
ds_out['tisr'] *= 60*60
ds_out = ds_out.isel(time=slice(1, None, 12))
ds_out = ds_out.assign_coords({'time': ds_out.time + np.timedelta64(90, 'm')})
# # Regrid dataset
# ds_out = regridder(ds_in)
return ds_out
def main(
input_fns,
output_dir,
ddeg_out,
method='bilinear',
reuse_weights=True,
custom_fn=None,
file_ending='nc',
cmip=False,
rename=None
):
"""
:param input_fns: Input files. Can use *. If more than one, loop over them
:param output_dir: Output directory
:param ddeg_out: Output resolution
:param method: Regridding method
:param reuse_weights: Reuse weights for regridding
:param custom_fn: If not None, use custom file name. Otherwise infer from parameters.
    :param cmip: If True, treat the input as CMIP data (drop bounds variables)
    :param rename: Optional (old_name, new_name) pair passed through to regrid()
    :param file_ending: Default = nc
    """
# Make sure output directory exists
os.makedirs(output_dir, exist_ok=True)
# Get files for starred expressions
if '*' in input_fns[0]:
input_fns = sorted(glob(input_fns[0]))
# Loop over input files
for fn in input_fns:
print(f'Regridding file: {fn}')
ds_in = xr.open_dataset(fn)
ds_out = regrid(ds_in, ddeg_out, method, reuse_weights, cmip, rename)
fn_out = (
custom_fn or
'_'.join(fn.split('/')[-1][:-3].split('_')[:-1]) + '_' + str(ddeg_out) + 'deg.' + file_ending
)
print(f"Saving file: {output_dir + '/' + fn_out}")
ds_out.to_netcdf(output_dir + '/' + fn_out)
ds_in.close(); ds_out.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_fns',
type=str,
nargs='+',
help="Input files (full path). Can use *. If more than one, loop over them",
required=True
)
parser.add_argument(
'--output_dir',
type=str,
help="Output directory",
required=True
)
parser.add_argument(
'--ddeg_out',
type=float,
help="Output resolution",
required=True
)
parser.add_argument(
'--reuse_weights',
type=int,
help="Reuse weights for regridding. 0 or 1 (default)",
# default=1,
default=0
)
parser.add_argument(
'--custom_fn',
type=str,
help="If not None, use custom file name. Otherwise infer from parameters.",
default=None
)
parser.add_argument(
'--file_ending',
type=str,
help="File ending. Default = nc",
default='nc'
)
parser.add_argument(
'--cmip',
type=int,
help="Is CMIP data. 0 or 1 (default)",
default=0
)
parser.add_argument(
'--rename',
type=str,
nargs='+',
help="Rename var in dataset",
default=None
)
args = parser.parse_args()
main(
input_fns=args.input_fns,
output_dir=args.output_dir,
ddeg_out=args.ddeg_out,
reuse_weights=args.reuse_weights,
custom_fn=args.custom_fn,
file_ending=args.file_ending,
cmip=args.cmip,
rename=args.rename
)
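# Example invocation (a sketch only; the paths below are hypothetical and the
# variable names follow the snakemake configs, e.g. renaming CMIP's 'zg' to
# ERA's 'z', which also triggers the geopotential conversion handled above):
#   python regrid.py \
#       --input_fns /data/CMIP6/AWI-ESM/zg_*.nc \
#       --output_dir /data/CMIP6/AWI-ESM/1.40625deg \
#       --ddeg_out 1.40625 \
#       --cmip 1 \
#       --rename zg z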
|
ClimaX/src/data_preprocessing/regrid.py/0
|
{
"file_path": "ClimaX/src/data_preprocessing/regrid.py",
"repo_id": "ClimaX",
"token_count": 2416
}
| 210 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import torch
from torchvision.utils import save_image
from options.train_options import TrainOptions
import data
from util.iter_counter import IterationCounter
from util.util import print_current_errors
from util.util import mkdir
from trainers.pix2pix_trainer import Pix2PixTrainer
if __name__ == '__main__':
# parse options
opt = TrainOptions().parse()
# print options to help debugging
print(' '.join(sys.argv))
dataloader = data.create_dataloader(opt)
len_dataloader = len(dataloader)
# create tool for counting iterations
iter_counter = IterationCounter(opt, len(dataloader))
# create trainer for our model
trainer = Pix2PixTrainer(opt, resume_epoch=iter_counter.first_epoch)
save_root = os.path.join('checkpoints', opt.name, 'train')
mkdir(save_root)
for epoch in iter_counter.training_epochs():
opt.epoch = epoch
iter_counter.record_epoch_start(epoch)
for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter):
iter_counter.record_one_iteration()
# Training
# train generator
if i % opt.D_steps_per_G == 0:
trainer.run_generator_one_step(data_i)
# train discriminator
trainer.run_discriminator_one_step(data_i)
if iter_counter.needs_printing():
losses = trainer.get_latest_losses()
try:
print_current_errors(opt, epoch, iter_counter.epoch_iter,
iter_counter.epoch_iter_num, losses, iter_counter.time_per_iter)
except OSError as err:
print(err)
if iter_counter.needs_displaying():
imgs_num = data_i['label'].shape[0]
if opt.dataset_mode == 'deepfashionHD':
label = data_i['label'][:,:3,:,:]
show_size = opt.display_winsize
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), \
trainer.get_latest_generated().data.cpu(), \
data_i['image'].cpu()), 0)
try:
save_name = '%08d_%08d.png' % (epoch, iter_counter.total_steps_so_far)
save_name = os.path.join(save_root, save_name)
save_image(imgs, save_name, nrow=imgs_num, padding=0, normalize=True)
except OSError as err:
print(err)
if iter_counter.needs_saving():
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
iter_counter.record_current_iter()
except OSError as err:
import pdb; pdb.set_trace()
print(err)
trainer.update_learning_rate(epoch)
iter_counter.record_epoch_end()
if epoch % opt.save_epoch_freq == 0 or epoch == iter_counter.total_epochs:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
trainer.save(epoch)
except OSError as err:
print(err)
print('Training was successfully finished.')
|
CoCosNet-v2/train.py/0
|
{
"file_path": "CoCosNet-v2/train.py",
"repo_id": "CoCosNet-v2",
"token_count": 1723
}
| 211 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import sys
import argparse
import os
from util import util
import torch
import models
import data
import pickle
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
# experiment specifics
parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--model', type=str, default='pix2pix', help='which model to use')
parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# input/output sizes
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=("resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside", "scale_shortside_and_crop", "fixed", "none"))
parser.add_argument('--load_size', type=int, default=256, help='Scale images to this size. The final image will be cropped to --crop_size.')
parser.add_argument('--crop_size', type=int, default=256, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dontcare_label.')
parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
# for setting inputs
parser.add_argument('--dataroot', type=str, default='/mnt/blob/Dataset/ADEChallengeData2016/images')
parser.add_argument('--dataset_mode', type=str, default='ade20k')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
# for displays
parser.add_argument('--display_winsize', type=int, default=400, help='display window size')
# for generator
parser.add_argument('--netG', type=str, default='spade', help='selects model to use for netG (pix2pixhd | spade)')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
parser.add_argument('--z_dim', type=int, default=256,
help="dimension of the latent z vector")
# for instance-wise features
parser.add_argument('--CBN_intype', type=str, default='warp_mask', help='type of CBN input for framework, warp/mask/warp_mask')
parser.add_argument('--maskmix', action='store_true', help='use mask in correspondence net')
        parser.add_argument('--use_attention', action='store_true', help='add a non-local block in G and D')
parser.add_argument('--warp_mask_losstype', type=str, default='none', help='type of warped mask loss, none/direct/cycle')
parser.add_argument('--show_warpmask', action='store_true', help='save warp mask')
parser.add_argument('--match_kernel', type=int, default=3, help='correspondence matrix match kernel size')
parser.add_argument('--adaptor_kernel', type=int, default=3, help='kernel size in domain adaptor')
parser.add_argument('--PONO', action='store_true', help='use positional normalization ')
parser.add_argument('--PONO_C', action='store_true', help='use C normalization in corr module')
parser.add_argument('--eqlr_sn', action='store_true', help='if true, use equlr, else use sn')
parser.add_argument('--vgg_normal_correct', action='store_true', help='if true, correct vgg normalization and replace vgg FM model with ctx model')
parser.add_argument('--weight_domainC', type=float, default=0.0, help='weight of Domain classification loss for domain adaptation')
parser.add_argument('--domain_rela', action='store_true', help='if true, use Relativistic loss in domain classifier')
parser.add_argument('--use_ema', action='store_true', help='if true, use EMA in G')
parser.add_argument('--ema_beta', type=float, default=0.999, help='beta in ema setting')
parser.add_argument('--warp_cycle_w', type=float, default=0.0, help='push warp cycle to ref')
parser.add_argument('--two_cycle', action='store_true', help='input to ref and back')
parser.add_argument('--apex', action='store_true', help='if true, use apex')
parser.add_argument('--warp_bilinear', action='store_true', help='if true, upsample warp by bilinear')
parser.add_argument('--adaptor_res_deeper', action='store_true', help='if true, use 6 res block in domain adaptor')
parser.add_argument('--adaptor_nonlocal', action='store_true', help='if true, use nonlocal block in domain adaptor')
parser.add_argument('--adaptor_se', action='store_true', help='if true, use se layer in domain adaptor')
parser.add_argument('--dilation_conv', action='store_true', help='if true, use dilation conv in domain adaptor when adaptor_res_deeper is True')
parser.add_argument('--use_coordconv', action='store_true', help='if true, use coordconv in CorrNet')
parser.add_argument('--warp_patch', action='store_true', help='use corr matrix to warp 4*4 patch')
parser.add_argument('--warp_stride', type=int, default=4, help='corr matrix 256 / warp_stride')
parser.add_argument('--mask_noise', action='store_true', help='use noise with mask')
parser.add_argument('--noise_for_mask', action='store_true', help='replace mask with noise')
parser.add_argument('--video_like', action='store_true', help='useful in deepfashion')
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, unknown = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
# modify dataset-related parser options
dataset_mode = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_mode)
parser = dataset_option_setter(parser, self.isTrain)
opt, unknown = parser.parse_known_args()
# if there is opt_file, load it.
# The previous default options will be overwritten
if opt.load_from_opt_file:
parser = self.update_options_from_file(parser, opt)
opt = parser.parse_args()
self.parser = parser
return opt
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
def option_file_path(self, opt, makedir=False):
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
if makedir:
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt')
return file_name
def save_options(self, opt):
file_name = self.option_file_path(opt, makedir=True)
with open(file_name + '.txt', 'wt') as opt_file:
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
with open(file_name + '.pkl', 'wb') as opt_file:
pickle.dump(opt, opt_file)
def update_options_from_file(self, parser, opt):
new_opt = self.load_options(opt)
for k, v in sorted(vars(opt).items()):
if hasattr(new_opt, k) and v != getattr(new_opt, k):
new_val = getattr(new_opt, k)
parser.set_defaults(**{k: new_val})
return parser
def load_options(self, opt):
file_name = self.option_file_path(opt, makedir=False)
new_opt = pickle.load(open(file_name + '.pkl', 'rb'))
return new_opt
def parse(self, save=False):
opt = self.gather_options() #gather options from base, train, dataset, model
opt.isTrain = self.isTrain # train or test
self.print_options(opt)
if opt.isTrain:
self.save_options(opt)
# Set semantic_nc based on the option.
# This will be convenient in many places
opt.semantic_nc = opt.label_nc + \
(1 if opt.contain_dontcare_label else 0)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
assert len(opt.gpu_ids) == 0 or opt.batchSize % len(opt.gpu_ids) == 0, \
"Batch size %d is wrong. It must be a multiple of # GPUs %d." \
% (opt.batchSize, len(opt.gpu_ids))
self.opt = opt
return self.opt
|
CoCosNet/options/base_options.py/0
|
{
"file_path": "CoCosNet/options/base_options.py",
"repo_id": "CoCosNet",
"token_count": 4619
}
| 212 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import cv2
from PIL import Image
import numpy as np
from skimage import feature
# parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck',
# 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l']
inner_parts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'eye_g', 'hair']
root = 'C:/Data/CelebAMask-HQ'
def get_edges(edge, t):
edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1])
edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1])
edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:])
edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:])
return edge
for i in range(30000):
img = Image.open(os.path.join(root, 'CelebA-HQ-img', str(i) + '.jpg')).resize((512, 512), resample=Image.BILINEAR)
inner_label = np.ones(img.size, dtype=np.uint8)
edges = np.zeros(img.size, dtype=np.uint8)
subfolder = str(i // 2000)
for part in inner_parts:
edge = np.zeros(img.size, dtype=np.uint8) #this for distance transform map on each facial part
path = os.path.join(root, 'CelebAMask-HQ-mask-anno', subfolder, str(i).zfill(5) + '_' + part + '.png')
if os.path.exists(path):
part_label = Image.open(path).convert('L')
part_label = np.array(part_label)
if part == 'hair':
inner_label[part_label == 255] = 1
else:
inner_label[part_label == 255] = 0
edges = get_edges(edges, part_label)
edge = get_edges(edge, part_label)
im_dist = cv2.distanceTransform(255-edge*255, cv2.DIST_L1, 3)
im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8)
#Image.fromarray(im_dist).save(os.path.join(root, 'CelebAMask-HQ-mask-anno', 'parsing_edges', str(i).zfill(5) + '_{}.png'.format(part)))
# canny edge for background
canny_edges = feature.canny(np.array(img.convert('L')))
canny_edges = canny_edges * inner_label
edges += canny_edges
Image.fromarray(edges * 255).save(os.path.join(root, 'CelebAMask-HQ-mask-anno', 'parsing_edges', str(i).zfill(5) + '.png'))
|
CoCosNet/util/mask_to_edge.py/0
|
{
"file_path": "CoCosNet/util/mask_to_edge.py",
"repo_id": "CoCosNet",
"token_count": 1054
}
| 213 |
CUDA_VISIBLE_DEVICES=0 python run.py \
--prefix codenet \
--output_dir ../saved_models/inference \
--data_cache_dir ../saved_models/inference \
--eval_data_path ../data/codenetmut_test.json \
--model_name_or_path microsoft/codeexecutor \
--block_size 1024 \
--per_gpu_train_batch_size 8 \
--per_gpu_eval_batch_size 16 \
--gradient_accumulation_steps 8 \
--learning_rate 1e-4 \
--node_index 0 \
--weight_decay 0.01 \
--adam_epsilon 1e-6 \
--max_grad_norm 1.0 \
--max_steps 1000 \
--warmup_steps 10000 \
--save_steps 5000 \
--seed 123456
|
CodeBERT/CodeExecutor/inference/run.sh/0
|
{
"file_path": "CodeBERT/CodeExecutor/inference/run.sh",
"repo_id": "CodeBERT",
"token_count": 268
}
| 214 |
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, 2)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = x.reshape(-1,x.size(-1)*2)
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config=config
self.tokenizer=tokenizer
self.classifier=RobertaClassificationHead(config)
self.args=args
def forward(self, inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels=None):
bs,l=inputs_ids_1.size()
inputs_ids=torch.cat((inputs_ids_1.unsqueeze(1),inputs_ids_2.unsqueeze(1)),1).view(bs*2,l)
position_idx=torch.cat((position_idx_1.unsqueeze(1),position_idx_2.unsqueeze(1)),1).view(bs*2,l)
attn_mask=torch.cat((attn_mask_1.unsqueeze(1),attn_mask_2.unsqueeze(1)),1).view(bs*2,l,l)
#embedding
nodes_mask=position_idx.eq(0)
token_mask=position_idx.ge(2)
inputs_embeddings=self.encoder.roberta.embeddings.word_embeddings(inputs_ids)
nodes_to_token_mask=nodes_mask[:,:,None]&token_mask[:,None,:]&attn_mask
nodes_to_token_mask=nodes_to_token_mask/(nodes_to_token_mask.sum(-1)+1e-10)[:,:,None]
avg_embeddings=torch.einsum("abc,acd->abd",nodes_to_token_mask,inputs_embeddings)
inputs_embeddings=inputs_embeddings*(~nodes_mask)[:,:,None]+avg_embeddings*nodes_mask[:,:,None]
outputs = self.encoder.roberta(inputs_embeds=inputs_embeddings,attention_mask=attn_mask,position_ids=position_idx,token_type_ids=position_idx.eq(-1).long())[0]
logits=self.classifier(outputs)
# shape: [batch_size, num_classes]
prob=F.softmax(logits, dim=-1)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits, labels)
return loss,prob
else:
return prob
|
CodeBERT/GraphCodeBERT/clonedetection/model.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/clonedetection/model.py",
"repo_id": "CodeBERT",
"token_count": 1311
}
| 215 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
Build Seqence-to-Sequence.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
* `sos_id`- start of symbol ids in target for beam search.
* `eos_id`- end of symbol ids in target for beam search.
"""
def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lsm = nn.LogSoftmax(dim=-1)
self.tie_weights()
self.beam_size=beam_size
self.max_length=max_length
self.sos_id=sos_id
self.eos_id=eos_id
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.encoder.embeddings.word_embeddings)
def forward(self, source_ids,source_mask,position_idx,attn_mask,target_ids=None,target_mask=None,args=None):
#embedding
nodes_mask=position_idx.eq(0)
token_mask=position_idx.ge(2)
inputs_embeddings=self.encoder.embeddings.word_embeddings(source_ids)
nodes_to_token_mask=nodes_mask[:,:,None]&token_mask[:,None,:]&attn_mask
nodes_to_token_mask=nodes_to_token_mask/(nodes_to_token_mask.sum(-1)+1e-10)[:,:,None]
avg_embeddings=torch.einsum("abc,acd->abd",nodes_to_token_mask,inputs_embeddings)
inputs_embeddings=inputs_embeddings*(~nodes_mask)[:,:,None]+avg_embeddings*nodes_mask[:,:,None]
outputs = self.encoder(inputs_embeds=inputs_embeddings,attention_mask=attn_mask,position_ids=position_idx)
encoder_output = outputs[0].permute([1,0,2]).contiguous()
#source_mask=token_mask.float()
if target_ids is not None:
attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
lm_logits = self.lm_head(hidden_states)
# Shift so that tokens < n predict n
active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
else:
#Predict
preds=[]
zero=torch.cuda.LongTensor(1).fill_(0)
for i in range(source_ids.shape[0]):
context=encoder_output[:,i:i+1]
context_mask=source_mask[i:i+1,:]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids=beam.getCurrentState()
context=context.repeat(1, self.beam_size,1)
context_mask=context_mask.repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
out = torch.tanh(self.dense(out))
hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
hyp= beam.getHyp(beam.getFinal())
pred=beam.buildTargetTokens(hyp)[:self.beam_size]
pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds=torch.cat(preds,0)
return preds
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
Given prob over words for every last beam `wordLk` and attention
`attnOut`: Compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
|
CodeBERT/GraphCodeBERT/translation/model.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/translation/model.py",
"repo_id": "CodeBERT",
"token_count": 4898
}
| 216 |
# Code Completion
## Dependency
- pip install torch
- pip install transformers
- pip install javalang
## Data Download
```bash
unzip dataset.zip
cd dataset/javaCorpus/
bash download.sh
python preprocess.py --base_dir=token_completion --output_dir=./
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/CodeCompletion-line/dataset/javaCorpus/line_completion/test.json
cd ../py150
bash download.sh
python preprocess.py --base_dir=py150_files --output_dir=./
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/CodeCompletion-line/dataset/py150/line_completion/test.json
cd ../..
```
## Fine-Tune Setting
Here we provide the fine-tuning settings for code completion whose results are reported in the paper.
#### JavaCorpus Dataset
```shell
# Training
python run.py \
--do_train \
--do_eval \
--lang java \
--model_name_or_path microsoft/unixcoder-base \
--train_filename dataset/javaCorpus/train.txt \
--dev_filename dataset/javaCorpus/dev.json \
--output_dir saved_models/javaCorpus \
--max_source_length 936 \
--max_target_length 64 \
--beam_size 5 \
--train_batch_size 32 \
--gradient_accumulation_steps 1 \
--eval_batch_size 32 \
--learning_rate 2e-5 \
--num_train_epochs 10
# Output predictions of test set
python run.py \
--do_test \
--lang java \
--model_name_or_path microsoft/unixcoder-base \
--load_model_path saved_models/javaCorpus/checkpoint-best-acc/pytorch_model.bin \
--test_filename dataset/javaCorpus/test.json \
--output_dir saved_models/javaCorpus \
--max_source_length 936 \
--max_target_length 64 \
--beam_size 5 \
--eval_batch_size 32
```
Prediction results on the test set are written to ```saved_models/javaCorpus/predictions.txt```. To obtain the test set score, you need to send the predictions to [email protected].
#### PY150 Dataset
```shell
# Training
python run.py \
--do_train \
--do_eval \
--lang python \
--model_name_or_path microsoft/unixcoder-base \
--train_filename dataset/py150/train.txt \
--dev_filename dataset/py150/dev.json \
--output_dir saved_models/py150 \
--max_source_length 936 \
--max_target_length 64 \
--beam_size 5 \
--train_batch_size 32 \
--gradient_accumulation_steps 1 \
--eval_batch_size 32 \
--learning_rate 2e-4 \
--num_train_epochs 10
# Output predictions of test set
python run.py \
--do_test \
--lang python \
--model_name_or_path microsoft/unixcoder-base \
--load_model_path saved_models/py150/checkpoint-best-acc/pytorch_model.bin \
--test_filename dataset/py150/test.json \
--output_dir saved_models/py150 \
--max_source_length 936 \
--max_target_length 64 \
--beam_size 5 \
--eval_batch_size 32
```
Prediction results on the test set are written to ```saved_models/py150/predictions.txt```. To obtain the test set score, you need to send the predictions to [email protected].
|
CodeBERT/UniXcoder/downstream-tasks/code-completion/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-completion/README.md",
"repo_id": "CodeBERT",
"token_count": 1029
}
| 217 |
# Zero-shot Code-to-Code Search
Given a source code as the query, the task aims to retrieve codes with the same semantics from a collection of candidates in zero-shot setting. We collect 11,744/15,594/23,530 functions from [CodeNet](https://github.com/IBM/Project_CodeNet) corpus in Ruby/Python/Java. Each function solves one of 4,053 problems.
## Data Download
```bash
cd dataset
wget https://dax-cdn.cdn.appdomain.cloud/dax-project-codenet/1.0.0/Project_CodeNet.tar.gz
tar -xvf Project_CodeNet.tar.gz
python preprocess.py
cd ..
```
## Dependency
- pip install torch
- pip install transformers
## Zero-Shot Setting
```bash
source_lang=ruby
target_lang=python
python run.py \
--model_name_or_path microsoft/unixcoder-base \
--query_data_file dataset/${source_lang}_with_func.jsonl \
--candidate_data_file dataset/${target_lang}_with_func.jsonl \
--query_lang ${source_lang} \
--candidate_lang ${target_lang} \
--code_length 512 \
--eval_batch_size 256
```
|
CodeBERT/UniXcoder/downstream-tasks/zero-shot-search/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/zero-shot-search/README.md",
"repo_id": "CodeBERT",
"token_count": 337
}
| 218 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import logging
import os
from src.postprocess import PostProcessor
from src.execution import evaluate_with_test_code, evaluate_with_test_cases
from src.io_utils import Tools
from src.agreement import DataManager, DualAgreement
from src.evaluation import pass_at_K, get_result_of_sorted_solutions
logging.basicConfig(
format="SystemLog: [%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--source_path_for_solution", type=str, help="model input file in .jsonl format")
parser.add_argument("--predict_path_for_solution", type=str, help="model output file in .jsonl format")
parser.add_argument("--source_path_for_test", type=str, help="model input file in .jsonl format")
parser.add_argument("--predict_path_for_test", type=str, help="model output file in .jsonl format")
parser.add_argument("--cache_dir", type=str, help="the directory to store the cache files")
parser.add_argument("--timeout", type=float, default=0.1, help="how many seconds to wait during execution for each test case")
parser.add_argument("--test_case_limit", type=int, default=5, help="first n test cases per sample")
args = parser.parse_args()
handled_solutions, task_count = PostProcessor.map_task_id_for_solution(args.predict_path_for_solution, args.source_path_for_solution)
handled_test_cases = PostProcessor.map_task_id_for_test_case(args.predict_path_for_test, args.source_path_for_test)
ground_truth_exec_result = evaluate_with_test_code(handled_solutions, timeout=args.timeout)
dual_exec_result = evaluate_with_test_cases(handled_solutions, handled_test_cases, timeout=args.timeout, limit=args.test_case_limit)
Tools.dump_pickle(os.path.join(args.cache_dir, 'ground_truth_exec_result.pkl'), ground_truth_exec_result)
Tools.dump_pickle(os.path.join(args.cache_dir, 'dual_exec_result.pkl'), dual_exec_result)
data_manager = DataManager(dual_exec_result, handled_solutions, handled_test_cases, args.test_case_limit)
set_consistency = DualAgreement(data_manager)
ranked_result = set_consistency.get_sorted_solutions_without_iter()
logger.info('pass rates of ranked solutions')
get_result_of_sorted_solutions(ground_truth_exec_result, ranked_result)
logger.info('pass rates of random solutions')
pass_at_K(ground_truth_exec_result)
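# Example invocation (file names are hypothetical; any .jsonl files in the
# model-input/model-output formats expected by PostProcessor will work):
#   python main.py \
#       --source_path_for_solution data/solution_prompts.jsonl \
#       --predict_path_for_solution preds/solution_samples.jsonl \
#       --source_path_for_test data/test_case_prompts.jsonl \
#       --predict_path_for_test preds/test_case_samples.jsonl \
#       --cache_dir cache \
#       --timeout 0.1 \
#       --test_case_limit 5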
|
CodeT/CodeT/main.py/0
|
{
"file_path": "CodeT/CodeT/main.py",
"repo_id": "CodeT",
"token_count": 914
}
| 219 |
#!/usr/bin/env bash
set -e
trap 'exitScript' ERR
help()
{
cat <<- _EOF_
Help for Codex CLI Bash setup script
Usage: source bash_setup.sh [optional parameters]
-o orgId Set the OpenAI organization id.
-k apiKey Set the OpenAI API key.
-e engineId Set the OpenAI engine id.
-d Print some system information for debugging.
-h Print this help content.
To uninstall Codex CLI use bash_cleanup.sh.
For more information visit https://github.com/microsoft/Codex-CLI
_EOF_
}
# Read command line parameters
readParameters()
{
while [ "$1" != "" ]; do
case $1 in
-o ) shift; ORG_ID=$1 ;;
-k ) shift; SECRET_KEY=$1 ;;
-e ) shift; ENGINE_ID=$1 ;;
-d ) systemInfo
exitScript
;;
* ) help
exitScript
;;
esac
shift
done
}
# Prompt user for OpenAI settings
askSettings()
{
echo "*** Starting Codex CLI bash setup ***"
if [ -z "$ORG_ID" ]; then
echo -n 'OpenAI Organization Id: '; read ORG_ID
fi
if [ -z "$SECRET_KEY" ]; then
echo -n 'OpenAI API key: '; read -s SECRET_KEY; echo
fi
if [ -z "$ENGINE_ID" ]; then
echo -n 'OpenAI Engine Id: '; read ENGINE_ID
fi
}
# Call OpenAI API with the given settings to verify everything is in order
validateSettings()
{
echo -n "*** Testing Open AI access... "
local TEST=$(curl -s 'https://api.openai.com/v1/engines' -H "Authorization: Bearer $SECRET_KEY" -H "OpenAI-Organization: $ORG_ID" -w '%{http_code}')
local STATUS_CODE=$(echo "$TEST"|tail -n 1)
if [ $STATUS_CODE -ne 200 ]; then
echo "ERROR [$STATUS_CODE]"
echo "Failed to access OpenAI API, result: $STATUS_CODE"
echo "Please check your OpenAI API key (https://beta.openai.com/account/api-keys)"
echo "and Organization ID (https://beta.openai.com/account/org-settings)."
echo "*************"
exitScript
return
fi
local ENGINE_FOUND=$(echo "$TEST"|grep '"id"'|grep "\"$ENGINE_ID\"")
if [ -z "$ENGINE_FOUND" ]; then
echo "ERROR"
echo "Cannot find OpenAI engine: $ENGINE_ID"
echo "Please check the OpenAI engine id (https://beta.openai.com/docs/engines/codex-series-private-beta)."
echo "*************"
exitScript
return
fi
echo "OK ***"
}
# Store API key and other settings in `openaiapirc`
configureApp()
{
echo "*** Configuring application [$OPENAI_RC_FILE] ***"
echo '[openai]' > $OPENAI_RC_FILE
echo "organization_id=$ORG_ID" >> $OPENAI_RC_FILE
echo "secret_key=$SECRET_KEY" >> $OPENAI_RC_FILE
echo "engine=$ENGINE_ID" >> $OPENAI_RC_FILE
chmod +x "$CODEX_CLI_PATH/src/codex_query.py"
}
# Create and load ~/.codexclirc to setup bash 'Ctrl + G' binding
configureBash()
{
echo "*** Configuring bash [$BASH_RC_FILE] ***"
echo -n > $HOME/.codexclirc
echo "export CODEX_CLI_PATH=\"${CODEX_CLI_PATH}\"" >> $BASH_RC_FILE
echo 'source "$CODEX_CLI_PATH/scripts/bash_plugin.sh"' >> $BASH_RC_FILE
echo "bind -x '\"\C-g\":\"create_completion\"'" >> $BASH_RC_FILE
if [ $SOURCED -eq 1 ]; then
echo "*** Testing bash settings [$BASH_RC_FILE] ***"
source "$BASH_RC_FILE"
fi
}
# Add call to .codexclirc into .bashrc
enableApp()
{
echo "*** Activating application [$HOME/.bashrc] ***"
# Check if already installed
if grep -Fq ".codexclirc" $HOME/.bashrc; then
return 0
fi
echo -e "\n# Initialize Codex CLI" >> $HOME/.bashrc
echo 'if [ -f "$HOME/.codexclirc" ]; then' >> $HOME/.bashrc
echo ' . "$HOME/.codexclirc"' >> $HOME/.bashrc
echo 'fi' >> $HOME/.bashrc
}
# Print some system details useful to debug the script in case it's not working
systemInfo()
{
echo "*** system ***"
uname -smpr
echo "*** shell ***"
echo $SHELL
echo "*** bash interpreter ***"
echo $BASH_VERSION
echo "*** python ***"
if command -v python &> /dev/null; then
which python
python --version
else
echo "python not found"
fi
echo "*** curl ***"
if command -v curl &> /dev/null; then
which curl
curl --version
else
echo "curl not found"
fi
}
# Remove variables and functions from the environment, in case the script was sourced
cleanupEnv()
{
unset ORG_ID SECRET_KEY ENGINE_ID SOURCED OPENAI_RC_FILE BASH_RC_FILE
unset -f askSettings validateSettings configureApp configureBash enableApp readParameters
}
# Clean exit for sourced scripts
exitScript()
{
cleanupEnv
kill -SIGINT $$
}
# Detect if the script is sourced
(return 0 2>/dev/null) && SOURCED=1 || SOURCED=0
# Path to Codex CLI source
CODEX_CLI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
# Path to OpenAI API settings
OPENAI_RC_FILE="$CODEX_CLI_PATH/src/openaiapirc"
# Path to Bash settings loaded when a Bash session starts
BASH_RC_FILE="$HOME/.codexclirc"
# Start installation
readParameters $*
askSettings
validateSettings
configureApp
configureBash
enableApp
cleanupEnv
echo -e "*** Setup complete! ***\n";
echo "***********************************************"
echo "Open a new Bash terminal, type '#' followed by"
echo "your natural language command and hit Ctrl + G!"
echo "***********************************************"
|
Codex-CLI/scripts/bash_setup.sh/0
|
{
"file_path": "Codex-CLI/scripts/bash_setup.sh",
"repo_id": "Codex-CLI",
"token_count": 2218
}
| 220 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: face.py
Description: Face section of the Cognitive Face API.
"""
from . import util
def detect(image, face_id=True, landmarks=False, attributes=''):
"""Detect human faces in an image and returns face locations, and
optionally with `face_id`s, landmarks, and attributes.
Args:
image: A URL or a file path or a file-like object represents an image.
face_id: [Optional] Return faceIds of the detected faces or not. The
default value is true.
landmarks: [Optional] Return face landmarks of the detected faces or
not. The default value is false.
attributes: [Optional] Analyze and return the one or more specified
face attributes in the comma-separated string like
"age,gender". Supported face attributes include age, gender,
headPose, smile, facialHair, glasses, emotion, makeup, accessories,
occlusion, blur, exposure, noise. Note that each face attribute
analysis has additional computational and time cost.
Returns:
An array of face entries ranked by face rectangle size in descending
order. An empty response indicates no faces detected. A face entry may
contain the corresponding values depending on input parameters.
"""
url = 'detect'
headers, data, json = util.parse_image(image)
params = {
'returnFaceId': face_id and 'true' or 'false',
'returnFaceLandmarks': landmarks and 'true' or 'false',
'returnFaceAttributes': attributes,
}
return util.request(
'POST', url, headers=headers, params=params, json=json, data=data)
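# Minimal usage sketch for `detect` (illustrative only: the key, endpoint and
# image URL are placeholders, and it assumes the package-level `Key`/`BaseUrl`
# helpers have been used to configure access as described in the project README):
#   import cognitive_face as CF
#   CF.Key.set('<subscription_key>')
#   CF.BaseUrl.set('https://<region>.api.cognitive.microsoft.com/face/v1.0/')
#   faces = CF.face.detect('https://example.com/photo.jpg', attributes='age,gender')
#   # `faces` is a list of face entries, each with a 'faceId' and 'faceRectangle'.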
def find_similars(face_id,
face_list_id=None,
large_face_list_id=None,
face_ids=None,
max_candidates_return=20,
mode='matchPerson'):
"""Given query face's `face_id`, to search the similar-looking faces from a
`face_id` array, a `face_list_id` or a `large_face_list_id`.
Parameter `large_face_list_id`, `face_list_id` and `face_ids` should not be
provided at the same time.
Args:
face_id: `face_id` of the query face. User needs to call `face.detect`
first to get a valid `face_id`. Note that this `face_id` is not
persisted and will expire in 24 hours after the detection call.
face_list_id: An existing user-specified unique candidate face list,
created in `face_list.create`. Face list contains a set of
`persisted_face_ids` which are persisted and will never expire.
large_face_list_id: An existing user-specified unique candidate face
list, created in `large_face_list.create`. Large Face list contains
a set of `persisted_face_ids` which are persisted and will never
expire.
face_ids: An array of candidate `face_id`s. All of them are created by
`face.detect` and the `face_id`s will expire in 24 hours after the
detection call. The number of `face_id`s is limited to 1000.
max_candidates_return: Optional parameter. The number of top similar
faces returned. The valid range is [1, 1000]. It defaults to 20.
mode: Optional parameter. Similar face searching mode. It can be
"matchPerson" or "matchFace". It defaults to "matchPerson".
Returns:
An array of the most similar faces represented in `face_id` if the
input parameter is `face_ids` or `persisted_face_id` if the input
parameter is `face_list_id` or `large_face_list_id`.
"""
url = 'findsimilars'
json = {
'faceId': face_id,
'faceListId': face_list_id,
'largeFaceListId': large_face_list_id,
'faceIds': face_ids,
'maxNumOfCandidatesReturned': max_candidates_return,
'mode': mode,
}
return util.request('POST', url, json=json)
def group(face_ids):
"""Divide candidate faces into groups based on face similarity.
Args:
face_ids: An array of candidate `face_id`s created by `face.detect`.
The maximum is 1000 faces.
Returns:
one or more groups of similar faces (ranked by group size) and a
messyGroup.
"""
url = 'group'
json = {
'faceIds': face_ids,
}
return util.request('POST', url, json=json)
def identify(face_ids,
person_group_id=None,
large_person_group_id=None,
max_candidates_return=1,
threshold=None):
"""Identify unknown faces from a person group or a large person group.
Args:
face_ids: An array of query `face_id`s, created by the `face.detect`.
            Each of the faces is identified independently. The valid number of
`face_ids` is between [1, 10].
person_group_id: `person_group_id` of the target person group, created
by `person_group.create`.
large_person_group_id: `large_person_group_id` of the target large
            person group, created by `large_person_group.create`.
max_candidates_return: Optional parameter. The range of
`max_candidates_return` is between 1 and 5 (default is 1).
threshold: Optional parameter. Confidence threshold of identification,
used to judge whether one face belongs to one person. The range of
confidence threshold is [0, 1] (default specified by algorithm).
Returns:
The identified candidate person(s) for each query face(s).
"""
url = 'identify'
json = {
'personGroupId': person_group_id,
'largePersonGroupId': large_person_group_id,
'faceIds': face_ids,
'maxNumOfCandidatesReturned': max_candidates_return,
'confidenceThreshold': threshold,
}
return util.request('POST', url, json=json)
def verify(face_id,
another_face_id=None,
person_group_id=None,
large_person_group_id=None,
person_id=None):
"""Verify whether two faces belong to a same person or whether one face
belongs to a person.
For face to face verification, only `face_id` and `another_face_id` is
necessary. For face to person verification, only `face_id`,
`person_group_id` (or `large_person_group_id`) and `person_id` is needed.
Args:
face_id: `face_id` of one face, comes from `face.detect`.
another_face_id: `face_id` of another face, comes from `face.detect`.
person_group_id: Using existing `person_group_id` and `person_id` for
fast loading a specified person. `person_group_id` is created in
`person_group.create`.
large_person_group_id: Using existing `large_person_group_id` and
`person_id` for fast loading a specified person.
`large_person_group_id` is created in `large_person_group.create`.
person_id: Specify a certain person in a person group. `person_id` is
created in `person.create`.
Returns:
The verification result.
"""
url = 'verify'
json = {}
if another_face_id:
json.update({
'faceId1': face_id,
'faceId2': another_face_id,
})
else:
json.update({
'faceId': face_id,
'personGroupId': person_group_id,
'largePersonGroupId': large_person_group_id,
'personId': person_id,
})
return util.request('POST', url, json=json)
|
Cognitive-Face-Python/cognitive_face/face.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/face.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 3006
}
| 221 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: test_large_person_group_person.py
Description: Unittests for Large Person Group Person section of the Cognitive
Face API.
"""
import unittest
import cognitive_face as CF
from . import util
class TestLargePersonGroupPerson(unittest.TestCase):
"""Unittests for Large Person Group Person section."""
def test_person(self):
"""Unittests for `large_person_group_person.create`,
`large_person_group_person.update` and
`large_person_group_person.delete`.
"""
res = CF.large_person_group_person.create(
util.DataStore.large_person_group_id, 'TempPerson')
print(res)
self.assertIsInstance(res, dict)
util.wait()
person_id = res['personId']
res = CF.large_person_group_person.update(
util.DataStore.large_person_group_id, person_id, 'TP')
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.large_person_group_person.delete(
util.DataStore.large_person_group_id, person_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_get(self):
"""Unittest for `large_person_group_person.get`."""
res = CF.large_person_group_person.get(
util.DataStore.large_person_group_id,
util.DataStore.large_person_group_person_id['Dad'])
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_list(self):
"""Unittest for `large_person_group_person.list`."""
res = CF.large_person_group_person.list(
util.DataStore.large_person_group_id)
print(res)
self.assertIsInstance(res, list)
util.wait()
|
Cognitive-Face-Python/cognitive_face/tests/test_large_person_group_person.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/tests/test_large_person_group_person.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 779
}
| 222 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: panel_identification.py
Description: Identification Panel for Python SDK sample.
"""
import os
import uuid
import wx
import wx.lib.scrolledpanel as scrolled
import util
import model
from view import base
class IdentificationPanel(base.MyPanel):
"""Identification Panel."""
def __init__(self, parent):
super(IdentificationPanel, self).__init__(parent)
self.large_person_group_id = str(uuid.uuid1())
self.person_id_names = {}
self.person_name_faces = {}
self.faces = {}
self.face_ids = []
self.vsizer = wx.BoxSizer(wx.VERTICAL)
self.panel = scrolled.ScrolledPanel(self)
self.hsizer = wx.BoxSizer()
self.hsizer.AddStretchSpacer()
self.hvsizer = wx.BoxSizer(wx.VERTICAL)
self.hvsizer.SetMinSize((util.INNER_PANEL_WIDTH, -1))
label = ('1) Place face images of one person in a folder and give '
'the folder the same name as that person.\n'
'2) Repeat the step above one or more times, creating '
'different folders for different people.\n'
'3) Place all of the person folders in one root folder.\n'
'4) Click "Load Group" and select the root folder you '
'created above.\n'
'5) Click "Choose Image" to select a different image '
'representing one of the people for whom you created '
'folders above. The face in the image will be framed and '
'tagged with the name of the person.')
self.static_text = wx.StaticText(self.panel, label=label)
self.static_text.Wrap(util.INNER_PANEL_WIDTH)
self.hvsizer.Add(self.static_text, 0, wx.ALL, 0)
self.vhsizer = wx.BoxSizer()
self.lsizer = wx.BoxSizer(wx.VERTICAL)
self.lsizer.SetMinSize((util.MAX_IMAGE_SIZE, -1))
flag = wx.EXPAND | wx.ALIGN_CENTER | wx.ALL
self.btn_folder = wx.Button(self.panel, label='Load Group')
self.lsizer.Add(self.btn_folder, 0, flag, 5)
self.Bind(wx.EVT_BUTTON, self.OnChooseFolder, self.btn_folder)
flag = wx.ALIGN_CENTER | wx.ALL | wx.EXPAND
self.grid = base.CaptionWrapFaceList(self.panel)
self.lsizer.Add(self.grid, 0, flag, 5)
self.vhsizer.Add(self.lsizer, 1, wx.EXPAND)
self.vhsizer.AddSpacer(90)
self.rsizer = wx.BoxSizer(wx.VERTICAL)
self.rsizer.SetMinSize((util.MAX_IMAGE_SIZE, -1))
flag = wx.EXPAND | wx.ALIGN_CENTER | wx.ALL
self.btn_file = wx.Button(self.panel, label='Choose Image')
self.rsizer.Add(self.btn_file, 0, flag, 5)
self.Bind(wx.EVT_BUTTON, self.OnChooseImage, self.btn_file)
flag = wx.ALIGN_CENTER | wx.ALL
self.bitmap = base.MyStaticBitmap(self.panel)
self.rsizer.Add(self.bitmap, 0, flag, 5)
self.vhsizer.Add(self.rsizer, 1, wx.EXPAND)
self.hvsizer.Add(self.vhsizer)
self.hsizer.Add(self.hvsizer)
self.hsizer.AddStretchSpacer()
self.hsizer.Layout()
self.panel.SetSizer(self.hsizer)
self.panel.Layout()
self.panel.SetupScrolling(scroll_x=False)
self.vsizer.Add(self.panel, 3, wx.EXPAND)
self.log = base.MyLog(self)
self.vsizer.Add(self.log, 1, wx.EXPAND)
self.SetSizerAndFit(self.vsizer)
self.btn_file.Disable()
def OnChooseFolder(self, evt):
"""Choose Folder."""
self.log.log((
'Request: Group {0} will be used to build a person database. '
'Checking whether the group exists.').format(
self.large_person_group_id))
try:
util.CF.large_person_group.get(self.large_person_group_id)
self.log.log(
'Response: Group {0} exists.'.format(
self.large_person_group_id))
text = ('Requires a clean up for group "{0}" before setting up a '
'new person database. Click YES to proceed, group "{0}" '
'will be cleared.').format(self.large_person_group_id)
title = 'Warning'
style = wx.YES_NO | wx.ICON_WARNING
result = wx.MessageBox(text, title, style)
if result == wx.YES:
util.CF.large_person_group.delete(self.large_person_group_id)
self.large_person_group_id = str(uuid.uuid1())
else:
return
except util.CF.CognitiveFaceException as exp:
if exp.code != 'LargePersonGroupNotFound':
self.log.log('Response: {}. {}'.format(exp.code, exp.msg))
return
else:
self.log.log(
                'Response: Group {0} did not exist previously.'.format(
self.large_person_group_id))
self.log.log(
'Request: Creating group "{0}"'.format(self.large_person_group_id))
util.CF.large_person_group.create(self.large_person_group_id)
self.log.log('Response: Success. Group "{0}" created'.format(
self.large_person_group_id))
self.log.log((
'Preparing faces for identification, detecting faces in chosen '
'folder.'))
dlg = wx.DirDialog(self)
if dlg.ShowModal() != wx.ID_OK:
return
path = dlg.GetPath()
self.person_id_names.clear()
self.person_name_faces.clear()
face_count = 0
for person_name in os.listdir(path):
path_person = os.path.join(path, person_name)
if os.path.isdir(path_person):
self.log.log(
'Request: Creating person "{0}"'.format(person_name))
res = util.CF.large_person_group_person.create(
self.large_person_group_id, person_name)
person_id = res['personId']
self.log.log(
'Response: Success. Person "{0}" (PersonID: {1}) created'.
format(person_name, person_id))
self.person_id_names[person_id] = person_name
self.person_name_faces[person_name] = []
for entry in os.listdir(path_person):
path_face = os.path.join(path_person, entry)
if os.path.isfile(path_face):
res = util.CF.large_person_group_person_face.add(
path_face, self.large_person_group_id, person_id)
if res.get('persistedFaceId'):
face_count += 1
face = model.Face(res, path_face)
self.person_name_faces[person_name].append(face)
self.log.log('Response: Success. Total {0} faces are detected.'.
format(face_count))
self.log.log(
'Request: Training group "{0}"'.format(
self.large_person_group_id))
res = util.CF.large_person_group.train(self.large_person_group_id)
self.grid.set_data(self.person_name_faces)
self.panel.SetupScrolling(scroll_x=False)
self.btn_file.Enable()
def OnChooseImage(self, evt):
"""Choose Image."""
util.CF.util.wait_for_large_person_group_training(
self.large_person_group_id)
self.log.log(
'Response: Success. Group "{0}" training process is Succeeded'.
format(self.large_person_group_id))
dlg = wx.FileDialog(self, wildcard=util.IMAGE_WILDCARD)
if dlg.ShowModal() != wx.ID_OK:
return
path = dlg.GetPath()
self.bitmap.set_path(path)
self.log.log('Detecting faces in {}'.format(path))
self.faces.clear()
del self.face_ids[:]
res = util.CF.face.detect(path)
for entry in res:
face = model.Face(entry, path)
self.faces[face.id] = face
self.face_ids.append(face.id)
self.log.log('Request: Identifying {0} face(s) in group "{1}"'.format(
len(self.faces), self.large_person_group_id))
res = util.CF.face.identify(
self.face_ids,
large_person_group_id=self.large_person_group_id)
for entry in res:
face_id = entry['faceId']
if entry['candidates']:
person_id = entry['candidates'][0]['personId']
self.faces[face_id].set_name(self.person_id_names[person_id])
else:
self.faces[face_id].set_name('Unknown')
util.draw_bitmap_rectangle(self.bitmap, self.faces.values())
log_text = 'Response: Success.'
for face_id in self.faces:
log_text += ' Face {0} is identified as {1}.'.format(
face_id, self.faces[face_id].name)
self.log.log(log_text)
|
Cognitive-Face-Python/sample/view/panel_identification.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/view/panel_identification.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 4424
}
| 223 |
# Copyright (c) Microsoft. All rights reserved.
import logging
from time import gmtime, strftime
import sys
def create_logger(name, silent=False, to_disk=False, log_file=None):
"""Logger wrapper"""
# setup logger
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
log.propagate = False
formatter = logging.Formatter(
fmt="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S"
)
if not silent:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
log.addHandler(ch)
if to_disk:
log_file = (
log_file
if log_file is not None
else strftime("%Y-%m-%d-%H-%M-%S.log", gmtime())
)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
return log
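# A minimal usage sketch (illustrative; the log file name is a placeholder):
#
#   logger = create_logger(__name__, to_disk=True, log_file='train.log')
#   logger.info('Console shows INFO and above; the file also captures DEBUG.')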
|
ContextualSP/adaptershare/data_utils/log_wrapper.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/log_wrapper.py",
"repo_id": "ContextualSP",
"token_count": 431
}
| 224 |
#!/usr/bin/env bash
###############################
# Training script for domain adaptation
# By Xiaodong
###############################
set -e
if [[ $# -lt 4 ]]; then
  echo "usage: run_domain_adaptation.sh <data_dir> <init_checkpoint> <train> <test> [batch-size]"
  exit 1
fi
data_dir=$1
ICKPT=$2
TRAIN=$3
TEST=$4
batch_size=${5:-"16"}
export ROOT_DIR="domain_app"
export EPOCH=3
export LR="5e-5"
export OPTIM="adamax"
export TASK_DEF="experiments/domain_adaptation/domain_adaptation_def.yml"
export BS=${batch_size}
export ED="1"
echo ${TASK_DEF}
task=$(echo ${TRAIN} | sed -e 's/_train.json//' )
echo $task
output_dir="${ROOT_DIR}/${task}"
echo $output_dir
mkdir -p ${output_dir}
if [[ -f "${output_dir}/model*.pt" ]]; then
rm "${output_dir}/model*.pt"
rm "${output_dir}/config.json"
fi
LOG_FILE="${output_dir}/domain-adaptation-train.log"
python train.py --data_dir=${data_dir}/${DD} --task_def=${TASK_DEF} --train_dataset=${TRAIN} --test_dataset=${TEST} --init_checkpoint=${ICKPT} --batch_size=${BS} --learning_rate=${LR} --epochs=${EPOCH} --encoder_type=${ED} --optimizer=${OPTIM} --output_dir=${output_dir} --log_file=${LOG_FILE}
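# Example invocation (illustrative; the data directory, checkpoint path and dataset
# file names are placeholders):
#   bash experiments/domain_adaptation/run_domain_adaptation.sh \
#     data/canonical_data mt_dnn_models/mt_dnn_base.pt scitail_train.json scitail_test.json 16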
|
ContextualSP/adaptershare/experiments/domain_adaptation/run_domain_adaptation.sh/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/domain_adaptation/run_domain_adaptation.sh",
"repo_id": "ContextualSP",
"token_count": 472
}
| 225 |
import os
import argparse
from sys import path
path.append(os.getcwd())
from data_utils.task_def import DataFormat
from data_utils.log_wrapper import create_logger
from experiments.ner.ner_utils import load_conll_chunk, load_conll_ner, load_conll_pos
from experiments.common_utils import dump_rows
logger = create_logger(
__name__, to_disk=True, log_file="bert_ner_data_proc_512_cased.log"
)
def parse_args():
parser = argparse.ArgumentParser(description="Preprocessing English NER dataset.")
parser.add_argument("--data_dir", type=str, required=True)
parser.add_argument("--seed", type=int, default=13)
parser.add_argument("--output_dir", type=str, required=True)
args = parser.parse_args()
return args
def main(args):
data_dir = args.data_dir
data_dir = os.path.abspath(data_dir)
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
train_path = os.path.join(data_dir, "train.txt")
dev_path = os.path.join(data_dir, "valid.txt")
test_path = os.path.join(data_dir, "test.txt")
train_data = load_conll_ner(train_path)
dev_data = load_conll_ner(dev_path)
test_data = load_conll_ner(test_path)
logger.info("Loaded {} NER train samples".format(len(train_data)))
logger.info("Loaded {} NER dev samples".format(len(dev_data)))
logger.info("Loaded {} NER test samples".format(len(test_data)))
pos_train_data = load_conll_pos(train_path)
pos_dev_data = load_conll_pos(dev_path)
pos_test_data = load_conll_pos(test_path)
logger.info("Loaded {} POS train samples".format(len(pos_train_data)))
logger.info("Loaded {} POS dev samples".format(len(pos_train_data)))
logger.info("Loaded {} POS test samples".format(len(pos_train_data)))
chunk_train_data = load_conll_chunk(train_path)
chunk_dev_data = load_conll_chunk(dev_path)
chunk_test_data = load_conll_chunk(test_path)
logger.info("Loaded {} POS train samples".format(len(chunk_train_data)))
logger.info("Loaded {} POS dev samples".format(len(chunk_dev_data)))
logger.info("Loaded {} POS test samples".format(len(chunk_test_data)))
bert_root = args.output_dir
if not os.path.isdir(bert_root):
os.mkdir(bert_root)
train_fout = os.path.join(bert_root, "ner_train.tsv")
dev_fout = os.path.join(bert_root, "ner_dev.tsv")
test_fout = os.path.join(bert_root, "ner_test.tsv")
dump_rows(train_data, train_fout, DataFormat.Seqence)
dump_rows(dev_data, dev_fout, DataFormat.Seqence)
dump_rows(test_data, test_fout, DataFormat.Seqence)
logger.info("done with NER")
train_fout = os.path.join(bert_root, "pos_train.tsv")
dev_fout = os.path.join(bert_root, "pos_dev.tsv")
test_fout = os.path.join(bert_root, "pos_test.tsv")
dump_rows(pos_train_data, train_fout, DataFormat.Seqence)
dump_rows(pos_dev_data, dev_fout, DataFormat.Seqence)
dump_rows(pos_test_data, test_fout, DataFormat.Seqence)
logger.info("done with POS")
train_fout = os.path.join(bert_root, "chunk_train.tsv")
dev_fout = os.path.join(bert_root, "chunk_dev.tsv")
test_fout = os.path.join(bert_root, "chunk_test.tsv")
dump_rows(chunk_train_data, train_fout, DataFormat.Seqence)
dump_rows(chunk_dev_data, dev_fout, DataFormat.Seqence)
dump_rows(chunk_test_data, test_fout, DataFormat.Seqence)
logger.info("done with chunk")
if __name__ == "__main__":
args = parse_args()
main(args)
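# Example invocation and expected input layout (illustrative; paths are placeholders).
# The CoNLL loaders above expect CoNLL-2003-style files where each non-blank line is
# typically "token POS chunk NER" and sentences are separated by blank lines, e.g.:
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#
# Run as:
#   python experiments/ner/prepro.py --data_dir data/conll2003 --output_dir data/canonical_data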
|
ContextualSP/adaptershare/experiments/ner/prepro.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/ner/prepro.py",
"repo_id": "ContextualSP",
"token_count": 1397
}
| 226 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import torch
import random
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from module.dropout_wrapper import DropoutWrapper
from module.similarity import FlatSimilarityWrapper, SelfAttnWrapper
from module.my_optim import weight_norm as WN
SMALL_POS_NUM = 1.0e-30
def generate_mask(new_data, dropout_p=0.0, is_training=False):
if not is_training:
dropout_p = 0.0
new_data = (1 - dropout_p) * (new_data.zero_() + 1)
for i in range(new_data.size(0)):
one = random.randint(0, new_data.size(1) - 1)
new_data[i][one] = 1
mask = 1.0 / (1 - dropout_p) * torch.bernoulli(new_data)
mask.requires_grad = False
return mask
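# Illustrative behaviour of generate_mask (one possible random draw, values assumed):
# with a 2x3 buffer and dropout_p=0.5 during training, every entry is kept with
# probability 0.5, one randomly chosen position per row is forced to be kept, and
# surviving entries are rescaled by 1/(1-0.5)=2, e.g.
#   [[2., 0., 2.],
#    [0., 2., 0.]]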
class Classifier(nn.Module):
def __init__(self, x_size, y_size, opt, prefix="decoder", dropout=None):
super(Classifier, self).__init__()
self.opt = opt
if dropout is None:
self.dropout = DropoutWrapper(opt.get("{}_dropout_p".format(prefix), 0))
else:
self.dropout = dropout
self.merge_opt = opt.get("{}_merge_opt".format(prefix), 0)
self.weight_norm_on = opt.get("{}_weight_norm_on".format(prefix), False)
if self.merge_opt == 1:
self.proj = nn.Linear(x_size * 4, y_size)
else:
self.proj = nn.Linear(x_size * 2, y_size)
if self.weight_norm_on:
self.proj = weight_norm(self.proj)
def forward(self, x1, x2, mask=None):
if self.merge_opt == 1:
x = torch.cat([x1, x2, (x1 - x2).abs(), x1 * x2], 1)
else:
x = torch.cat([x1, x2], 1)
x = self.dropout(x)
scores = self.proj(x)
return scores
class SANClassifier(nn.Module):
"""Implementation of Stochastic Answer Networks for Natural Language Inference, Xiaodong Liu, Kevin Duh and Jianfeng Gao
https://arxiv.org/abs/1804.07888
"""
def __init__(
self, x_size, h_size, label_size, opt={}, prefix="decoder", dropout=None
):
super(SANClassifier, self).__init__()
if dropout is None:
self.dropout = DropoutWrapper(
opt.get("{}_dropout_p".format(self.prefix), 0)
)
else:
self.dropout = dropout
self.prefix = prefix
self.query_wsum = SelfAttnWrapper(
x_size, prefix="mem_cum", opt=opt, dropout=self.dropout
)
self.attn = FlatSimilarityWrapper(x_size, h_size, prefix, opt, self.dropout)
self.rnn_type = "{}{}".format(
opt.get("{}_rnn_type".format(prefix), "gru").upper(), "Cell"
)
self.rnn = getattr(nn, self.rnn_type)(x_size, h_size)
self.num_turn = opt.get("{}_num_turn".format(prefix), 5)
self.opt = opt
self.mem_random_drop = opt.get("{}_mem_drop_p".format(prefix), 0)
self.mem_type = opt.get("{}_mem_type".format(prefix), 0)
self.weight_norm_on = opt.get("{}_weight_norm_on".format(prefix), False)
self.label_size = label_size
self.dump_state = opt.get("dump_state_on", False)
self.alpha = Parameter(torch.zeros(1, 1), requires_grad=False)
if self.weight_norm_on:
self.rnn = WN(self.rnn)
self.classifier = Classifier(
x_size, self.label_size, opt, prefix=prefix, dropout=self.dropout
)
def forward(self, x, h0, x_mask=None, h_mask=None):
h0 = self.query_wsum(h0, h_mask)
if type(self.rnn) is nn.LSTMCell:
c0 = h0.new(h0.size()).zero_()
scores_list = []
for turn in range(self.num_turn):
att_scores = self.attn(x, h0, x_mask)
x_sum = torch.bmm(F.softmax(att_scores, 1).unsqueeze(1), x).squeeze(1)
scores = self.classifier(x_sum, h0)
scores_list.append(scores)
# next turn
if self.rnn is not None:
h0 = self.dropout(h0)
if type(self.rnn) is nn.LSTMCell:
h0, c0 = self.rnn(x_sum, (h0, c0))
else:
h0 = self.rnn(x_sum, h0)
if self.mem_type == 1:
mask = generate_mask(
self.alpha.data.new(x.size(0), self.num_turn),
self.mem_random_drop,
self.training,
)
mask = [m.contiguous() for m in torch.unbind(mask, 1)]
tmp_scores_list = [
mask[idx].view(x.size(0), 1).expand_as(inp) * F.softmax(inp, 1)
for idx, inp in enumerate(scores_list)
]
scores = torch.stack(tmp_scores_list, 2)
scores = torch.mean(scores, 2)
scores = torch.log(scores)
else:
scores = scores_list[-1]
if self.dump_state:
return scores, scores_list
else:
return scores
class MaskLmHeader(nn.Module):
"""Mask LM"""
def __init__(self, embedding_weights=None, bias=False):
super(MaskLmHeader, self).__init__()
self.decoder = nn.Linear(
embedding_weights.size(1), embedding_weights.size(0), bias=bias
)
self.decoder.weight = embedding_weights
self.nsp = nn.Linear(embedding_weights.size(1), 2)
def forward(self, hidden_states):
mlm_out = self.decoder(hidden_states)
nsp_out = self.nsp(hidden_states[:, 0, :])
return mlm_out, nsp_out
|
ContextualSP/adaptershare/module/san.py/0
|
{
"file_path": "ContextualSP/adaptershare/module/san.py",
"repo_id": "ContextualSP",
"token_count": 2756
}
| 227 |
from transformers import BertConfig, BertModel, BertTokenizer
from module.san_model import SanModel
MODEL_CLASSES = {
"bert": (BertConfig, BertModel, BertTokenizer),
# "xlnet": (XLNetConfig, XLNetModel, XLNetTokenizer),
# "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
# "albert": (AlbertConfig, AlbertModel, AlbertTokenizer),
# "xlm": (XLMRobertaConfig, XLMRobertaModel, XLMRobertaTokenizer),
# "san": (BertConfig, SanModel, BertTokenizer),
# "electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
# "t5": (T5Config, T5EncoderModel, T5Tokenizer),
# "deberta": (DebertaConfig, DebertaModel, DebertaTokenizer),
# "t5g": (T5Config, T5ForConditionalGeneration, T5Tokenizer),
}
|
ContextualSP/adaptershare/pretrained_models.py/0
|
{
"file_path": "ContextualSP/adaptershare/pretrained_models.py",
"repo_id": "ContextualSP",
"token_count": 275
}
| 228 |
import os
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup
from models import *
from utils import *
from datetime import datetime
import logging
from dataclasses import dataclass, field
def get_logger(log_dir: str, version: str):
os.makedirs(log_dir, exist_ok=True)
logging_file = os.path.join(log_dir, '{}.log'.format(version))
logging.basicConfig(level=logging.INFO,
format='%(asctime)s\t%(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logging_file,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s\t%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
logger = logging.getLogger("")
logger.addHandler(console)
return logger
def set_seed(seed = 123):
import random
import numpy as np
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@dataclass
class DistillingArgs:
learning_rate: float = field(default=3e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
dropout: float = field(default=0.3)
train_batch_size: int = field(default=16, metadata={"help": "Training batch size"})
eval_batch_size: int = field(default=32, metadata={"help": "Evaluation batch size"})
num_train_epochs: int = field(default=10, metadata={"help": "Training epochs"})
max_encode_length: int = field(default=512)
alw_func: str = field(default='0.0')
logging_steps : int = field(default=100)
evaluate_steps: int = field(default=1000)
accumulation_steps: int = field(default=1)
checkpoint: str = field(default=None) # pretrained checkpoint
with_alignments: bool = field(default=False)
bert_version: str = field(default=None)
model: str = field(default=None)
data_dir: str = field(default="data/slsql", metadata={"help": "input data dir"})
out_dir: str = field(default='out', metadata={'help': 'output data dir'})
sampling: bool = field(default=False)
device: str = field(default='cpu')
seed: int = field(default=123)
class SelfLearningDistiller:
"""
Teacher-Student Self-Learning Distiller
"""
args: DistillingArgs
model: nn.Module
logger: logging.Logger
device: torch.device
version: str
def __init__(self, args: DistillingArgs) -> None:
set_seed(args.seed)
self.args = args
self.version = "self-learning_{}".format(datetime.now().strftime("%Y%m%d%H%M"))
if self.args.sampling:
self.version = "sampling_" + self.version
self.args.evaluate_steps = 100
self.device = torch.device(self.args.device)
self.logger = get_logger(os.path.join(self.args.out_dir, self.version), 'self_learning')
self.model = self.load_model_from_ckpt()
open(os.path.join(self.args.out_dir, self.version, "config.json"), 'w', encoding='utf-8').write(json.dumps(self.args.__dict__, indent=4, sort_keys=True) + '\n')
self.logger.info("save training config over.")
def load_model_from_ckpt(self):
ckpt_path = os.path.join(self.args.data_dir, 'checkpoints', self.args.checkpoint)
ckpt_dir = os.path.dirname(ckpt_path)
config = json.load(open(os.path.join(ckpt_dir, 'config.json'), 'r', encoding='utf-8'))
config['dropout'] = self.args.dropout
assert self.args.bert_version is None or self.args.bert_version == config['bert_version']
assert self.args.model is None or self.args.model == config['model']
self.args.bert_version = config['bert_version']
self.args.model = config['model']
model = load_model_from_checkpoint(config['model'], self.device, checkpoint=ckpt_path, **{'bert_version': config['bert_version'], 'dropout': 0.0 })
self.logger.info("Load checkpoint from {} over.".format(self.args.checkpoint, self.args.bert_version))
for key, val in config.items():
self.logger.info("{} = {}".format(key, val))
return model
def get_train_and_eval_iter(self):
bert_version = self.args.bert_version.replace("hfl/", "")
train_paths = [os.path.join(self.args.data_dir, f"train.{bert_version}.json")]
dev_paths = [os.path.join(self.args.data_dir, f"dev.{bert_version}.json")]
tokenizer = BertTokenizer.from_pretrained(self.args.bert_version)
self.logger.info("load BERT tokenizer from {} over.".format(self.args.bert_version))
data_loader_func = get_data_iterator_func(self.args.model)
if self.args.sampling:
train_iter = data_loader_func(train_paths, tokenizer, self.args.train_batch_size, self.device, False, True, self.args.max_encode_length, sampling_size=self.args.train_batch_size * 100)
dev_iter = data_loader_func(dev_paths, tokenizer, self.args.eval_batch_size, self.device, False, False, self.args.max_encode_length, sampling_size=self.args.train_batch_size * 20)
else:
train_iter = data_loader_func(train_paths, tokenizer, self.args.train_batch_size, self.device, False, True, self.args.max_encode_length)
dev_iter = data_loader_func(dev_paths, tokenizer, self.args.eval_batch_size, self.device, False, False, self.args.max_encode_length)
self.logger.info("load train iterator over, size = {}".format(len(train_iter.batch_sampler)))
self.logger.info("load dev iterator over, size = {}".format(len(dev_iter.batch_sampler)))
return train_iter, dev_iter
def evaluate(self, model: nn.Module, dev_iter: DataLoader, saved_file=None):
model.eval()
evaluator = get_evaluator_class(self.args.model)()
with torch.no_grad():
for batch_inputs in dev_iter:
batch_outputs = model.compute_loss(**batch_inputs)
evaluator.add_batch(batch_inputs, batch_outputs)
saved_path = os.path.join(self.args.out_dir, self.version, saved_file) if saved_file is not None else None
eval_result = evaluator.get_metrics(saved_path)
model.train()
self.logger.info("Evaluate over:\n{}".format("\n".join([f"{k} = {v:.4f}" if isinstance(v, float) else f"{k} {v}" for k, v in eval_result.items()])))
return eval_result
@staticmethod
def get_masking_inference_func(masking_inputs: Dict, model: nn.Module, infer_size: int):
infer_outputs = defaultdict(list)
input_token_ids, input_token_types, meta_index = masking_inputs['input_token_ids'], masking_inputs['input_token_types'], masking_inputs['meta_index']
model.eval()
index = 0
with torch.no_grad():
while index < len(input_token_ids):
model_inputs = {
'input_token_ids': input_token_ids[index:index+infer_size],
'input_token_types': input_token_types[index:index+infer_size],
'meta_index': meta_index[index:index+infer_size]
}
model_outputs = model.forward(**model_inputs)
for token_type in [SQLTokenType.table, SQLTokenType.column, SQLTokenType.value]:
if f'{str(token_type)}_logits' in model_outputs:
infer_outputs[token_type.abbr] += model_outputs[f'{str(token_type)}_logits']
index += infer_size
for key, val in infer_outputs.items():
infer_outputs[key] = torch.stack(val, dim=0)
return infer_outputs
@staticmethod
def get_alignment_weights_from_teacher(inputs: Dict, teacher: nn.Module):
teacher.eval()
with torch.no_grad():
outputs = teacher.forward(**inputs)
assert 'alignment_weights' in outputs
alignment_weights: List[torch.Tensor] = outputs['alignment_weights']
for i in range(len(alignment_weights)):
labels = torch.cat([inputs['table_labels'][i], inputs['column_labels'][i], inputs['value_labels'][i]], dim=0)
assert alignment_weights[i].size(0) == len(labels)
alignment_weights[i].masked_fill_((labels == 0)[:, None], 0.0)
return alignment_weights
@staticmethod
def soft_cross_entropy_with_logits(predict_logits: torch.Tensor, target_logits: torch.Tensor) -> torch.Tensor:
return (- target_logits * predict_logits.log()).mean()
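    # Note on the helper above (a reading of the code, not an official spec): despite the
    # "logits" naming, `predict_logits` is consumed as a probability distribution (it is
    # passed through .log() directly) and `target_logits` acts as soft target weights.
    # For example, with target [0.0, 1.0] and prediction [0.25, 0.75] the loss is
    # mean([-0.0 * log(0.25), -1.0 * log(0.75)]) ≈ 0.144.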
def _compute_distill_loss(self, s_outputs, t_alignments: torch.Tensor):
align_ce_loss = 0
for i in range(len(t_alignments)):
align_ce_loss += self.soft_cross_entropy_with_logits(
s_outputs['alignment_weights'][i],
t_alignments[i]
)
align_ce_loss /= len(t_alignments)
s_outputs['identify_loss'] = s_outputs['loss']
s_outputs['align_loss'] = align_ce_loss
s_outputs['loss'] = s_outputs['identify_loss'] + s_outputs['align_loss'] * float(self.args.alw_func)
return s_outputs
def distill(self):
"""
use pre-trained student model as teacher to train a new student model
"""
train_iter, dev_iter = self.get_train_and_eval_iter()
teacher = self.model
self.logger.info("Evaluating Teacher ...")
self.evaluate(teacher, dev_iter, 'teacher.eval.txt')
teacher.eval()
model_args = { 'bert_version': self.args.bert_version, 'dropout': self.args.dropout }
student = load_model_from_checkpoint(model=self.args.model, device=self.args.device, **model_args)
self.logger.info("Initialize new model as student over.")
num_train_steps = self.args.num_train_epochs * int(len(train_iter))
self.logger.info("num_train_steps = {}".format(num_train_steps))
optimizer = AdamW(student.parameters(), lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(0.05 * num_train_steps), num_training_steps=num_train_steps)
student.train()
grad_accumulation_count = 0
global_step = 0
best_ckpt_path, best_eval_score = None, -100
for epoch in range(self.args.num_train_epochs):
logging_loss = defaultdict(float)
for batch_inputs in train_iter:
global_step += 1
grad_accumulation_count += 1
if not self.args.with_alignments:
batch_inputs['align_loss_weight'] = float(self.args.alw_func)
batch_inputs['masking_infer_func'] = lambda x: self.get_masking_inference_func(x, teacher, self.args.train_batch_size)
outputs = student.compute_loss(**batch_inputs)
else:
outputs = student.compute_loss(**batch_inputs)
teacher_alignment_weights = self.get_alignment_weights_from_teacher(batch_inputs, teacher)
outputs = self._compute_distill_loss(outputs, teacher_alignment_weights)
loss = outputs['loss'] / self.args.accumulation_steps
loss.backward()
if grad_accumulation_count % self.args.accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(student.parameters(), self.args.max_grad_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
logging_loss['total_loss'] += loss.item() * self.args.accumulation_steps
logging_loss['align_loss'] += outputs['align_loss'].item()
if global_step % self.args.logging_steps == 0:
loss_string = "total loss: {:.4f}; align loss: {:.4f} ({:.4f})".format(
logging_loss['total_loss'] / self.args.logging_steps,
logging_loss['align_loss'] / self.args.logging_steps,
float(self.args.alw_func))
self.logger.info("Epoch: {}, Step: {}/{}, {}".format(epoch + 1, global_step, len(train_iter) * (epoch + 1), loss_string))
logging_loss = defaultdict(float)
if global_step % self.args.evaluate_steps == 0:
self.logger.info("Evaluating student step {} ...".format(global_step))
eval_metrics = self.evaluate(student, dev_iter, saved_file='student.eval.step_{}.txt'.format(global_step))
eval_score = (eval_metrics['overall accuracy'] + eval_metrics['average F1']) / 2
saved_path = os.path.join(self.args.out_dir, self.version, "student.step_{}.acc_{:.3f}.f1_{:.3f}.pt".format(
global_step,
eval_metrics['overall accuracy'],
eval_metrics['average F1']))
torch.save(student.state_dict(), saved_path)
self.logger.info("Save checkpoint to {}".format(saved_path))
if eval_score > best_eval_score:
best_eval_score = eval_score
best_ckpt_path = saved_path
self.logger.info("Best Student Model Path: {}".format(best_ckpt_path))
self.logger.info("***** Running teacher-student self-training over *****")
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-lr', '--learning_rate', help='learning rate', type=float, default=5e-5)
parser.add_argument('-train_bs', '--train_batch_size', help='train batch size', type=int, default=10)
parser.add_argument('-eval_bs', '--eval_batch_size', help='eval batch size', type=int, default=10)
parser.add_argument('-max_enc_length', '--max_encode_length', help='sequence max encode length', type=int, default=512)
parser.add_argument('-num_epochs', '--num_train_epochs', default=30, type=int)
parser.add_argument('-sampling', '--sampling', action='store_true')
parser.add_argument('-ckpt', '--checkpoint', default=None)
parser.add_argument('-with_align', '--with_alignments', action='store_true')
parser.add_argument('-alw', '--alw_func', default='0.1')
parser.add_argument('-data', '--data_dir', default=os.getenv("PT_DATA_DIR", default='data/slsql'))
parser.add_argument('-out_dir', '--out_dir', default=os.getenv("PT_OUTPUT_DIR",default='pt'))
parser.add_argument('-acc_steps', '--accumulation_steps', type=int, default=1)
parser.add_argument('-dropout', '--dropout', type=float, default=0.3)
parser.add_argument('-gpu', '--device', default='cuda:0' if torch.cuda.is_available() else 'cpu')
args = parser.parse_args()
distill_args = DistillingArgs(**dict(args._get_kwargs()))
return distill_args
if __name__ == '__main__':
args = parse_args()
distiller = SelfLearningDistiller(args)
distiller.distill()
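# Example invocation (illustrative; the checkpoint file name is a placeholder and is
# resolved relative to <data_dir>/checkpoints/ by load_model_from_ckpt above):
#   python distill.py -data data/slsql -ckpt teacher.step_9000.pt -alw 0.1 -num_epochs 10 -gpu cuda:0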
|
ContextualSP/awakening_latent_grounding/distill.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/distill.py",
"repo_id": "ContextualSP",
"token_count": 6781
}
| 229 |
#!/usr/bin/env bash
wget https://ai.tencent.com/ailab/nlp/en/dialogue/datasets/Restoration-200K.zip
unzip -j Restoration-200K.zip
rm -rf Restoration-200K.zip
python ../../preprocess.py --dataset Multi
|
ContextualSP/incomplete_utterance_rewriting/dataset/Multi/download.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/dataset/Multi/download.sh",
"repo_id": "ContextualSP",
"token_count": 77
}
| 230 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Author: Qian Liu (SivilTaram)
# Original Repo: https://github.com/microsoft/ContextualSP
from typing import Dict
from typing import List
import numpy as np
import torch
import torch.nn as nn
from allennlp.data import Vocabulary
from allennlp.models import Model
from allennlp.modules import Seq2SeqEncoder
from allennlp.modules import TextFieldEmbedder
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.matrix_attention import DotProductMatrixAttention, CosineMatrixAttention, BilinearMatrixAttention, \
LinearMatrixAttention
from allennlp.nn import util
from torch.nn import ModuleDict
from torch.nn.utils.rnn import pad_sequence
from attn_unet import AttentionUNet
from data_utils import Scorer, BatchAverage, FScoreMetric, get_class_mapping, transmit_seq, CorpusBLEUMetric
from similar_functions import ElementWiseMatrixAttention
def count_parameters(model):
parameter_count = ["Name: {}\t\tCount: {}".format(name, p.numel()) for name, p in model.named_parameters()
if p.requires_grad]
return "\n".join(parameter_count)
@Model.register('rewrite')
class UnifiedFollowUp(Model):
def __init__(self, vocab: Vocabulary,
text_encoder: Seq2SeqEncoder,
word_embedder: TextFieldEmbedder,
enable_training_log: bool = False,
inp_drop_rate: float = 0.2,
out_drop_rate: float = 0.2,
loss_weights: List = (0.2, 0.4, 0.4),
super_mode: str = 'before',
backbone: str = 'unet',
unet_down_channel: int = 256,
feature_sel: int = 127):
super(UnifiedFollowUp, self).__init__(vocab)
self.text_encoder = text_encoder
self.word_embedder = word_embedder
"""
Define model arch choices
"""
self.backbone = backbone
# input dropout
if inp_drop_rate > 0:
self.var_inp_dropout = InputVariationalDropout(p=inp_drop_rate)
else:
self.var_inp_dropout = lambda x: x
# output dropout
if out_drop_rate > 0:
self.var_out_dropout = InputVariationalDropout(p=out_drop_rate)
else:
self.var_out_dropout = lambda x: x
self.hidden_size = text_encoder.get_output_dim() // 2 if text_encoder.is_bidirectional() \
else text_encoder.get_output_dim()
self.output_size = text_encoder.get_output_dim()
# ele -> element wise multiply
# dot -> dot product
# cos -> cosine similarity
# emb_dot -> embedding dot product
# emb_cos -> embedding cosine similarity
# linear -> linear similarity
# bilinear -> bilinear similarity
feature_sel = feature_sel
sel_arr = "{0:07b}".format(int(feature_sel))
nni_choices = ['ele', 'dot', 'cos', 'emb_dot', 'emb_cos', 'linear', 'bilinear']
self.segment_choices = [nni_choices[i] for i in range(7) if sel_arr[i] == '1']
# if expand bi-direction, we will regard forward/backward as two channels
self.expand_bidir = False
self.similar_function = ModuleDict({
'ele': ElementWiseMatrixAttention(),
'dot': DotProductMatrixAttention(),
'cos': CosineMatrixAttention(),
'emb_dot': DotProductMatrixAttention(),
'emb_cos': CosineMatrixAttention(),
'bilinear': BilinearMatrixAttention(matrix_1_dim=self.output_size, matrix_2_dim=self.output_size),
'linear': LinearMatrixAttention(tensor_1_dim=self.output_size, tensor_2_dim=self.output_size)
})
self.attn_channel = 0
for choice in self.segment_choices:
if choice == 'ele':
self.attn_channel += self.output_size
elif choice in ['dot', 'cos', 'emb_dot', 'emb_cos', 'bilinear', 'linear']:
if self.expand_bidir:
self.attn_channel += 2
else:
self.attn_channel += 1
self.class_mapping: Dict[str, int] = get_class_mapping(super_mode=super_mode)
# Here we have two choices now, one is MLP, and another is UNet
if self.backbone == 'unet':
self.segmentation_net = AttentionUNet(input_channels=self.attn_channel,
class_number=len(self.class_mapping.keys()),
down_channel=unet_down_channel)
else:
raise Exception("Currently we do not support for other arches.")
class_zero_weight = loss_weights[0]
class_one_weight = loss_weights[1]
self.register_buffer('weight_tensor', torch.tensor([class_zero_weight, class_one_weight,
1 - class_zero_weight - class_one_weight]))
self.loss = nn.CrossEntropyLoss(ignore_index=-1,
weight=self.weight_tensor)
# initialize metrics measurement
self.metrics = {'ROUGE': BatchAverage(),
'_ROUGE1': BatchAverage(),
'_ROUGE2': BatchAverage(),
                        # TODO: You can speed up the code by disabling BLEU, since
                        # the corpus-based BLEU metric is quite time-consuming.
'BLEU': CorpusBLEUMetric(),
'EM': BatchAverage(),
'F1': FScoreMetric(prefix="1"),
'F2': FScoreMetric(prefix="2"),
'F3': FScoreMetric(prefix="3")}
parameter_num = count_parameters(self)
print(parameter_num)
self.min_width = 8
self.min_height = 8
self.enable_training_log = enable_training_log
def forward(self, matrix_map: torch.Tensor,
context_str: List[str],
cur_str: List[str],
restate_str: List[str],
context_tokens: Dict[str, torch.Tensor] = None,
cur_tokens: Dict[str, torch.Tensor] = None,
joint_tokens: Dict[str, torch.Tensor] = None,
joint_border: torch.Tensor = None) -> Dict[str, torch.Tensor]:
attn_features = []
# no joint encoding
if context_tokens is not None:
if 'bert-type-ids' in context_tokens:
# fmod to avoid out of index
context_tokens['bert-type-ids'] = torch.fmod(context_tokens['bert-type-ids'], 2)
context_embedding = self.word_embedder(context_tokens)
cur_embedding = self.word_embedder(cur_tokens)
batch_size, context_len, _ = context_embedding.size()
joint_embedding = torch.cat([context_embedding, cur_embedding], dim=1)
# add variational dropout
joint_embedding = self.var_inp_dropout(joint_embedding)
context_embedding, cur_embedding = joint_embedding[:, :context_len, :], joint_embedding[:, context_len:, :]
# get context-sensitive representations
context_mask = util.get_text_field_mask(context_tokens)
context_repr = self.text_encoder(context_embedding, context_mask)
# get current representation
cur_mask = util.get_text_field_mask(cur_tokens)
cur_repr = self.text_encoder(cur_embedding, cur_mask)
context_repr = self.var_out_dropout(context_repr)
cur_repr = self.var_out_dropout(cur_repr)
else:
if 'bert-type-ids' in joint_tokens:
# fmod to avoid out of index
joint_tokens['bert-type-ids'] = torch.fmod(joint_tokens['bert-type-ids'], 2)
joint_embedding = self.word_embedder(joint_tokens)
joint_embedding = self.var_inp_dropout(joint_embedding)
joint_mask = util.get_text_field_mask(joint_tokens)
joint_repr = self.text_encoder(joint_embedding, joint_mask)
joint_repr = self.var_out_dropout(joint_repr)
# split repr into context_repr and cur_repr
batch_size, _ = joint_border.shape
joint_border = joint_border.view(batch_size)
context_reprs = []
context_embeddings = []
cur_reprs = []
cur_embeddings = []
for i in range(batch_size):
context_embeddings.append(joint_embedding[i, :joint_border[i]])
context_reprs.append(joint_repr[i, :joint_border[i]])
cur_embeddings.append(joint_embedding[i, joint_border[i]:])
cur_reprs.append(joint_repr[i, joint_border[i]:])
context_repr = pad_sequence(context_reprs, batch_first=True)
cur_repr = pad_sequence(cur_reprs, batch_first=True)
context_embedding = pad_sequence(context_embeddings, batch_first=True)
cur_embedding = pad_sequence(cur_embeddings, batch_first=True)
# padding feature map matrix to satisfy the minimum height/width of UNet model
if context_repr.shape[1] < self.min_height:
_, cur_height, hidden_size = context_repr.shape
out_tensor = context_repr.data.new(batch_size, self.min_height, hidden_size).fill_(0)
out_tensor[:, :cur_height, :] = context_repr
context_repr = out_tensor
if cur_repr.shape[1] < self.min_width:
_, cur_width, hidden_size = cur_repr.shape
out_tensor = cur_repr.data.new(batch_size, self.min_width, hidden_size).fill_(0)
out_tensor[:, :cur_width, :] = cur_repr
cur_repr = out_tensor
context_forward, context_backward = context_repr[:, :, :self.hidden_size], context_repr[:, :, self.hidden_size:]
cur_forward, cur_backward = cur_repr[:, :, :self.hidden_size], cur_repr[:, :, self.hidden_size:]
for choice in self.segment_choices:
if choice == 'ele':
attn_features.append(self.similar_function[choice](context_repr,
cur_repr))
elif 'emb' in choice:
attn_features.append(self.similar_function[choice](context_embedding,
cur_embedding).unsqueeze(dim=1))
else:
if self.expand_bidir:
attn_features.append(self.similar_function[choice](context_forward,
cur_forward).unsqueeze(dim=1))
attn_features.append(self.similar_function[choice](context_backward,
cur_backward).unsqueeze(dim=1))
else:
attn_features.append(self.similar_function[choice](context_repr,
cur_repr).unsqueeze(dim=1))
attn_input = torch.cat(attn_features, dim=1)
# here we assume the attn_input as batch_size x channel x width x height
attn_map = self.segmentation_net(attn_input)
# attn_map: batch_size x width x height x class
batch_size, width, height, class_size = attn_map.size()
        # pad the label matrix if its width/height do not match the predicted attention map
if width != matrix_map.shape[1] or height != matrix_map.shape[2]:
out_tensor = matrix_map.data.new(batch_size, width, height).fill_(-1)
out_tensor[:, :matrix_map.shape[1], :matrix_map.shape[2]] = matrix_map
matrix_map = out_tensor
attn_mask = (matrix_map != -1).long()
attn_map_flatten = attn_map.view(batch_size * width * height, class_size)
matrix_map_flatten = matrix_map.view(batch_size * width * height).long()
# cross entropy loss
loss_val = self.loss(attn_map_flatten, matrix_map_flatten)
outputs = {'loss': loss_val}
if (self.training and self.enable_training_log) or (not self.training):
attn_map_numpy = attn_map.data.cpu().numpy()
attn_mask_numpy = attn_mask.data.cpu().numpy()
predict_str = []
for i in range(batch_size):
sample_predict_str = self._predict_base_on_attn_map(attn_map_numpy[i],
attn_mask_numpy[i],
cur_str[i],
context_str[i])
if sample_predict_str.strip() == '':
# To avoid error when evaluating on ROUGE
sample_predict_str = 'hello'
predict_str.append(sample_predict_str)
self.evaluate_metrics(restate_str=restate_str,
predict_str=predict_str,
cur_str=cur_str)
outputs['predicted_tokens'] = predict_str
return outputs
def evaluate_metrics(self, restate_str: List[str], predict_str: List[str], cur_str: List[str]):
"""
BLEU Score
"""
self.metrics['BLEU'](restate_str, predict_str)
"""
Exact Match Score
"""
em_score = Scorer.em_score(restate_str, predict_str)
self.metrics['EM'](em_score)
"""
ROUGE Score
"""
rouge1, rouge2, rouge = Scorer.rouge_score(restate_str, predict_str)
self.metrics['ROUGE'](rouge)
self.metrics['_ROUGE1'](rouge1)
self.metrics['_ROUGE2'](rouge2)
"""
F-Score (note this one is the rewriting F-score)
See definition in paper: https://ai.tencent.com/ailab/nlp/dialogue/papers/EMNLP_zhufengpan.pdf
"""
i1c, p1c, r1c, i2c, p2c, r2c, i3c, p3c, r3c = Scorer.restored_count(
restate_str, predict_str, cur_str)
self.metrics['F1'](i1c, p1c, r1c)
self.metrics['F2'](i2c, p2c, r2c)
self.metrics['F3'](i3c, p3c, r3c)
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
other_metrics = {k: v.get_metric(reset) for k, v in self.metrics.items() if k not in ['F1', 'F2', 'F3', 'BLEU']}
f_metrics_dict = {k: v.get_metric(reset) for k, v in self.metrics.items() if k in ['F1', 'F2', 'F3']}
f_metrics_dict = {**f_metrics_dict['F1'], **f_metrics_dict['F2'], **f_metrics_dict['F3']}
bleu_metrics = self.metrics['BLEU'].get_metric(reset)
return {**other_metrics, **f_metrics_dict, **bleu_metrics}
def _predict_base_on_attn_map(self, attn_map, attn_mask, cur_str, context_str) -> str:
"""
        Detect the edit operations, keeping the same format as the result of export_conflict_map
:param attn_map: attention_map, with shape `height x width x class_size`
:return: ordered operation sequence
"""
discrete_attn_map = np.argmax(attn_map, axis=2)
discrete_attn_map = attn_mask * discrete_attn_map
op_seq: List = []
for label, label_value in self.class_mapping.items():
if label_value == 0:
# do nothing
continue
connect_matrix = discrete_attn_map.copy()
# make the non label value as zero
connect_matrix = np.where(connect_matrix != label_value, 0,
connect_matrix)
ops = UnifiedFollowUp._scan_twice(connect_matrix)
for op in ops:
op_seq.append([label, *op])
op_seq = sorted(op_seq, key=lambda x: x[2][1], reverse=True)
predict_str = transmit_seq(cur_str, context_str, op_seq)
return predict_str
@staticmethod
def _scan_twice(connect_matrix):
label_num = 1
label_equations = {}
height, width = connect_matrix.shape
for i in range(height):
for j in range(width):
if connect_matrix[i, j] == 0:
continue
if j != 0:
left_val = connect_matrix[i, j - 1]
else:
left_val = 0
if i != 0:
top_val = connect_matrix[i - 1, j]
else:
top_val = 0
if i != 0 and j != 0:
left_top_val = connect_matrix[i - 1, j - 1]
else:
left_top_val = 0
if any([left_val > 0, top_val > 0, left_top_val > 0]):
neighbour_labels = [v for v in [left_val, top_val,
left_top_val] if v > 0]
min_label = min(neighbour_labels)
connect_matrix[i, j] = min_label
set_min_label = min([label_equations[label] for label in
neighbour_labels])
for label in neighbour_labels:
label_equations[label] = min(set_min_label, min_label)
if set_min_label > min_label:
                        for key, value in label_equations.items():
if value == set_min_label:
label_equations[key] = min_label
else:
new_label = label_num
connect_matrix[i, j] = new_label
label_equations[new_label] = new_label
label_num += 1
for i in range(height):
for j in range(width):
if connect_matrix[i, j] == 0:
continue
label = connect_matrix[i, j]
normalized_label = label_equations[label]
connect_matrix[i, j] = normalized_label
groups = list(set(label_equations.values()))
ret_boxes = []
for group_label in groups:
points = np.argwhere(connect_matrix == group_label)
points_y = points[:, (0)]
points_x = points[:, (1)]
min_width = np.amin(points_x)
max_width = np.amax(points_x) + 1
min_height = np.amin(points_y)
max_height = np.amax(points_y) + 1
ret_boxes.append([[min_width, max_width], [min_height, max_height]])
return ret_boxes
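    # Worked example of the two-pass labelling above (values assumed for illustration):
    # given the connect matrix
    #   [[1, 1, 0],
    #    [0, 0, 0],
    #    [0, 1, 1]]
    # the first pass assigns provisional labels and records equivalences, the second pass
    # normalises them, and the returned boxes are (in some order)
    #   [[[0, 2], [0, 1]], [[1, 3], [2, 3]]]
    # i.e. one [min_width, max_width], [min_height, max_height] box per connected region.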
|
ContextualSP/incomplete_utterance_rewriting/src/model.py/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/model.py",
"repo_id": "ContextualSP",
"token_count": 9456
}
| 231 |
# coding: utf-8
import json
import dill
import hashlib
import os
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.data import Vocabulary
from allennlp.models.archival import load_archive
from parsers.irnet.context.converter import ActionConverter
from parsers.irnet.dataset_reader.spider_reader import SpiderDatasetReader
from parsers.irnet.models.sparc_parser import SparcParser
class Parser:
def __init__(self, model: torch.nn.Module):
assert model is not None
model.eval()
self.model = model
def parse(self, example):
# requirement: 'predict_sql' or 'predict_semql' must in returned dict
raise NotImplementedError()
class IRNetSpiderParser(Parser):
def __init__(self, model):
super().__init__(model)
self.spider_dataset_reader = SpiderDatasetReader()
self.sha1 = hashlib.sha1()
def parse(self, example):
hash_id = self.hash_dict(example)[:7]
if os.path.exists(f'cache/spider_instance/{hash_id}.bin'):
instance = dill.load(open(f'cache/spider_instance/{hash_id}.bin', 'rb'))
else:
db_id = example['db_id']
inter_utter_list = [example['question']]
sql_list = [example['sql']]
sql_query_list = [example['query']]
instance = self.spider_dataset_reader.text_to_instance(
utter_list=inter_utter_list,
db_id=db_id,
sql_list=sql_list,
sql_query_list=sql_query_list
)
dill.dump(instance, open(f'cache/spider_instance/{hash_id}.bin', 'wb'))
parsed_result = self.parse_instance(instance)
return parsed_result
def parse_instance(self, instance: Instance) -> JsonDict:
# convert predict result into production rule string
index_to_rule = [production_rule_field.rule
for production_rule_field in instance.fields['valid_actions_list'].field_list[0].field_list]
# Now get result
results = sanitize(self.model.forward_on_instance(instance))
rule_repr = [index_to_rule[ind] for ind in results['best_predict']]
ground_rule_repr = [index_to_rule[ind] for ind in results['ground_truth']]
db_context = instance.fields['worlds'].field_list[0].metadata.db_context
action_converter = ActionConverter(db_context)
predict_sql = action_converter.translate_to_sql(rule_repr)
ground_sql = action_converter.translate_to_sql(ground_rule_repr)
dis_results = {'predict': rule_repr,
'predict_sql': predict_sql,
'ground': ground_rule_repr,
'ground_sql': ground_sql,
'table_content': results['table_content']}
return dis_results
    def hash_dict(self, d):
        # Hash with a fresh digest each call so the same example always yields the same cache key.
        dict_str = json.dumps(d)
        return hashlib.sha1(bytes(dict_str, encoding='utf-8')).hexdigest()
@staticmethod
def get_parser():
dataset_path = 'data/datasets/spider'
vocab = Vocabulary.from_files('parsers/irnet/checkpoints/v1.0_spider_baseline_model/vocabulary')
overrides = {
"dataset_path": dataset_path,
"train_data_path": "train.json",
"validation_data_path": "dev.json"
}
parser_model = load_archive('parsers/irnet/checkpoints/v1.0_spider_baseline_model/model.tar.gz',
cuda_device=0,
overrides=json.dumps(overrides)).model
parser_model.sql_metric_util._evaluator.update_dataset_path(dataset_path=dataset_path)
parser = IRNetSpiderParser(model=parser_model)
return parser
if __name__ == '__main__':
parser: Parser = IRNetSpiderParser.get_parser()
|
ContextualSP/interactive_text_to_sql/parsers/parser.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/parsers/parser.py",
"repo_id": "ContextualSP",
"token_count": 1776
}
| 232 |
# coding: utf-8
import logging
import sys
import random
import os
from tqdm import tqdm
import numpy as np
import torch
from src.data import SpiderAlignDataset
from src.aligner_model import BertAlignerModel
from src.utils.utils import AverageMeter
logging.basicConfig(level=logging.INFO,
format="%(asctime)s [%(levelname)s] [%(name)s] %(message)s",
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# What are the names of colleges that have two or more players, listed in descending alphabetical order?
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# random.seed(1229)
# torch.manual_seed(1229)
# torch.cuda.manual_seed(1229)
batch_size = 16 * len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
n_negative = 50
total_training_iter = 0
coterms = [x.strip() for x in open('data/spider/coterms.txt', 'r').readlines()]
def train(model, dataloader, criterion, optimizer):
global total_training_iter
model.train()
with tqdm(dataloader) as tqdm_dataloader:
average_meter = AverageMeter()
for batch_data in tqdm_dataloader:
tensors, weights, lengths, texts = batch_data
positive_tensors, negative_tensors = tensors
positive_weights, negative_weights = weights
positive_lengths, negative_lengths = lengths
positive_texts, negative_texts = texts
positive_tensors, negative_tensors = positive_tensors.to(device), negative_tensors.to(device)
positive_weights, negative_weights = positive_weights.to(device), negative_weights.to(device)
positive_lengths, negative_lengths = positive_lengths.to(device), negative_lengths.to(device)
batch_size = positive_tensors.size(0)
ques_max_len = torch.LongTensor([positive_lengths[:, 0].max()]).expand(batch_size, 1)
pos_max_len = torch.LongTensor([positive_lengths[:, 1].max()]).expand(batch_size, 1)
neg_max_len = torch.LongTensor([negative_lengths[:, 1].max()]).expand(batch_size, 1)
# positive_similar_matrix, negative_similar_matrix = model(positive_tensors, positive_lengths, negative_tensors, negative_lengths)
if (not isinstance(model, torch.nn.DataParallel) and model.use_autoencoder) or \
(isinstance(model, torch.nn.DataParallel) and model.module.use_autoencoder):
positive_similar_matrix, negative_similar_matrix, autoencoder_diff = \
model(positive_tensors, positive_lengths, positive_weights,
negative_tensors, negative_lengths, negative_weights,
ques_max_len, pos_max_len, neg_max_len, mode='train')
else:
positive_similar_matrix, negative_similar_matrix = \
model(positive_tensors, positive_lengths, positive_weights,
negative_tensors, negative_lengths, negative_weights,
ques_max_len, pos_max_len, neg_max_len, mode='train')
autoencoder_diff = None
if torch.cuda.is_available():
positive_lengths = positive_lengths.cuda()
negative_lengths = negative_lengths.cuda()
matrix_loss = criterion(positive_similar_matrix, negative_similar_matrix, (positive_lengths, negative_lengths))
loss = matrix_loss
if autoencoder_diff:
loss = matrix_loss + autoencoder_diff
optimizer.zero_grad()
loss.backward()
optimizer.step()
average_meter.update(loss.item(), 1)
tqdm_dataloader.set_postfix_str('loss = {:.4f}'.format(average_meter.avg))
total_training_iter += 1
# if total_training_iter % 500 == 0:
# return
def validate(model, dataloader, criterion):
model.eval()
with tqdm(dataloader) as tqdm_dataloader:
average_meter = AverageMeter()
all_ques_lens, all_pos_lens, all_neg_lens = [], [], []
all_pos_alignments, all_neg_alignments = [], []
for batch_data in tqdm_dataloader:
tensors, weights, lengths, texts = batch_data
positive_tensors, negative_tensors = tensors
positive_weights, negative_weights = weights
positive_lengths, negative_lengths = lengths
positive_texts, negative_texts = texts
positive_tensors, negative_tensors = positive_tensors.to(device), negative_tensors.to(device)
positive_weights, negative_weights = positive_weights.to(device), negative_weights.to(device)
positive_lengths, negative_lengths = positive_lengths.to(device), negative_lengths.to(device)
batch_size = positive_tensors.size(0)
ques_max_len = torch.LongTensor([positive_lengths[:, 0].max()]).expand(batch_size, 1)
pos_max_len = torch.LongTensor([positive_lengths[:, 1].max()]).expand(batch_size, 1)
neg_max_len = torch.LongTensor([negative_lengths[:, 1].max()]).expand(batch_size, 1)
# positive_similar_matrix, negative_similar_matrix = \
# model(positive_tensors, positive_lengths, negative_tensors, negative_lengths)
if (not isinstance(model, torch.nn.DataParallel) and model.use_autoencoder) or \
(isinstance(model, torch.nn.DataParallel) and model.module.use_autoencoder):
positive_similar_matrix, negative_similar_matrix, autoencoder_diff = \
model(positive_tensors, positive_lengths, positive_weights,
negative_tensors, negative_lengths, negative_weights,
ques_max_len, pos_max_len, neg_max_len, mode='train')
else:
positive_similar_matrix, negative_similar_matrix = \
model(positive_tensors, positive_lengths, positive_weights,
negative_tensors, negative_lengths, negative_weights,
ques_max_len, pos_max_len, neg_max_len, mode='train')
autoencoder_diff = None
if torch.cuda.is_available():
positive_lengths = positive_lengths.cuda()
negative_lengths = negative_lengths.cuda()
loss = criterion(positive_similar_matrix, negative_similar_matrix, (positive_lengths, negative_lengths))
average_meter.update(loss.item(), 1)
tqdm_dataloader.set_postfix_str('loss = {:.4f}'.format(average_meter.avg))
all_ques_lens.extend(positive_lengths[:, 0].squeeze().cpu().numpy())
all_pos_lens.extend(positive_lengths[:, 1].squeeze().cpu().numpy())
all_neg_lens.extend(negative_lengths[:, 1].squeeze().cpu().numpy())
all_pos_alignments.extend(positive_similar_matrix.detach().cpu().numpy())
all_neg_alignments.extend(negative_similar_matrix.detach().cpu().numpy())
alignments = [all_pos_alignments, all_neg_alignments]
lengths = [all_ques_lens, all_pos_lens, all_neg_lens]
val_examples, val_corrects, val_acc = validate_acc(alignments, lengths, n_negative)
print(f'Validate acc = {val_acc}')
return val_acc
def validate_acc(alignments, lengths, neg_sample_num=100):
""" Validate accuracy: whether model can choose the positive
sentence over other negative samples """
pos_scores, neg_scores = [], []
pos_alignments, neg_alignments = alignments
src_lengths, pos_tgt_lengths, neg_tgt_lengths = lengths
assert len(pos_alignments) == len(neg_alignments) == len(src_lengths) == len(pos_tgt_lengths) == len(neg_tgt_lengths)
for pos_alignment, neg_alignment, src_len, pos_tgt_len, neg_tgt_len \
in zip(pos_alignments, neg_alignments, src_lengths, pos_tgt_lengths, neg_tgt_lengths):
# print(np.shape(pos_alignment))
# print(src_len)
# print(pos_tgt_len, neg_tgt_len)
pos_score = np.sum(pos_alignment.max(0)) / src_len / pos_tgt_len
neg_score = np.sum(neg_alignment.max(0)) / src_len / neg_tgt_len
pos_scores.append(pos_score)
neg_scores.append(neg_score)
num_examples, num_corrects = 0, 0
for i in range(0, len(pos_scores), neg_sample_num):
one_pos_scores = pos_scores[i: i + neg_sample_num]
one_neg_scores = neg_scores[i: i + neg_sample_num]
num_examples += 1
if one_pos_scores[0] > max(one_neg_scores):
num_corrects += 1
return num_examples, num_corrects, 1. * num_corrects / num_examples
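# Illustrative reading of validate_acc (numbers assumed): with neg_sample_num=2, if the
# per-pair scores came out as pos=[0.9, 0.9] and neg=[0.4, 0.7], the single example is
# counted as correct because the positive score 0.9 beats the best negative score 0.7,
# giving (num_examples, num_corrects, acc) == (1, 1, 1.0).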
def main():
logger.info('******************** Spider Alignment ********************')
use_autoencoder = False
table_file = 'data/spider/tables.json'
train_data_file = 'data/spider/train_spider.json'
dev_data_file = 'data/spider/dev.json'
train_align_dataset = SpiderAlignDataset(table_file=table_file, data_file=train_data_file, n_negative=n_negative,
negative_sampling_mode='mix')
train_dataloader = train_align_dataset.get_dataloader(batch_size=batch_size, shuffle=True, num_workers=8)
dev_align_dataset = SpiderAlignDataset(table_file=table_file, data_file=dev_data_file, n_negative=n_negative,
negative_sampling_mode='modify')
dev_dataloader = dev_align_dataset.get_dataloader(batch_size=batch_size, shuffle=False, num_workers=8)
aligner_model = BertAlignerModel(use_autoencoder=use_autoencoder)
if os.path.exists('saved/spider/model.pt'):
aligner_model.load_state_dict(torch.load('saved/spider/model.pt'))
if torch.cuda.is_available():
aligner_model = aligner_model.cuda()
else:
logger.warning("Model is running on CPU. The progress will be very slow.")
criterion = aligner_model.criterion
optimizer = aligner_model.optimizer
# aligner_model = torch.nn.DataParallel(aligner_model)
validate(aligner_model, dev_dataloader, criterion)
for epoch in range(100):
train(aligner_model, train_dataloader, criterion, optimizer)
validate(aligner_model, dev_dataloader, criterion)
if not os.path.exists('./saved/spider'):
os.makedirs('./saved/spider')
if isinstance(aligner_model, torch.nn.DataParallel):
torch.save(aligner_model.module.state_dict(), f'saved/spider/model-{epoch}.pt')
torch.save(aligner_model.module.state_dict(), 'saved/spider/model.pt')
else:
torch.save(aligner_model.state_dict(), f'saved/spider/model-{epoch}.pt')
torch.save(aligner_model.state_dict(), 'saved/spider/model.pt')
if __name__ == '__main__':
main()
|
ContextualSP/interactive_text_to_sql/src/train_spider_aligner.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/train_spider_aligner.py",
"repo_id": "ContextualSP",
"token_count": 4785
}
| 233 |