import os
import uuid
from typing import Dict, List, Optional
from grpc._channel import _InactiveRpcError
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
DocumentChunkWithScore,
)
from qdrant_client.http import models as rest
import qdrant_client
from services.date import to_unix_timestamp
QDRANT_URL = os.environ.get("QDRANT_URL", "http://localhost")
QDRANT_PORT = os.environ.get("QDRANT_PORT", "6333")
QDRANT_GRPC_PORT = os.environ.get("QDRANT_GRPC_PORT", "6334")
QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY")
QDRANT_COLLECTION = os.environ.get("QDRANT_COLLECTION", "document_chunks")
class QdrantDataStore(DataStore):
UUID_NAMESPACE = uuid.UUID("3896d314-1e95-4a3a-b45a-945f9f0b541d")
def __init__(
self,
collection_name: Optional[str] = None,
vector_size: int = 1536,
distance: str = "Cosine",
recreate_collection: bool = False,
):
"""
Args:
collection_name: Name of the collection to be used
vector_size: Size of the embedding stored in a collection
distance:
Any of "Cosine" / "Euclid" / "Dot". Distance function to measure
similarity
"""
self.client = qdrant_client.QdrantClient(
url=QDRANT_URL,
port=int(QDRANT_PORT),
grpc_port=int(QDRANT_GRPC_PORT),
api_key=QDRANT_API_KEY,
prefer_grpc=True,
timeout=10,
)
self.collection_name = collection_name or QDRANT_COLLECTION
        # Set up the collection so that points can be inserted or queried
self._set_up_collection(vector_size, distance, recreate_collection)
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a list of document chunks and inserts them into the database.
        Returns a list of document ids.
"""
        points = [
            self._convert_document_chunk_to_point(chunk)
            for _, chunk_list in chunks.items()
            for chunk in chunk_list
        ]
self.client.upsert(
collection_name=self.collection_name,
points=points, # type: ignore
wait=True,
)
return list(chunks.keys())
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
search_requests = [
self._convert_query_to_search_request(query) for query in queries
]
results = self.client.search_batch(
collection_name=self.collection_name,
requests=search_requests,
)
return [
QueryResult(
query=query.query,
results=[
self._convert_scored_point_to_document_chunk_with_score(point)
for point in result
],
)
for query, result in zip(queries, results)
]
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if ids is None and filter is None and delete_all is None:
raise ValueError(
"Please provide one of the parameters: ids, filter or delete_all."
)
if delete_all:
points_selector = rest.Filter()
else:
points_selector = self._convert_metadata_filter_to_qdrant_filter(
filter, ids
)
response = self.client.delete(
collection_name=self.collection_name,
points_selector=points_selector, # type: ignore
)
return "COMPLETED" == response.status
def _convert_document_chunk_to_point(
self, document_chunk: DocumentChunk
) -> rest.PointStruct:
created_at = (
to_unix_timestamp(document_chunk.metadata.created_at)
if document_chunk.metadata.created_at is not None
else None
)
return rest.PointStruct(
id=self._create_document_chunk_id(document_chunk.id),
vector=document_chunk.embedding, # type: ignore
payload={
"id": document_chunk.id,
"text": document_chunk.text,
"metadata": document_chunk.metadata.dict(),
"created_at": created_at,
},
)
def _create_document_chunk_id(self, external_id: Optional[str]) -> str:
if external_id is None:
return uuid.uuid4().hex
return uuid.uuid5(self.UUID_NAMESPACE, external_id).hex
def _convert_query_to_search_request(
self, query: QueryWithEmbedding
) -> rest.SearchRequest:
return rest.SearchRequest(
vector=query.embedding,
filter=self._convert_metadata_filter_to_qdrant_filter(query.filter),
limit=query.top_k, # type: ignore
with_payload=True,
with_vector=False,
)
def _convert_metadata_filter_to_qdrant_filter(
self,
metadata_filter: Optional[DocumentMetadataFilter] = None,
ids: Optional[List[str]] = None,
) -> Optional[rest.Filter]:
if metadata_filter is None and ids is None:
return None
must_conditions, should_conditions = [], []
# Filtering by document ids
if ids and len(ids) > 0:
for document_id in ids:
should_conditions.append(
rest.FieldCondition(
key="metadata.document_id",
match=rest.MatchValue(value=document_id),
)
)
# Equality filters for the payload attributes
if metadata_filter:
meta_attributes_keys = {
"document_id": "metadata.document_id",
"source": "metadata.source",
"source_id": "metadata.source_id",
"author": "metadata.author",
}
for meta_attr_name, payload_key in meta_attributes_keys.items():
attr_value = getattr(metadata_filter, meta_attr_name)
if attr_value is None:
continue
must_conditions.append(
rest.FieldCondition(
key=payload_key, match=rest.MatchValue(value=attr_value)
)
)
# Date filters use range filtering
start_date = metadata_filter.start_date
end_date = metadata_filter.end_date
if start_date or end_date:
gte_filter = (
to_unix_timestamp(start_date) if start_date is not None else None
)
lte_filter = (
to_unix_timestamp(end_date) if end_date is not None else None
)
must_conditions.append(
rest.FieldCondition(
key="created_at",
range=rest.Range(
gte=gte_filter,
lte=lte_filter,
),
)
)
if 0 == len(must_conditions) and 0 == len(should_conditions):
return None
return rest.Filter(must=must_conditions, should=should_conditions)
def _convert_scored_point_to_document_chunk_with_score(
self, scored_point: rest.ScoredPoint
) -> DocumentChunkWithScore:
        payload = scored_point.payload or {}
        return DocumentChunkWithScore(
            id=payload.get("id"),
            text=payload.get("text"),
            metadata=payload.get("metadata"),
            embedding=scored_point.vector,  # type: ignore
            score=scored_point.score,
        )
def _set_up_collection(
self, vector_size: int, distance: str, recreate_collection: bool
):
distance = rest.Distance[distance.upper()]
if recreate_collection:
self._recreate_collection(distance, vector_size)
try:
collection_info = self.client.get_collection(self.collection_name)
current_distance = collection_info.config.params.vectors.distance # type: ignore
current_vector_size = collection_info.config.params.vectors.size # type: ignore
if current_distance != distance:
raise ValueError(
f"Collection '{self.collection_name}' already exists in Qdrant, "
f"but it is configured with a similarity '{current_distance.name}'. "
f"If you want to use that collection, but with a different "
f"similarity, please set `recreate_collection=True` argument."
)
if current_vector_size != vector_size:
raise ValueError(
f"Collection '{self.collection_name}' already exists in Qdrant, "
f"but it is configured with a vector size '{current_vector_size}'. "
f"If you want to use that collection, but with a different "
f"vector size, please set `recreate_collection=True` argument."
)
except (UnexpectedResponse, _InactiveRpcError):
self._recreate_collection(distance, vector_size)
def _recreate_collection(self, distance: rest.Distance, vector_size: int):
self.client.recreate_collection(
self.collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=distance,
),
)
# Create the payload index for the document_id metadata attribute, as it is
# used to delete the document related entries
self.client.create_payload_index(
self.collection_name,
field_name="metadata.document_id",
field_type=PayloadSchemaType.KEYWORD,
)
# Create the payload index for the created_at attribute, to make the lookup
# by range filters faster
self.client.create_payload_index(
self.collection_name,
field_name="created_at",
field_schema=PayloadSchemaType.INTEGER,
)
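# --- Illustrative usage sketch (added; not part of the original module) -----
# A minimal, hedged example of driving QdrantDataStore end to end, assuming a
# Qdrant instance is reachable via QDRANT_URL/QDRANT_PORT and that the models
# module exposes the field names used below. The protected _upsert/_query
# methods are called directly to skip the embedding pipeline; run manually.
if __name__ == "__main__":
    import asyncio

    from models.models import DocumentChunkMetadata

    async def _demo() -> None:
        store = QdrantDataStore(recreate_collection=True)
        chunk = DocumentChunk(
            id="doc-1_0",
            text="hello world",
            metadata=DocumentChunkMetadata(document_id="doc-1"),
            embedding=[0.1] * 1536,  # placeholder vector of the configured size
        )
        await store._upsert({"doc-1": [chunk]})
        hits = await store._query(
            [QueryWithEmbedding(query="hello", embedding=[0.1] * 1536, top_k=3)]
        )
        print(hits[0].results)

    asyncio.run(_demo())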
# ---------------------------- next provider module ----------------------------
import os
from typing import Any, List
from datetime import datetime
from supabase import Client
from datastore.providers.pgvector_datastore import PGClient, PgVectorDataStore
from models.models import (
DocumentMetadataFilter,
)
SUPABASE_URL = os.environ.get("SUPABASE_URL")
assert SUPABASE_URL is not None, "SUPABASE_URL is not set"
SUPABASE_ANON_KEY = os.environ.get("SUPABASE_ANON_KEY")
# use service role key if you want this app to be able to bypass your Row Level Security policies
SUPABASE_SERVICE_ROLE_KEY = os.environ.get("SUPABASE_SERVICE_ROLE_KEY")
assert (
SUPABASE_ANON_KEY is not None or SUPABASE_SERVICE_ROLE_KEY is not None
), "SUPABASE_ANON_KEY or SUPABASE_SERVICE_ROLE_KEY must be set"
# class that implements the DataStore interface for Supabase Datastore provider
class SupabaseDataStore(PgVectorDataStore):
def create_db_client(self):
return SupabaseClient()
class SupabaseClient(PGClient):
def __init__(self) -> None:
super().__init__()
if not SUPABASE_SERVICE_ROLE_KEY:
self.client = Client(SUPABASE_URL, SUPABASE_ANON_KEY)
else:
self.client = Client(SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY)
async def upsert(self, table: str, json: dict[str, Any]):
"""
        Takes in a dict representing a single row and upserts it into the table.
"""
if "created_at" in json:
json["created_at"] = json["created_at"][0].isoformat()
self.client.table(table).upsert(json).execute()
async def rpc(self, function_name: str, params: dict[str, Any]):
"""
Calls a stored procedure in the database with the given parameters.
"""
if "in_start_date" in params:
params["in_start_date"] = params["in_start_date"].isoformat()
if "in_end_date" in params:
params["in_end_date"] = params["in_end_date"].isoformat()
response = self.client.rpc(function_name, params=params).execute()
return response.data
async def delete_like(self, table: str, column: str, pattern: str):
"""
Deletes rows in the table that match the pattern.
"""
self.client.table(table).delete().like(column, pattern).execute()
async def delete_in(self, table: str, column: str, ids: List[str]):
"""
Deletes rows in the table that match the ids.
"""
self.client.table(table).delete().in_(column, ids).execute()
async def delete_by_filters(self, table: str, filter: DocumentMetadataFilter):
"""
Deletes rows in the table that match the filter.
"""
builder = self.client.table(table).delete()
if filter.document_id:
builder = builder.eq(
"document_id",
filter.document_id,
)
if filter.source:
builder = builder.eq("source", filter.source)
if filter.source_id:
builder = builder.eq("source_id", filter.source_id)
if filter.author:
builder = builder.eq("author", filter.author)
if filter.start_date:
builder = builder.gte(
"created_at",
filter.start_date[0].isoformat(),
)
if filter.end_date:
builder = builder.lte(
"created_at",
filter.end_date[0].isoformat(),
)
builder.execute()
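# --- Illustrative usage sketch (added; not part of the original module) -----
# A hedged example of exercising the delete helpers on SupabaseClient,
# assuming SUPABASE_URL plus one of the keys are set and that the table name
# "documents" below matches the deployed schema (an assumption, not verified).
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        client = SupabaseClient()
        # Remove all chunks belonging to a list of document ids.
        await client.delete_in("documents", "document_id", ["doc-1", "doc-2"])
        # Or remove rows matching a metadata filter.
        await client.delete_by_filters(
            "documents", DocumentMetadataFilter(author="alice")
        )

    asyncio.run(_demo())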
# ---------------------------- next provider module ----------------------------
import asyncio
import os
import re
import time
import base64
from typing import Dict, List, Optional, Union
from datastore.datastore import DataStore
from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding
from loguru import logger
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import Vector, QueryType
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import *
from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential as DefaultAzureCredentialSync
from azure.identity.aio import DefaultAzureCredential
AZURESEARCH_SERVICE = os.environ.get("AZURESEARCH_SERVICE")
AZURESEARCH_INDEX = os.environ.get("AZURESEARCH_INDEX")
AZURESEARCH_API_KEY = os.environ.get("AZURESEARCH_API_KEY")
AZURESEARCH_SEMANTIC_CONFIG = os.environ.get("AZURESEARCH_SEMANTIC_CONFIG")
AZURESEARCH_LANGUAGE = os.environ.get("AZURESEARCH_LANGUAGE", "en-us")
AZURESEARCH_DISABLE_HYBRID = os.environ.get("AZURESEARCH_DISABLE_HYBRID")
AZURESEARCH_DIMENSIONS = int(os.environ.get("AZURESEARCH_DIMENSIONS", 1536)) # Default to OpenAI's ada-002 embedding model vector size
assert AZURESEARCH_SERVICE is not None
assert AZURESEARCH_INDEX is not None
# Allow overriding field names for Azure Search
FIELDS_ID = os.environ.get("AZURESEARCH_FIELDS_ID", "id")
FIELDS_TEXT = os.environ.get("AZURESEARCH_FIELDS_TEXT", "text")
FIELDS_EMBEDDING = os.environ.get("AZURESEARCH_FIELDS_EMBEDDING", "embedding")
FIELDS_DOCUMENT_ID = os.environ.get("AZURESEARCH_FIELDS_DOCUMENT_ID", "document_id")
FIELDS_SOURCE = os.environ.get("AZURESEARCH_FIELDS_SOURCE", "source")
FIELDS_SOURCE_ID = os.environ.get("AZURESEARCH_FIELDS_SOURCE_ID", "source_id")
FIELDS_URL = os.environ.get("AZURESEARCH_FIELDS_URL", "url")
FIELDS_CREATED_AT = os.environ.get("AZURESEARCH_FIELDS_CREATED_AT", "created_at")
FIELDS_AUTHOR = os.environ.get("AZURESEARCH_FIELDS_AUTHOR", "author")
MAX_UPLOAD_BATCH_SIZE = 1000
MAX_DELETE_BATCH_SIZE = 1000
class AzureSearchDataStore(DataStore):
def __init__(self):
self.client = SearchClient(
endpoint=f"https://{AZURESEARCH_SERVICE}.search.windows.net",
index_name=AZURESEARCH_INDEX,
credential=AzureSearchDataStore._create_credentials(True),
user_agent="retrievalplugin"
)
mgmt_client = SearchIndexClient(
endpoint=f"https://{AZURESEARCH_SERVICE}.search.windows.net",
credential=AzureSearchDataStore._create_credentials(False),
user_agent="retrievalplugin"
)
if AZURESEARCH_INDEX not in [name for name in mgmt_client.list_index_names()]:
self._create_index(mgmt_client)
else:
logger.info(f"Using existing index {AZURESEARCH_INDEX} in service {AZURESEARCH_SERVICE}")
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
azdocuments: List[Dict] = []
async def upload():
r = await self.client.upload_documents(documents=azdocuments)
count = sum(1 for rr in r if rr.succeeded)
logger.info(f"Upserted {count} chunks out of {len(azdocuments)}")
if count < len(azdocuments):
raise Exception(f"Failed to upload {len(azdocuments) - count} chunks")
ids = []
for document_id, document_chunks in chunks.items():
ids.append(document_id)
for chunk in document_chunks:
azdocuments.append({
# base64-encode the id string to stay within Azure Search's valid characters for keys
FIELDS_ID: base64.urlsafe_b64encode(bytes(chunk.id, "utf-8")).decode("ascii"),
FIELDS_TEXT: chunk.text,
FIELDS_EMBEDDING: chunk.embedding,
FIELDS_DOCUMENT_ID: document_id,
FIELDS_SOURCE: chunk.metadata.source,
FIELDS_SOURCE_ID: chunk.metadata.source_id,
FIELDS_URL: chunk.metadata.url,
FIELDS_CREATED_AT: chunk.metadata.created_at,
FIELDS_AUTHOR: chunk.metadata.author,
})
if len(azdocuments) >= MAX_UPLOAD_BATCH_SIZE:
await upload()
azdocuments = []
if len(azdocuments) > 0:
await upload()
return ids
async def delete(self, ids: Optional[List[str]] = None, filter: Optional[DocumentMetadataFilter] = None, delete_all: Optional[bool] = None) -> bool:
filter = None if delete_all else self._translate_filter(filter)
if delete_all or filter is not None:
deleted = set()
while True:
search_result = await self.client.search(None, filter=filter, top=MAX_DELETE_BATCH_SIZE, include_total_count=True, select=FIELDS_ID)
if await search_result.get_count() == 0:
break
documents = [{ FIELDS_ID: d[FIELDS_ID] } async for d in search_result if d[FIELDS_ID] not in deleted]
if len(documents) > 0:
logger.info(f"Deleting {len(documents)} chunks " + ("using a filter" if filter is not None else "using delete_all"))
del_result = await self.client.delete_documents(documents=documents)
if not all([rr.succeeded for rr in del_result]):
raise Exception("Failed to delete documents")
deleted.update([d[FIELDS_ID] for d in documents])
else:
# All repeats, delay a bit to let the index refresh and try again
                    await asyncio.sleep(0.25)
if ids is not None and len(ids) > 0:
for id in ids:
logger.info(f"Deleting chunks for document id {id}")
await self.delete(filter=DocumentMetadataFilter(document_id=id))
return True
async def _query(self, queries: List[QueryWithEmbedding]) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
return await asyncio.gather(*(self._single_query(query) for query in queries))
async def _single_query(self, query: QueryWithEmbedding) -> QueryResult:
"""
Takes in a single query and filters and returns a query result with matching document chunks and scores.
"""
filter = self._translate_filter(query.filter) if query.filter is not None else None
try:
vector_top_k = query.top_k if filter is None else query.top_k * 2
q = query.query if not AZURESEARCH_DISABLE_HYBRID else None
            if AZURESEARCH_SEMANTIC_CONFIG is not None and not AZURESEARCH_DISABLE_HYBRID:
# Ensure we're feeding a good number of candidates to the L2 reranker
vector_top_k = max(50, vector_top_k)
r = await self.client.search(
q,
filter=filter,
top=query.top_k,
vector=Vector(value=query.embedding, k=vector_top_k, fields=FIELDS_EMBEDDING),
query_type=QueryType.SEMANTIC,
query_language=AZURESEARCH_LANGUAGE,
semantic_configuration_name=AZURESEARCH_SEMANTIC_CONFIG)
else:
r = await self.client.search(
q,
filter=filter,
top=query.top_k,
vector=Vector(value=query.embedding, k=vector_top_k, fields=FIELDS_EMBEDDING))
results: List[DocumentChunkWithScore] = []
async for hit in r:
f = lambda field: hit.get(field) if field != "-" else None
results.append(DocumentChunkWithScore(
id=hit[FIELDS_ID],
text=hit[FIELDS_TEXT],
metadata=DocumentChunkMetadata(
document_id=f(FIELDS_DOCUMENT_ID),
source=f(FIELDS_SOURCE),
source_id=f(FIELDS_SOURCE_ID),
url=f(FIELDS_URL),
created_at=f(FIELDS_CREATED_AT),
author=f(FIELDS_AUTHOR)
),
score=hit["@search.score"]
))
return QueryResult(query=query.query, results=results)
except Exception as e:
raise Exception(f"Error querying the index: {e}")
@staticmethod
def _translate_filter(filter: DocumentMetadataFilter) -> str:
"""
Translates a DocumentMetadataFilter into an Azure Search filter string
"""
if filter is None:
return None
escape = lambda s: s.replace("'", "''")
# regex to validate dates are in OData format
date_re = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z")
filter_list = []
if filter.document_id is not None:
filter_list.append(f"{FIELDS_DOCUMENT_ID} eq '{escape(filter.document_id)}'")
if filter.source is not None:
filter_list.append(f"{FIELDS_SOURCE} eq '{escape(filter.source)}'")
if filter.source_id is not None:
filter_list.append(f"{FIELDS_SOURCE_ID} eq '{escape(filter.source_id)}'")
if filter.author is not None:
filter_list.append(f"{FIELDS_AUTHOR} eq '{escape(filter.author)}'")
if filter.start_date is not None:
if not date_re.match(filter.start_date):
raise ValueError(f"start_date must be in OData format, got {filter.start_date}")
filter_list.append(f"{FIELDS_CREATED_AT} ge {filter.start_date}")
if filter.end_date is not None:
if not date_re.match(filter.end_date):
raise ValueError(f"end_date must be in OData format, got {filter.end_date}")
filter_list.append(f"{FIELDS_CREATED_AT} le {filter.end_date}")
return " and ".join(filter_list) if len(filter_list) > 0 else None
def _create_index(self, mgmt_client: SearchIndexClient):
"""
Creates an Azure Cognitive Search index, including a semantic search configuration if a name is specified for it
"""
logger.info(
f"Creating index {AZURESEARCH_INDEX} in service {AZURESEARCH_SERVICE}" +
(f" with semantic search configuration {AZURESEARCH_SEMANTIC_CONFIG}" if AZURESEARCH_SEMANTIC_CONFIG is not None else "")
)
mgmt_client.create_index(
SearchIndex(
name=AZURESEARCH_INDEX,
fields=[
SimpleField(name=FIELDS_ID, type=SearchFieldDataType.String, key=True),
SearchableField(name=FIELDS_TEXT, type=SearchFieldDataType.String, analyzer_name="standard.lucene"),
SearchField(name=FIELDS_EMBEDDING, type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
hidden=False, searchable=True, filterable=False, sortable=False, facetable=False,
dimensions=AZURESEARCH_DIMENSIONS, vector_search_configuration="default"),
SimpleField(name=FIELDS_DOCUMENT_ID, type=SearchFieldDataType.String, filterable=True, sortable=True),
SimpleField(name=FIELDS_SOURCE, type=SearchFieldDataType.String, filterable=True, sortable=True),
SimpleField(name=FIELDS_SOURCE_ID, type=SearchFieldDataType.String, filterable=True, sortable=True),
SimpleField(name=FIELDS_URL, type=SearchFieldDataType.String),
SimpleField(name=FIELDS_CREATED_AT, type=SearchFieldDataType.DateTimeOffset, filterable=True, sortable=True),
SimpleField(name=FIELDS_AUTHOR, type=SearchFieldDataType.String, filterable=True, sortable=True)
],
semantic_settings=None if AZURESEARCH_SEMANTIC_CONFIG is None else SemanticSettings(
configurations=[SemanticConfiguration(
name=AZURESEARCH_SEMANTIC_CONFIG,
prioritized_fields=PrioritizedFields(
title_field=None, prioritized_content_fields=[SemanticField(field_name=FIELDS_TEXT)]
)
)]
),
vector_search=VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default",
kind="hnsw",
# Could change to dotproduct for OpenAI's embeddings since they normalize vectors to unit length
hnsw_parameters=HnswParameters(metric="cosine")
)
]
)
)
)
@staticmethod
def _create_credentials(use_async: bool) -> Union[AzureKeyCredential, DefaultAzureCredential, DefaultAzureCredentialSync]:
if AZURESEARCH_API_KEY is None:
logger.info("Using DefaultAzureCredential for Azure Search, make sure local identity or managed identity are set up appropriately")
credential = DefaultAzureCredential() if use_async else DefaultAzureCredentialSync()
else:
logger.info("Using an API key to authenticate with Azure Search")
credential = AzureKeyCredential(AZURESEARCH_API_KEY)
return credential
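# --- Illustrative usage sketch (added; not part of the original module) -----
# _translate_filter is a pure static method, so it can be exercised without a
# search service (though the module-level asserts still require the
# AZURESEARCH_SERVICE/AZURESEARCH_INDEX variables to be set at import time).
# A hedged example of the OData filter string it produces, assuming the
# default AZURESEARCH_FIELDS_* names above are unchanged.
if __name__ == "__main__":
    example_filter = DocumentMetadataFilter(
        document_id="doc-1",
        author="alice",
        start_date="2023-01-01T00:00:00Z",
    )
    # Expected shape with the default field names:
    #   document_id eq 'doc-1' and author eq 'alice' and created_at ge 2023-01-01T00:00:00Z
    print(AzureSearchDataStore._translate_filter(example_filter))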
# ---------------------------- next provider module ----------------------------
import os
from typing import Dict, List, Any, Optional
import elasticsearch
from elasticsearch import Elasticsearch, helpers
from loguru import logger
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkWithScore,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
)
from services.date import to_unix_timestamp
ELASTICSEARCH_URL = os.environ.get("ELASTICSEARCH_URL", "http://localhost:9200")
ELASTICSEARCH_CLOUD_ID = os.environ.get("ELASTICSEARCH_CLOUD_ID")
ELASTICSEARCH_USERNAME = os.environ.get("ELASTICSEARCH_USERNAME")
ELASTICSEARCH_PASSWORD = os.environ.get("ELASTICSEARCH_PASSWORD")
ELASTICSEARCH_API_KEY = os.environ.get("ELASTICSEARCH_API_KEY")
ELASTICSEARCH_INDEX = os.environ.get("ELASTICSEARCH_INDEX")
ELASTICSEARCH_REPLICAS = int(os.environ.get("ELASTICSEARCH_REPLICAS", "1"))
ELASTICSEARCH_SHARDS = int(os.environ.get("ELASTICSEARCH_SHARDS", "1"))
VECTOR_SIZE = 1536
UPSERT_BATCH_SIZE = 100
class ElasticsearchDataStore(DataStore):
def __init__(
self,
index_name: Optional[str] = None,
vector_size: int = VECTOR_SIZE,
similarity: str = "cosine",
replicas: int = ELASTICSEARCH_REPLICAS,
shards: int = ELASTICSEARCH_SHARDS,
recreate_index: bool = True,
):
"""
Args:
index_name: Name of the index to be used
vector_size: Size of the embedding stored in a collection
similarity:
Any of "cosine" / "l2_norm" / "dot_product".
"""
assert similarity in [
"cosine",
"l2_norm",
"dot_product",
], "Similarity must be one of 'cosine' / 'l2_norm' / 'dot_product'."
        assert replicas > 0, "Replicas must be greater than 0."
        assert shards > 0, "Shards must be greater than 0."
self.client = connect_to_elasticsearch(
ELASTICSEARCH_URL,
ELASTICSEARCH_CLOUD_ID,
ELASTICSEARCH_API_KEY,
ELASTICSEARCH_USERNAME,
ELASTICSEARCH_PASSWORD,
)
assert (
index_name != "" or ELASTICSEARCH_INDEX != ""
), "Please provide an index name."
self.index_name = index_name or ELASTICSEARCH_INDEX or ""
replicas = replicas or ELASTICSEARCH_REPLICAS
shards = shards or ELASTICSEARCH_SHARDS
        # Set up the index so that documents can be inserted or queried
self._set_up_index(vector_size, similarity, replicas, shards, recreate_index)
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a list of document chunks and inserts them into the database.
        Returns a list of document ids.
"""
actions = []
for _, chunkList in chunks.items():
for chunk in chunkList:
actions = (
actions
+ self._convert_document_chunk_to_es_document_operation(chunk)
)
self.client.bulk(operations=actions, index=self.index_name)
return list(chunks.keys())
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
searches = self._convert_queries_to_msearch_query(queries)
results = self.client.msearch(searches=searches)
return [
QueryResult(
query=query.query,
results=[
self._convert_hit_to_document_chunk_with_score(hit)
for hit in result["hits"]["hits"]
],
)
for query, result in zip(queries, results["responses"])
]
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
# Delete all vectors from the index if delete_all is True
if delete_all:
try:
logger.info(f"Deleting all vectors from index")
self.client.delete_by_query(
index=self.index_name, query={"match_all": {}}
)
logger.info(f"Deleted all vectors successfully")
return True
except Exception as e:
logger.error(f"Error deleting all vectors: {e}")
raise e
# Convert the metadata filter object to a dict with elasticsearch filter expressions
es_filters = self._get_es_filters(filter)
# Delete vectors that match the filter from the index if the filter is not empty
if es_filters != {}:
try:
logger.info(f"Deleting vectors with filter {es_filters}")
self.client.delete_by_query(index=self.index_name, query=es_filters)
logger.info(f"Deleted vectors with filter successfully")
except Exception as e:
logger.error(f"Error deleting vectors with filter: {e}")
raise e
if ids:
try:
documents_to_delete = [doc_id for doc_id in ids]
logger.info(f"Deleting {len(documents_to_delete)} documents")
res = self.client.delete_by_query(
index=self.index_name,
query={"terms": {"metadata.document_id": documents_to_delete}},
)
logger.info(f"Deleted documents successfully")
except Exception as e:
logger.error(f"Error deleting documents: {e}")
raise e
return True
def _get_es_filters(
self, filter: Optional[DocumentMetadataFilter] = None
) -> Dict[str, Any]:
if filter is None:
return {}
es_filters = {
"bool": {
"must": [],
}
}
        # For each field in the MetadataFilter, check if it has a value and add the corresponding Elasticsearch filter expression
# For start_date and end_date, uses the range query - gte and lte operators respectively
# For other fields, uses the term query
for field, value in filter.dict().items():
if value is not None:
if field == "start_date":
es_filters["bool"]["must"].append(
{"range": {"created_at": {"gte": to_unix_timestamp(value)}}}
)
elif field == "end_date":
es_filters["bool"]["must"].append(
{"range": {"created_at": {"lte": to_unix_timestamp(value)}}}
)
else:
es_filters["bool"]["must"].append(
{"term": {f"metadata.{field}": value}}
)
return es_filters
def _convert_document_chunk_to_es_document_operation(
self, document_chunk: DocumentChunk
) -> List[Dict]:
created_at = (
to_unix_timestamp(document_chunk.metadata.created_at)
if document_chunk.metadata.created_at is not None
else None
)
action_and_metadata = {
"index": {
"_index": self.index_name,
"_id": document_chunk.id,
}
}
source = {
"id": document_chunk.id,
"text": document_chunk.text,
"metadata": document_chunk.metadata.dict(),
"created_at": created_at,
"embedding": document_chunk.embedding,
}
return [action_and_metadata, source]
def _convert_queries_to_msearch_query(self, queries: List[QueryWithEmbedding]):
searches = []
for query in queries:
searches.append({"index": self.index_name})
searches.append(
{
"_source": True,
"knn": {
"field": "embedding",
"query_vector": query.embedding,
"k": query.top_k,
"num_candidates": query.top_k,
},
"size": query.top_k,
}
)
return searches
def _convert_hit_to_document_chunk_with_score(self, hit) -> DocumentChunkWithScore:
return DocumentChunkWithScore(
id=hit["_id"],
text=hit["_source"]["text"], # type: ignore
metadata=hit["_source"]["metadata"], # type: ignore
embedding=hit["_source"]["embedding"], # type: ignore
score=hit["_score"],
)
def _set_up_index(
self,
vector_size: int,
similarity: str,
replicas: int,
shards: int,
recreate_index: bool,
) -> None:
if recreate_index:
self._recreate_index(similarity, vector_size, replicas, shards)
try:
index_mapping = self.client.indices.get_mapping(index=self.index_name)
current_similarity = index_mapping[self.index_name]["mappings"]["properties"]["embedding"]["similarity"] # type: ignore
current_vector_size = index_mapping[self.index_name]["mappings"]["properties"]["embedding"]["dims"] # type: ignore
if current_similarity != similarity:
raise ValueError(
f"Collection '{self.index_name}' already exists in Elasticsearch, "
f"but it is configured with a similarity '{current_similarity}'. "
f"If you want to use that collection, but with a different "
f"similarity, please set `recreate_index=True` argument."
)
if current_vector_size != vector_size:
raise ValueError(
f"Collection '{self.index_name}' already exists in Elasticsearch, "
f"but it is configured with a vector size '{current_vector_size}'. "
f"If you want to use that collection, but with a different "
f"vector size, please set `recreate_index=True` argument."
)
except elasticsearch.exceptions.NotFoundError:
self._recreate_index(similarity, vector_size, replicas, shards)
def _recreate_index(
self, similarity: str, vector_size: int, replicas: int, shards: int
) -> None:
settings = {
"index": {
"number_of_shards": shards,
"number_of_replicas": replicas,
"refresh_interval": "1s",
}
}
mappings = {
"properties": {
"embedding": {
"type": "dense_vector",
"dims": vector_size,
"index": True,
"similarity": similarity,
}
}
}
self.client.indices.delete(
index=self.index_name, ignore_unavailable=True, allow_no_indices=True
)
self.client.indices.create(
index=self.index_name, mappings=mappings, settings=settings
)
def connect_to_elasticsearch(
elasticsearch_url=None, cloud_id=None, api_key=None, username=None, password=None
):
# Check if both elasticsearch_url and cloud_id are defined
if elasticsearch_url and cloud_id:
raise ValueError(
"Both elasticsearch_url and cloud_id are defined. Please provide only one."
)
# Initialize connection parameters dictionary
connection_params = {}
# Define the connection based on the provided parameters
if elasticsearch_url:
connection_params["hosts"] = [elasticsearch_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
# Add authentication details based on the provided parameters
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
else:
logger.warning(
"No authentication details provided. Please consider using an api_key or username and password to secure your connection."
)
# Establish the Elasticsearch client connection
es_client = Elasticsearch(**connection_params)
try:
es_client.info()
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise e
return es_client
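# --- Illustrative usage sketch (added; not part of the original module) -----
# A hedged example of the two mutually exclusive ways connect_to_elasticsearch
# can be called; the URL and credentials below are placeholders, not values
# taken from the project.
if __name__ == "__main__":
    # Self-managed cluster with basic auth.
    es = connect_to_elasticsearch(
        elasticsearch_url="http://localhost:9200",
        username="elastic",
        password="changeme",
    )
    print(es.info())
    # Elastic Cloud deployment with an API key (do not combine with a URL):
    # es = connect_to_elasticsearch(cloud_id="<deployment>:<base64>", api_key="<key>")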
# ---------------------------- next provider module ----------------------------
import os
from typing import Any, Dict, List, Optional
import pinecone
from tenacity import retry, wait_random_exponential, stop_after_attempt
import asyncio
from loguru import logger
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
Source,
)
from services.date import to_unix_timestamp
# Read environment variables for Pinecone configuration
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
PINECONE_ENVIRONMENT = os.environ.get("PINECONE_ENVIRONMENT")
PINECONE_INDEX = os.environ.get("PINECONE_INDEX")
assert PINECONE_API_KEY is not None
assert PINECONE_ENVIRONMENT is not None
assert PINECONE_INDEX is not None
# Initialize Pinecone with the API key and environment
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
# Set the batch size for upserting vectors to Pinecone
UPSERT_BATCH_SIZE = 100
class PineconeDataStore(DataStore):
def __init__(self):
# Check if the index name is specified and exists in Pinecone
if PINECONE_INDEX and PINECONE_INDEX not in pinecone.list_indexes():
# Get all fields in the metadata object in a list
fields_to_index = list(DocumentChunkMetadata.__fields__.keys())
# Create a new index with the specified name, dimension, and metadata configuration
try:
logger.info(
f"Creating index {PINECONE_INDEX} with metadata config {fields_to_index}"
)
pinecone.create_index(
PINECONE_INDEX,
dimension=1536, # dimensionality of OpenAI ada v2 embeddings
metadata_config={"indexed": fields_to_index},
)
self.index = pinecone.Index(PINECONE_INDEX)
logger.info(f"Index {PINECONE_INDEX} created successfully")
except Exception as e:
logger.error(f"Error creating index {PINECONE_INDEX}: {e}")
raise e
elif PINECONE_INDEX and PINECONE_INDEX in pinecone.list_indexes():
# Connect to an existing index with the specified name
try:
logger.info(f"Connecting to existing index {PINECONE_INDEX}")
self.index = pinecone.Index(PINECONE_INDEX)
logger.info(f"Connected to index {PINECONE_INDEX} successfully")
except Exception as e:
logger.error(f"Error connecting to index {PINECONE_INDEX}: {e}")
raise e
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a dict from document id to list of document chunks and inserts them into the index.
Return a list of document ids.
"""
# Initialize a list of ids to return
doc_ids: List[str] = []
# Initialize a list of vectors to upsert
vectors = []
# Loop through the dict items
for doc_id, chunk_list in chunks.items():
# Append the id to the ids list
doc_ids.append(doc_id)
logger.info(f"Upserting document_id: {doc_id}")
for chunk in chunk_list:
# Create a vector tuple of (id, embedding, metadata)
# Convert the metadata object to a dict with unix timestamps for dates
pinecone_metadata = self._get_pinecone_metadata(chunk.metadata)
# Add the text and document id to the metadata dict
pinecone_metadata["text"] = chunk.text
pinecone_metadata["document_id"] = doc_id
vector = (chunk.id, chunk.embedding, pinecone_metadata)
vectors.append(vector)
# Split the vectors list into batches of the specified size
batches = [
vectors[i : i + UPSERT_BATCH_SIZE]
for i in range(0, len(vectors), UPSERT_BATCH_SIZE)
]
# Upsert each batch to Pinecone
for batch in batches:
try:
logger.info(f"Upserting batch of size {len(batch)}")
self.index.upsert(vectors=batch)
logger.info(f"Upserted batch successfully")
except Exception as e:
logger.error(f"Error upserting batch: {e}")
raise e
return doc_ids
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and returns a list of query results with matching document chunks and scores.
"""
# Define a helper coroutine that performs a single query and returns a QueryResult
async def _single_query(query: QueryWithEmbedding) -> QueryResult:
logger.debug(f"Query: {query.query}")
# Convert the metadata filter object to a dict with pinecone filter expressions
pinecone_filter = self._get_pinecone_filter(query.filter)
try:
# Query the index with the query embedding, filter, and top_k
query_response = self.index.query(
# namespace=namespace,
top_k=query.top_k,
vector=query.embedding,
filter=pinecone_filter,
include_metadata=True,
)
except Exception as e:
logger.error(f"Error querying index: {e}")
raise e
query_results: List[DocumentChunkWithScore] = []
for result in query_response.matches:
score = result.score
metadata = result.metadata
# Remove document id and text from metadata and store it in a new variable
metadata_without_text = (
{key: value for key, value in metadata.items() if key != "text"}
if metadata
else None
)
# If the source is not a valid Source in the Source enum, set it to None
if (
metadata_without_text
and "source" in metadata_without_text
and metadata_without_text["source"] not in Source.__members__
):
metadata_without_text["source"] = None
# Create a document chunk with score object with the result data
result = DocumentChunkWithScore(
id=result.id,
score=score,
text=metadata["text"] if metadata and "text" in metadata else None,
metadata=metadata_without_text,
)
query_results.append(result)
return QueryResult(query=query.query, results=query_results)
# Use asyncio.gather to run multiple _single_query coroutines concurrently and collect their results
results: List[QueryResult] = await asyncio.gather(
*[_single_query(query) for query in queries]
)
return results
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything from the index.
"""
# Delete all vectors from the index if delete_all is True
if delete_all:
try:
logger.info(f"Deleting all vectors from index")
self.index.delete(delete_all=True)
logger.info(f"Deleted all vectors successfully")
return True
except Exception as e:
logger.error(f"Error deleting all vectors: {e}")
raise e
# Convert the metadata filter object to a dict with pinecone filter expressions
pinecone_filter = self._get_pinecone_filter(filter)
# Delete vectors that match the filter from the index if the filter is not empty
if pinecone_filter != {}:
try:
logger.info(f"Deleting vectors with filter {pinecone_filter}")
self.index.delete(filter=pinecone_filter)
logger.info(f"Deleted vectors with filter successfully")
except Exception as e:
logger.error(f"Error deleting vectors with filter: {e}")
raise e
# Delete vectors that match the document ids from the index if the ids list is not empty
if ids is not None and len(ids) > 0:
try:
logger.info(f"Deleting vectors with ids {ids}")
pinecone_filter = {"document_id": {"$in": ids}}
self.index.delete(filter=pinecone_filter) # type: ignore
logger.info(f"Deleted vectors with ids successfully")
except Exception as e:
logger.error(f"Error deleting vectors with ids: {e}")
raise e
return True
def _get_pinecone_filter(
self, filter: Optional[DocumentMetadataFilter] = None
) -> Dict[str, Any]:
if filter is None:
return {}
pinecone_filter = {}
# For each field in the MetadataFilter, check if it has a value and add the corresponding pinecone filter expression
# For start_date and end_date, uses the $gte and $lte operators respectively
# For other fields, uses the $eq operator
for field, value in filter.dict().items():
if value is not None:
if field == "start_date":
pinecone_filter["date"] = pinecone_filter.get("date", {})
pinecone_filter["date"]["$gte"] = to_unix_timestamp(value)
elif field == "end_date":
pinecone_filter["date"] = pinecone_filter.get("date", {})
pinecone_filter["date"]["$lte"] = to_unix_timestamp(value)
else:
pinecone_filter[field] = value
return pinecone_filter
def _get_pinecone_metadata(
self, metadata: Optional[DocumentChunkMetadata] = None
) -> Dict[str, Any]:
if metadata is None:
return {}
pinecone_metadata = {}
# For each field in the Metadata, check if it has a value and add it to the pinecone metadata dict
# For fields that are dates, convert them to unix timestamps
for field, value in metadata.dict().items():
if value is not None:
if field in ["created_at"]:
pinecone_metadata[field] = to_unix_timestamp(value)
else:
pinecone_metadata[field] = value
return pinecone_metadata
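# --- Illustrative usage sketch (added; not part of the original module) -----
# A hedged example of querying the Pinecone-backed store, assuming the
# PINECONE_* environment variables point at an existing (or creatable) index
# and that the placeholder embedding matches the 1536-dim index created above.
if __name__ == "__main__":
    async def _demo() -> None:
        store = PineconeDataStore()
        results = await store._query(
            [
                QueryWithEmbedding(
                    query="example",
                    embedding=[0.1] * 1536,  # placeholder vector
                    top_k=5,
                    filter=DocumentMetadataFilter(source=Source.file),
                )
            ]
        )
        print(results[0].results)

    asyncio.run(_demo())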
# ---------------------------- next provider module ----------------------------
import os
from typing import Any, List
from datetime import datetime
import numpy as np
from psycopg2 import connect
from psycopg2.extras import DictCursor
from pgvector.psycopg2 import register_vector
from services.date import to_unix_timestamp
from datastore.providers.pgvector_datastore import PGClient, PgVectorDataStore
from models.models import (
DocumentMetadataFilter,
)
PG_HOST = os.environ.get("PG_HOST", "localhost")
PG_PORT = int(os.environ.get("PG_PORT", 5432))
PG_DB = os.environ.get("PG_DB", "postgres")
PG_USER = os.environ.get("PG_USER", "postgres")
PG_PASSWORD = os.environ.get("PG_PASSWORD", "postgres")
# class that implements the DataStore interface for Postgres Datastore provider
class PostgresDataStore(PgVectorDataStore):
def create_db_client(self):
return PostgresClient()
class PostgresClient(PGClient):
def __init__(self) -> None:
super().__init__()
self.client = connect(
dbname=PG_DB, user=PG_USER, password=PG_PASSWORD, host=PG_HOST, port=PG_PORT
)
register_vector(self.client)
def __del__(self):
# close the connection when the client is destroyed
self.client.close()
async def upsert(self, table: str, json: dict[str, Any]):
"""
        Takes in a dict representing a single row and upserts it into the table.
"""
with self.client.cursor() as cur:
if not json.get("created_at"):
json["created_at"] = datetime.now()
json["embedding"] = np.array(json["embedding"])
cur.execute(
f"INSERT INTO {table} (id, content, embedding, document_id, source, source_id, url, author, created_at) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (id) DO UPDATE SET content = %s, embedding = %s, document_id = %s, source = %s, source_id = %s, url = %s, author = %s, created_at = %s",
(
json["id"],
json["content"],
json["embedding"],
json["document_id"],
json["source"],
json["source_id"],
json["url"],
json["author"],
json["created_at"],
json["content"],
json["embedding"],
json["document_id"],
json["source"],
json["source_id"],
json["url"],
json["author"],
json["created_at"],
),
)
self.client.commit()
async def rpc(self, function_name: str, params: dict[str, Any]):
"""
Calls a stored procedure in the database with the given parameters.
"""
data = []
params["in_embedding"] = np.array(params["in_embedding"])
with self.client.cursor(cursor_factory=DictCursor) as cur:
cur.callproc(function_name, params)
rows = cur.fetchall()
self.client.commit()
for row in rows:
row["created_at"] = to_unix_timestamp(row["created_at"])
data.append(dict(row))
return data
async def delete_like(self, table: str, column: str, pattern: str):
"""
Deletes rows in the table that match the pattern.
"""
with self.client.cursor() as cur:
cur.execute(
f"DELETE FROM {table} WHERE {column} LIKE %s",
(f"%{pattern}%",),
)
self.client.commit()
async def delete_in(self, table: str, column: str, ids: List[str]):
"""
Deletes rows in the table that match the ids.
"""
with self.client.cursor() as cur:
cur.execute(
f"DELETE FROM {table} WHERE {column} IN %s",
(tuple(ids),),
)
self.client.commit()
    async def delete_by_filters(self, table: str, filter: DocumentMetadataFilter):
        """
        Deletes rows in the table that match the filter.
        """
        # Collect the individual conditions so an empty filter cannot produce
        # a malformed (or unfiltered) DELETE statement.
        conditions = []
        if filter.document_id:
            conditions.append(f"document_id = '{filter.document_id}'")
        if filter.source:
            conditions.append(f"source = '{filter.source}'")
        if filter.source_id:
            conditions.append(f"source_id = '{filter.source_id}'")
        if filter.author:
            conditions.append(f"author = '{filter.author}'")
        if filter.start_date:
            conditions.append(f"created_at >= '{filter.start_date}'")
        if filter.end_date:
            conditions.append(f"created_at <= '{filter.end_date}'")
        if not conditions:
            return
        filters = "WHERE " + " AND ".join(conditions)
        with self.client.cursor() as cur:
            cur.execute(f"DELETE FROM {table} {filters}")
            self.client.commit()
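# --- Illustrative usage sketch (added; not part of the original module) -----
# A hedged example of upserting one row through PostgresClient, assuming the
# PG_* variables point at a pgvector-enabled database and that a "documents"
# table with the columns referenced in upsert() already exists (both are
# assumptions, not verified here).
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        client = PostgresClient()
        await client.upsert(
            "documents",
            {
                "id": "doc-1_0",
                "content": "hello world",
                "embedding": [0.1] * 1536,  # placeholder vector
                "document_id": "doc-1",
                "source": "file",
                "source_id": None,
                "url": None,
                "author": "alice",
                "created_at": None,  # filled in with datetime.now() by upsert()
            },
        )

    asyncio.run(_demo())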
# ---------------------------- next provider module ----------------------------
import json
import os
import asyncio
from loguru import logger
from typing import Dict, List, Optional
from pymilvus import (
Collection,
connections,
utility,
FieldSchema,
DataType,
CollectionSchema,
MilvusException,
)
from uuid import uuid4
from services.date import to_unix_timestamp
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
Source,
DocumentMetadataFilter,
QueryResult,
QueryWithEmbedding,
DocumentChunkWithScore,
)
MILVUS_COLLECTION = os.environ.get("MILVUS_COLLECTION") or "c" + uuid4().hex
MILVUS_HOST = os.environ.get("MILVUS_HOST") or "localhost"
MILVUS_PORT = os.environ.get("MILVUS_PORT") or 19530
MILVUS_USER = os.environ.get("MILVUS_USER")
MILVUS_PASSWORD = os.environ.get("MILVUS_PASSWORD")
MILVUS_USE_SECURITY = False if MILVUS_PASSWORD is None else True
MILVUS_INDEX_PARAMS = os.environ.get("MILVUS_INDEX_PARAMS")
MILVUS_SEARCH_PARAMS = os.environ.get("MILVUS_SEARCH_PARAMS")
MILVUS_CONSISTENCY_LEVEL = os.environ.get("MILVUS_CONSISTENCY_LEVEL")
UPSERT_BATCH_SIZE = 100
OUTPUT_DIM = 1536
EMBEDDING_FIELD = "embedding"
class Required:
pass
# The field names that we are going to store within Milvus, the field declaration for schema creation, and the default value
SCHEMA_V1 = [
(
"pk",
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
Required,
),
(
EMBEDDING_FIELD,
FieldSchema(name=EMBEDDING_FIELD, dtype=DataType.FLOAT_VECTOR, dim=OUTPUT_DIM),
Required,
),
(
"text",
FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
Required,
),
(
"document_id",
FieldSchema(name="document_id", dtype=DataType.VARCHAR, max_length=65535),
"",
),
(
"source_id",
FieldSchema(name="source_id", dtype=DataType.VARCHAR, max_length=65535),
"",
),
(
"id",
FieldSchema(
name="id",
dtype=DataType.VARCHAR,
max_length=65535,
),
"",
),
(
"source",
FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=65535),
"",
),
("url", FieldSchema(name="url", dtype=DataType.VARCHAR, max_length=65535), ""),
("created_at", FieldSchema(name="created_at", dtype=DataType.INT64), -1),
(
"author",
FieldSchema(name="author", dtype=DataType.VARCHAR, max_length=65535),
"",
),
]
# V2 schema, remove the "pk" field
SCHEMA_V2 = SCHEMA_V1[1:]
SCHEMA_V2[4][1].is_primary = True
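# For reference, the flattened field order implied by the tuples above is:
#   SCHEMA_V1: pk, embedding, text, document_id, source_id, id, source, url, created_at, author
#   SCHEMA_V2: embedding, text, document_id, source_id, id, source, url, created_at, author
# (SCHEMA_V2 drops the auto-generated "pk", and the assignment above promotes "id" to the primary key.)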
class MilvusDataStore(DataStore):
def __init__(
self,
create_new: Optional[bool] = False,
consistency_level: str = "Bounded",
):
"""Create a Milvus DataStore.
The Milvus Datastore allows for storing your indexes and metadata within a Milvus instance.
Args:
            create_new (Optional[bool], optional): Whether to overwrite if collection already exists. Defaults to False.
consistency_level(str, optional): Specify the collection consistency level.
Defaults to "Bounded" for search performance.
Set to "Strong" in test cases for result validation.
"""
        # Overwrite the default consistency level with MILVUS_CONSISTENCY_LEVEL if it is set
self._consistency_level = MILVUS_CONSISTENCY_LEVEL or consistency_level
self._create_connection()
self._create_collection(MILVUS_COLLECTION, create_new) # type: ignore
self._create_index()
def _get_schema(self):
return SCHEMA_V1 if self._schema_ver == "V1" else SCHEMA_V2
def _create_connection(self):
try:
self.alias = ""
# Check if the connection already exists
for x in connections.list_connections():
addr = connections.get_connection_addr(x[0])
if x[1] and ('address' in addr) and (addr['address'] == "{}:{}".format(MILVUS_HOST, MILVUS_PORT)):
self.alias = x[0]
logger.info("Reuse connection to Milvus server '{}:{}' with alias '{:s}'"
.format(MILVUS_HOST, MILVUS_PORT, self.alias))
break
# Connect to the Milvus instance using the passed in Environment variables
if len(self.alias) == 0:
self.alias = uuid4().hex
connections.connect(
alias=self.alias,
host=MILVUS_HOST,
port=MILVUS_PORT,
user=MILVUS_USER, # type: ignore
password=MILVUS_PASSWORD, # type: ignore
secure=MILVUS_USE_SECURITY,
)
logger.info("Create connection to Milvus server '{}:{}' with alias '{:s}'"
.format(MILVUS_HOST, MILVUS_PORT, self.alias))
except Exception as e:
logger.error("Failed to create connection to Milvus server '{}:{}', error: {}"
.format(MILVUS_HOST, MILVUS_PORT, e))
def _create_collection(self, collection_name, create_new: bool) -> None:
"""Create a collection based on environment and passed in variables.
Args:
create_new (bool): Whether to overwrite if collection already exists.
"""
try:
self._schema_ver = "V1"
# If the collection exists and create_new is True, drop the existing collection
if utility.has_collection(collection_name, using=self.alias) and create_new:
utility.drop_collection(collection_name, using=self.alias)
            # Check if the collection doesn't exist
            if utility.has_collection(collection_name, using=self.alias) is False:
                # If it doesn't exist, use the field params from init to create a new schema
schema = [field[1] for field in SCHEMA_V2]
schema = CollectionSchema(schema)
# Use the schema to create a new collection
self.col = Collection(
collection_name,
schema=schema,
using=self.alias,
consistency_level=self._consistency_level,
)
self._schema_ver = "V2"
logger.info("Create Milvus collection '{}' with schema {} and consistency level {}"
.format(collection_name, self._schema_ver, self._consistency_level))
else:
# If the collection exists, point to it
self.col = Collection(
collection_name, using=self.alias
) # type: ignore
                # Determine which schema version is in use
for field in self.col.schema.fields:
if field.name == "id" and field.is_primary:
self._schema_ver = "V2"
break
logger.info("Milvus collection '{}' already exists with schema {}"
.format(collection_name, self._schema_ver))
except Exception as e:
logger.error("Failed to create collection '{}', error: {}".format(collection_name, e))
def _create_index(self):
# TODO: verify index/search params passed by os.environ
self.index_params = MILVUS_INDEX_PARAMS or None
self.search_params = MILVUS_SEARCH_PARAMS or None
try:
# If no index on the collection, create one
if len(self.col.indexes) == 0:
if self.index_params is not None:
# Convert the string format to JSON format parameters passed by MILVUS_INDEX_PARAMS
self.index_params = json.loads(self.index_params)
logger.info("Create Milvus index: {}".format(self.index_params))
# Create an index on the 'embedding' field with the index params found in init
self.col.create_index(EMBEDDING_FIELD, index_params=self.index_params)
else:
                    # If no index params were supplied, first try to create an HNSW index for Milvus
try:
i_p = {
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
logger.info("Attempting creation of Milvus '{}' index".format(i_p["index_type"]))
self.col.create_index(EMBEDDING_FIELD, index_params=i_p)
self.index_params = i_p
logger.info("Creation of Milvus '{}' index successful".format(i_p["index_type"]))
                    # If creation fails, most likely because this is a Zilliz Cloud instance, try to create an AutoIndex
except MilvusException:
logger.info("Attempting creation of Milvus default index")
i_p = {"metric_type": "IP", "index_type": "AUTOINDEX", "params": {}}
self.col.create_index(EMBEDDING_FIELD, index_params=i_p)
self.index_params = i_p
logger.info("Creation of Milvus default index successful")
# If an index already exists, grab its params
else:
# How about if the first index is not vector index?
for index in self.col.indexes:
idx = index.to_dict()
if idx["field"] == EMBEDDING_FIELD:
logger.info("Index already exists: {}".format(idx))
self.index_params = idx['index_param']
break
self.col.load()
if self.search_params is not None:
# Convert the string format to JSON format parameters passed by MILVUS_SEARCH_PARAMS
self.search_params = json.loads(self.search_params)
else:
# The default search params
metric_type = "IP"
if "metric_type" in self.index_params:
metric_type = self.index_params["metric_type"]
default_search_params = {
"IVF_FLAT": {"metric_type": metric_type, "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": metric_type, "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": metric_type, "params": {"nprobe": 10}},
"HNSW": {"metric_type": metric_type, "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": metric_type, "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": metric_type, "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": metric_type, "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": metric_type, "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": metric_type, "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": metric_type, "params": {}},
}
# Set the search params
self.search_params = default_search_params[self.index_params["index_type"]]
logger.info("Milvus search parameters: {}".format(self.search_params))
except Exception as e:
logger.error("Failed to create index, error: {}".format(e))
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""Upsert chunks into the datastore.
Args:
            chunks (Dict[str, List[DocumentChunk]]): A mapping from document id to the DocumentChunks to insert
Raises:
e: Error in upserting data.
Returns:
List[str]: The document_id's that were inserted.
"""
try:
# The doc id's to return for the upsert
doc_ids: List[str] = []
# List to collect all the insert data, skip the "pk" for schema V1
offset = 1 if self._schema_ver == "V1" else 0
insert_data = [[] for _ in range(len(self._get_schema()) - offset)]
# Go through each document chunklist and grab the data
for doc_id, chunk_list in chunks.items():
# Append the doc_id to the list we are returning
doc_ids.append(doc_id)
# Examine each chunk in the chunklist
for chunk in chunk_list:
# Extract data from the chunk
list_of_data = self._get_values(chunk)
# Check if the data is valid
if list_of_data is not None:
# Append each field to the insert_data
for x in range(len(insert_data)):
insert_data[x].append(list_of_data[x])
# Slice up our insert data into batches
batches = [
insert_data[i : i + UPSERT_BATCH_SIZE]
for i in range(0, len(insert_data), UPSERT_BATCH_SIZE)
]
# Attempt to insert each batch into our collection
# batch data can work with both V1 and V2 schema
for batch in batches:
if len(batch[0]) != 0:
try:
logger.info(f"Upserting batch of size {len(batch[0])}")
self.col.insert(batch)
logger.info(f"Upserted batch successfully")
except Exception as e:
logger.error(f"Failed to insert batch records, error: {e}")
raise e
            # This setting performs a flush after every insert; not worth enabling for small inserts
# self.col.flush()
return doc_ids
except Exception as e:
logger.error("Failed to insert records, error: {}".format(e))
return []
def _get_values(self, chunk: DocumentChunk) -> List[any] | None: # type: ignore
"""Convert the chunk into a list of values to insert whose indexes align with fields.
Args:
chunk (DocumentChunk): The chunk to convert.
Returns:
List (any): The values to insert.
"""
# Convert DocumentChunk and its sub models to dict
values = chunk.dict()
# Unpack the metadata into the same dict
meta = values.pop("metadata")
values.update(meta)
# Convert date to int timestamp form
if values["created_at"]:
values["created_at"] = to_unix_timestamp(values["created_at"])
# If source exists, change from Source object to the string value it holds
if values["source"]:
values["source"] = values["source"].value
# List to collect data we will return
ret = []
        # Grab the data corresponding to each field, excluding the hidden auto pk field for schema V1
offset = 1 if self._schema_ver == "V1" else 0
for key, _, default in self._get_schema()[offset:]:
# Grab the data at the key and default to our defaults set in init
x = values.get(key) or default
# If one of our required fields is missing, ignore the entire entry
if x is Required:
logger.info("Chunk " + values["id"] + " missing " + key + " skipping")
return None
# Add the corresponding value if it passes the tests
ret.append(x)
return ret
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""Query the QueryWithEmbedding against the MilvusDocumentSearch
Search the embedding and its filter in the collection.
Args:
queries (List[QueryWithEmbedding]): The list of searches to perform.
Returns:
List[QueryResult]: Results for each search.
"""
# Async to perform the query, adapted from pinecone implementation
async def _single_query(query: QueryWithEmbedding) -> QueryResult:
try:
filter = None
# Set the filter to expression that is valid for Milvus
if query.filter is not None:
# Either a valid filter or None will be returned
filter = self._get_filter(query.filter)
# Perform our search
return_from = 2 if self._schema_ver == "V1" else 1
res = self.col.search(
data=[query.embedding],
anns_field=EMBEDDING_FIELD,
param=self.search_params,
limit=query.top_k,
expr=filter,
output_fields=[
field[0] for field in self._get_schema()[return_from:]
], # Ignoring pk, embedding
)
# Results that will hold our DocumentChunkWithScores
results = []
# Parse every result for our search
for hit in res[0]: # type: ignore
# The distance score for the search result, falls under DocumentChunkWithScore
score = hit.score
# Our metadata info, falls under DocumentChunkMetadata
metadata = {}
# Grab the values that correspond to our fields, ignore pk and embedding.
for x in [field[0] for field in self._get_schema()[return_from:]]:
metadata[x] = hit.entity.get(x)
# If the source isn't valid, convert to None
if metadata["source"] not in Source.__members__:
metadata["source"] = None
# Text falls under the DocumentChunk
text = metadata.pop("text")
# Id falls under the DocumentChunk
ids = metadata.pop("id")
chunk = DocumentChunkWithScore(
id=ids,
score=score,
text=text,
metadata=DocumentChunkMetadata(**metadata),
)
results.append(chunk)
                # TODO: decide whether to also fetch the stored embedding; doing so requires a second query and slows performance
return QueryResult(query=query.query, results=results)
except Exception as e:
logger.error("Failed to query, error: {}".format(e))
return QueryResult(query=query.query, results=[])
results: List[QueryResult] = await asyncio.gather(
*[_single_query(query) for query in queries]
)
return results
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""Delete the entities based either on the chunk_id of the vector,
Args:
ids (Optional[List[str]], optional): The document_ids to delete. Defaults to None.
filter (Optional[DocumentMetadataFilter], optional): The filter to delete by. Defaults to None.
delete_all (Optional[bool], optional): Whether to drop the collection and recreate it. Defaults to None.
"""
# If deleting all, drop and create the new collection
if delete_all:
coll_name = self.col.name
logger.info("Delete the entire collection {} and create new one".format(coll_name))
# Release the collection from memory
self.col.release()
# Drop the collection
self.col.drop()
# Recreate the new collection
self._create_collection(coll_name, True)
self._create_index()
return True
# Keep track of how many we have deleted for later printing
delete_count = 0
batch_size = 100
pk_name = "pk" if self._schema_ver == "V1" else "id"
try:
            # By API design, `ids` is a list of document_ids, and document_id is not the
            # primary key, so we query for the matching primary keys and delete those.
            # A future version could delete by expression directly.
if (ids is not None) and len(ids) > 0:
# Add quotation marks around the string format id
ids = ['"' + str(id) + '"' for id in ids]
# Query for the pk's of entries that match id's
ids = self.col.query(f"document_id in [{','.join(ids)}]")
# Convert to list of pks
pks = [str(entry[pk_name]) for entry in ids] # type: ignore
# for schema V2, the "id" is varchar, rewrite the expression
if self._schema_ver != "V1":
pks = ['"' + pk + '"' for pk in pks]
                # Delete by ids batch by batch (to avoid an overly long expression)
logger.info("Apply {:d} deletions to schema {:s}".format(len(pks), self._schema_ver))
while len(pks) > 0:
batch_pks = pks[:batch_size]
pks = pks[batch_size:]
# Delete the entries batch by batch
res = self.col.delete(f"{pk_name} in [{','.join(batch_pks)}]")
# Increment our deleted count
delete_count += int(res.delete_count) # type: ignore
except Exception as e:
logger.error("Failed to delete by ids, error: {}".format(e))
try:
# Check if empty filter
if filter is not None:
# Convert filter to milvus expression
filter = self._get_filter(filter) # type: ignore
# Check if there is anything to filter
if len(filter) != 0: # type: ignore
# Query for the pk's of entries that match filter
res = self.col.query(filter) # type: ignore
# Convert to list of pks
pks = [str(entry[pk_name]) for entry in res] # type: ignore
# for schema V2, the "id" is varchar, rewrite the expression
if self._schema_ver != "V1":
pks = ['"' + pk + '"' for pk in pks]
                    # If there are valid pks to delete, delete them batch by batch (to avoid an overly long expression)
while len(pks) > 0: # type: ignore
batch_pks = pks[:batch_size]
pks = pks[batch_size:]
# Delete the entries batch by batch
res = self.col.delete(f"{pk_name} in [{','.join(batch_pks)}]") # type: ignore
# Increment our delete count
delete_count += int(res.delete_count) # type: ignore
except Exception as e:
logger.error("Failed to delete by filter, error: {}".format(e))
logger.info("{:d} records deleted".format(delete_count))
        # Flushing after every delete is expensive for small deletes, so it is left disabled here.
# self.col.flush()
return True
def _get_filter(self, filter: DocumentMetadataFilter) -> Optional[str]:
"""Converts a DocumentMetdataFilter to the expression that Milvus takes.
Args:
filter (DocumentMetadataFilter): The Filter to convert to Milvus expression.
Returns:
Optional[str]: The filter if valid, otherwise None.
"""
filters = []
# Go through all the fields and their values
for field, value in filter.dict().items():
# Check if the Value is empty
if value is not None:
# Convert start_date to int and add greater than or equal logic
if field == "start_date":
filters.append(
"(created_at >= " + str(to_unix_timestamp(value)) + ")"
)
# Convert end_date to int and add less than or equal logic
elif field == "end_date":
filters.append(
"(created_at <= " + str(to_unix_timestamp(value)) + ")"
)
# Convert Source to its string value and check equivalency
elif field == "source":
filters.append("(" + field + ' == "' + str(value.value) + '")')
# Check equivalency of rest of string fields
else:
filters.append("(" + field + ' == "' + str(value) + '")')
        # Join all our expressions with `and`
return " and ".join(filters)
|
# This is a version of the main.py file found in ../../server/main.py that also gives ChatGPT access to the upsert endpoint
# (allowing it to save information from the chat back to the vector database).
# Copy and paste this into the main file at ../../server/main.py if you choose to give the model access to the upsert endpoint
# and want to access the openapi.json when you run the app locally at http://0.0.0.0:8000/sub/openapi.json.
import os
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Depends, Body, UploadFile
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.staticfiles import StaticFiles
from loguru import logger
from models.api import (
DeleteRequest,
DeleteResponse,
QueryRequest,
QueryResponse,
UpsertRequest,
UpsertResponse,
)
from datastore.factory import get_datastore
from services.file import get_document_from_file
from models.models import DocumentMetadata, Source
bearer_scheme = HTTPBearer()
BEARER_TOKEN = os.environ.get("BEARER_TOKEN")
assert BEARER_TOKEN is not None
def validate_token(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
if credentials.scheme != "Bearer" or credentials.credentials != BEARER_TOKEN:
raise HTTPException(status_code=401, detail="Invalid or missing token")
return credentials
app = FastAPI()
app.mount("/.well-known", StaticFiles(directory=".well-known"), name="static")
# Create a sub-application, in order to access just the upsert and query endpoints in the OpenAPI schema, found at http://0.0.0.0:8000/sub/openapi.json when the app is running locally
sub_app = FastAPI(
title="Retrieval Plugin API",
description="A retrieval API for querying and filtering documents based on natural language queries and metadata",
version="1.0.0",
servers=[{"url": "https://your-app-url.com"}],
dependencies=[Depends(validate_token)],
)
app.mount("/sub", sub_app)
@app.post(
"/upsert-file",
response_model=UpsertResponse,
)
async def upsert_file(
file: UploadFile = File(...),
metadata: Optional[str] = Form(None),
):
try:
metadata_obj = (
DocumentMetadata.parse_raw(metadata)
if metadata
else DocumentMetadata(source=Source.file)
)
    except Exception:
metadata_obj = DocumentMetadata(source=Source.file)
document = await get_document_from_file(file, metadata_obj)
try:
ids = await datastore.upsert([document])
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail=f"str({e})")
@app.post(
"/upsert",
response_model=UpsertResponse,
)
async def upsert_main(
request: UpsertRequest = Body(...),
token: HTTPAuthorizationCredentials = Depends(validate_token),
):
try:
ids = await datastore.upsert(request.documents)
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@sub_app.post(
"/upsert",
response_model=UpsertResponse,
# NOTE: We are describing the shape of the API endpoint input due to a current limitation in parsing arrays of objects from OpenAPI schemas. This will not be necessary in the future.
description="Save chat information. Accepts an array of documents with text (potential questions + conversation text), metadata (source 'chat' and timestamp, no ID as this will be generated). Confirm with the user before saving, ask for more details/context.",
)
async def upsert(
request: UpsertRequest = Body(...),
token: HTTPAuthorizationCredentials = Depends(validate_token),
):
try:
ids = await datastore.upsert(request.documents)
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.post(
"/query",
response_model=QueryResponse,
)
async def query_main(
request: QueryRequest = Body(...),
token: HTTPAuthorizationCredentials = Depends(validate_token),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@sub_app.post(
"/query",
response_model=QueryResponse,
# NOTE: We are describing the shape of the API endpoint input due to a current limitation in parsing arrays of objects from OpenAPI schemas. This will not be necessary in the future.
description="Accepts search query objects array each with query and optional filter. Break down complex questions into sub-questions. Refine results by criteria, e.g. time / source, don't do this often. Split queries if ResponseTooLargeError occurs.",
)
async def query(
request: QueryRequest = Body(...),
token: HTTPAuthorizationCredentials = Depends(validate_token),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.delete(
"/delete",
response_model=DeleteResponse,
)
async def delete(
request: DeleteRequest = Body(...),
token: HTTPAuthorizationCredentials = Depends(validate_token),
):
if not (request.ids or request.filter or request.delete_all):
raise HTTPException(
status_code=400,
detail="One of ids, filter, or delete_all is required",
)
try:
success = await datastore.delete(
ids=request.ids,
filter=request.filter,
delete_all=request.delete_all,
)
return DeleteResponse(success=success)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.on_event("startup")
async def startup():
global datastore
datastore = await get_datastore()
def start():
uvicorn.run("server.main:app", host="0.0.0.0", port=8000, reload=True)
|
# This is a version of the main.py file found in ../../../server/main.py without authentication.
# Copy and paste this into the main file at ../../../server/main.py if you choose to use no authentication for your retrieval plugin.
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Body, UploadFile
from fastapi.staticfiles import StaticFiles
from loguru import logger
from models.api import (
DeleteRequest,
DeleteResponse,
QueryRequest,
QueryResponse,
UpsertRequest,
UpsertResponse,
)
from datastore.factory import get_datastore
from services.file import get_document_from_file
from models.models import DocumentMetadata, Source
app = FastAPI()
app.mount("/.well-known", StaticFiles(directory=".well-known"), name="static")
# Create a sub-application, in order to access just the query endpoints in the OpenAPI schema, found at http://0.0.0.0:8000/sub/openapi.json when the app is running locally
sub_app = FastAPI(
title="Retrieval Plugin API",
description="A retrieval API for querying and filtering documents based on natural language queries and metadata",
version="1.0.0",
servers=[{"url": "https://your-app-url.com"}],
)
app.mount("/sub", sub_app)
@app.post(
"/upsert-file",
response_model=UpsertResponse,
)
async def upsert_file(
file: UploadFile = File(...),
metadata: Optional[str] = Form(None),
):
try:
metadata_obj = (
DocumentMetadata.parse_raw(metadata)
if metadata
else DocumentMetadata(source=Source.file)
)
    except Exception:
metadata_obj = DocumentMetadata(source=Source.file)
document = await get_document_from_file(file, metadata_obj)
try:
ids = await datastore.upsert([document])
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail=f"str({e})")
@app.post(
"/upsert",
response_model=UpsertResponse,
)
async def upsert(
request: UpsertRequest = Body(...),
):
try:
ids = await datastore.upsert(request.documents)
return UpsertResponse(ids=ids)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.post(
"/query",
response_model=QueryResponse,
)
async def query_main(
request: QueryRequest = Body(...),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@sub_app.post(
"/query",
response_model=QueryResponse,
description="Accepts search query objects with query and optional filter. Break down complex questions into sub-questions. Refine results by criteria, e.g. time / source, don't do this often. Split queries if ResponseTooLargeError occurs.",
)
async def query(
request: QueryRequest = Body(...),
):
try:
results = await datastore.query(
request.queries,
)
return QueryResponse(results=results)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.delete(
"/delete",
response_model=DeleteResponse,
)
async def delete(
request: DeleteRequest = Body(...),
):
if not (request.ids or request.filter or request.delete_all):
raise HTTPException(
status_code=400,
detail="One of ids, filter, or delete_all is required",
)
try:
success = await datastore.delete(
ids=request.ids,
filter=request.filter,
delete_all=request.delete_all,
)
return DeleteResponse(success=success)
except Exception as e:
logger.error(e)
raise HTTPException(status_code=500, detail="Internal Service Error")
@app.on_event("startup")
async def startup():
global datastore
datastore = await get_datastore()
def start():
uvicorn.run("server.main:app", host="0.0.0.0", port=8000, reload=True)
|
import uuid
import json
import argparse
import asyncio
from loguru import logger
from models.models import Document, DocumentMetadata
from datastore.datastore import DataStore
from datastore.factory import get_datastore
from services.extract_metadata import extract_metadata_from_document
from services.pii_detection import screen_text_for_pii
DOCUMENT_UPSERT_BATCH_SIZE = 50
async def process_jsonl_dump(
filepath: str,
datastore: DataStore,
custom_metadata: dict,
screen_for_pii: bool,
extract_metadata: bool,
):
# open the jsonl file as a generator of dictionaries
with open(filepath) as jsonl_file:
data = [json.loads(line) for line in jsonl_file]
documents = []
skipped_items = []
# iterate over the data and create document objects
for item in data:
if len(documents) % 20 == 0:
logger.info(f"Processed {len(documents)} documents")
try:
# get the id, text, source, source_id, url, created_at and author from the item
# use default values if not specified
id = item.get("id", None)
text = item.get("text", None)
source = item.get("source", None)
source_id = item.get("source_id", None)
url = item.get("url", None)
created_at = item.get("created_at", None)
author = item.get("author", None)
if not text:
logger.info("No document text, skipping...")
continue
# create a metadata object with the source, source_id, url, created_at and author
metadata = DocumentMetadata(
source=source,
source_id=source_id,
url=url,
created_at=created_at,
author=author,
)
# update metadata with custom values
for key, value in custom_metadata.items():
if hasattr(metadata, key):
setattr(metadata, key, value)
# screen for pii if requested
if screen_for_pii:
pii_detected = screen_text_for_pii(text)
# if pii detected, print a warning and skip the document
if pii_detected:
logger.info("PII detected in document, skipping")
skipped_items.append(item) # add the skipped item to the list
continue
# extract metadata if requested
if extract_metadata:
# extract metadata from the document text
extracted_metadata = extract_metadata_from_document(
f"Text: {text}; Metadata: {str(metadata)}"
)
# get a Metadata object from the extracted metadata
metadata = DocumentMetadata(**extracted_metadata)
# create a document object with the id, text and metadata
document = Document(
id=id,
text=text,
metadata=metadata,
)
documents.append(document)
except Exception as e:
# log the error and continue with the next item
logger.error(f"Error processing {item}: {e}")
skipped_items.append(item) # add the skipped item to the list
# do this in batches, the upsert method already batches documents but this allows
# us to add more descriptive logging
for i in range(0, len(documents), DOCUMENT_UPSERT_BATCH_SIZE):
        # Get the documents in the current batch
batch_documents = documents[i : i + DOCUMENT_UPSERT_BATCH_SIZE]
logger.info(f"Upserting batch of {len(batch_documents)} documents, batch {i}")
await datastore.upsert(batch_documents)
# print the skipped items
logger.info(f"Skipped {len(skipped_items)} items due to errors or PII detection")
for item in skipped_items:
logger.info(item)
async def main():
# parse the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--filepath", required=True, help="The path to the jsonl dump")
parser.add_argument(
"--custom_metadata",
default="{}",
help="A JSON string of key-value pairs to update the metadata of the documents",
)
parser.add_argument(
"--screen_for_pii",
        action="store_true",
help="A boolean flag to indicate whether to try the PII detection function (using a language model)",
)
parser.add_argument(
"--extract_metadata",
        action="store_true",
help="A boolean flag to indicate whether to try to extract metadata from the document (using a language model)",
)
args = parser.parse_args()
# get the arguments
filepath = args.filepath
custom_metadata = json.loads(args.custom_metadata)
screen_for_pii = args.screen_for_pii
extract_metadata = args.extract_metadata
# initialize the db instance once as a global variable
datastore = await get_datastore()
# process the jsonl dump
await process_jsonl_dump(
filepath, datastore, custom_metadata, screen_for_pii, extract_metadata
)
if __name__ == "__main__":
asyncio.run(main())
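# Hedged illustration (not part of the original script): each line of the dump is a JSON
# object with the optional fields read above, for example
#     {"id": "doc1", "text": "Hello world", "source": "file", "author": "Jane Doe"}
# and a hypothetical invocation (the script name here is an assumption) might look like
#     python process_jsonl.py --filepath dump.jsonl --custom_metadata '{"author": "Team"}'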
|
import uuid
import json
import argparse
import asyncio
from loguru import logger
from models.models import Document, DocumentMetadata
from datastore.datastore import DataStore
from datastore.factory import get_datastore
from services.extract_metadata import extract_metadata_from_document
from services.pii_detection import screen_text_for_pii
DOCUMENT_UPSERT_BATCH_SIZE = 50
async def process_json_dump(
filepath: str,
datastore: DataStore,
custom_metadata: dict,
screen_for_pii: bool,
extract_metadata: bool,
):
# load the json file as a list of dictionaries
with open(filepath) as json_file:
data = json.load(json_file)
documents = []
skipped_items = []
# iterate over the data and create document objects
for item in data:
if len(documents) % 20 == 0:
logger.info(f"Processed {len(documents)} documents")
try:
# get the id, text, source, source_id, url, created_at and author from the item
# use default values if not specified
id = item.get("id", None)
text = item.get("text", None)
source = item.get("source", None)
source_id = item.get("source_id", None)
url = item.get("url", None)
created_at = item.get("created_at", None)
author = item.get("author", None)
if not text:
logger.info("No document text, skipping...")
continue
# create a metadata object with the source, source_id, url, created_at and author
metadata = DocumentMetadata(
source=source,
source_id=source_id,
url=url,
created_at=created_at,
author=author,
)
logger.info("metadata: ", str(metadata))
# update metadata with custom values
for key, value in custom_metadata.items():
if hasattr(metadata, key):
setattr(metadata, key, value)
# screen for pii if requested
if screen_for_pii:
pii_detected = screen_text_for_pii(text)
# if pii detected, print a warning and skip the document
if pii_detected:
logger.info("PII detected in document, skipping")
skipped_items.append(item) # add the skipped item to the list
continue
# extract metadata if requested
if extract_metadata:
# extract metadata from the document text
extracted_metadata = extract_metadata_from_document(
f"Text: {text}; Metadata: {str(metadata)}"
)
# get a Metadata object from the extracted metadata
metadata = DocumentMetadata(**extracted_metadata)
# create a document object with the id or a random id, text and metadata
document = Document(
id=id or str(uuid.uuid4()),
text=text,
metadata=metadata,
)
documents.append(document)
except Exception as e:
# log the error and continue with the next item
logger.error(f"Error processing {item}: {e}")
skipped_items.append(item) # add the skipped item to the list
# do this in batches, the upsert method already batches documents but this allows
# us to add more descriptive logging
for i in range(0, len(documents), DOCUMENT_UPSERT_BATCH_SIZE):
        # Get the documents in the current batch
batch_documents = documents[i : i + DOCUMENT_UPSERT_BATCH_SIZE]
logger.info(f"Upserting batch of {len(batch_documents)} documents, batch {i}")
logger.info("documents: ", documents)
await datastore.upsert(batch_documents)
# print the skipped items
logger.info(f"Skipped {len(skipped_items)} items due to errors or PII detection")
for item in skipped_items:
logger.info(item)
async def main():
# parse the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--filepath", required=True, help="The path to the json dump")
parser.add_argument(
"--custom_metadata",
default="{}",
help="A JSON string of key-value pairs to update the metadata of the documents",
)
parser.add_argument(
"--screen_for_pii",
        action="store_true",
help="A boolean flag to indicate whether to try the PII detection function (using a language model)",
)
parser.add_argument(
"--extract_metadata",
        action="store_true",
help="A boolean flag to indicate whether to try to extract metadata from the document (using a language model)",
)
args = parser.parse_args()
# get the arguments
filepath = args.filepath
custom_metadata = json.loads(args.custom_metadata)
screen_for_pii = args.screen_for_pii
extract_metadata = args.extract_metadata
# initialize the db instance once as a global variable
datastore = await get_datastore()
# process the json dump
await process_json_dump(
filepath, datastore, custom_metadata, screen_for_pii, extract_metadata
)
if __name__ == "__main__":
asyncio.run(main())
|
import uuid
import zipfile
import os
import json
import argparse
import asyncio
from loguru import logger
from models.models import Document, DocumentMetadata, Source
from datastore.datastore import DataStore
from datastore.factory import get_datastore
from services.extract_metadata import extract_metadata_from_document
from services.file import extract_text_from_filepath
from services.pii_detection import screen_text_for_pii
DOCUMENT_UPSERT_BATCH_SIZE = 50
async def process_file_dump(
filepath: str,
datastore: DataStore,
custom_metadata: dict,
screen_for_pii: bool,
extract_metadata: bool,
):
# create a ZipFile object and extract all the files into a directory named 'dump'
with zipfile.ZipFile(filepath) as zip_file:
zip_file.extractall("dump")
documents = []
skipped_files = []
# use os.walk to traverse the dump directory and its subdirectories
for root, dirs, files in os.walk("dump"):
for filename in files:
if len(documents) % 20 == 0:
logger.info(f"Processed {len(documents)} documents")
filepath = os.path.join(root, filename)
try:
extracted_text = extract_text_from_filepath(filepath)
logger.info(f"extracted_text from {filepath}")
# create a metadata object with the source and source_id fields
metadata = DocumentMetadata(
source=Source.file,
source_id=filename,
)
# update metadata with custom values
for key, value in custom_metadata.items():
if hasattr(metadata, key):
setattr(metadata, key, value)
# screen for pii if requested
if screen_for_pii:
pii_detected = screen_text_for_pii(extracted_text)
# if pii detected, print a warning and skip the document
if pii_detected:
logger.info("PII detected in document, skipping")
skipped_files.append(
filepath
) # add the skipped file to the list
continue
# extract metadata if requested
if extract_metadata:
# extract metadata from the document text
extracted_metadata = extract_metadata_from_document(
f"Text: {extracted_text}; Metadata: {str(metadata)}"
)
# get a Metadata object from the extracted metadata
metadata = DocumentMetadata(**extracted_metadata)
# create a document object with a random id, text and metadata
document = Document(
id=str(uuid.uuid4()),
text=extracted_text,
metadata=metadata,
)
documents.append(document)
except Exception as e:
# log the error and continue with the next file
logger.error(f"Error processing {filepath}: {e}")
skipped_files.append(filepath) # add the skipped file to the list
# do this in batches, the upsert method already batches documents but this allows
# us to add more descriptive logging
for i in range(0, len(documents), DOCUMENT_UPSERT_BATCH_SIZE):
        # Get the documents in the current batch
        batch_documents = documents[i : i + DOCUMENT_UPSERT_BATCH_SIZE]
logger.info(f"Upserting batch of {len(batch_documents)} documents, batch {i}")
logger.info("documents: ", documents)
await datastore.upsert(batch_documents)
# delete all files in the dump directory
for root, dirs, files in os.walk("dump", topdown=False):
for filename in files:
filepath = os.path.join(root, filename)
os.remove(filepath)
for dirname in dirs:
dirpath = os.path.join(root, dirname)
os.rmdir(dirpath)
# delete the dump directory
os.rmdir("dump")
# print the skipped files
logger.info(f"Skipped {len(skipped_files)} files due to errors or PII detection")
for file in skipped_files:
logger.info(file)
async def main():
# parse the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--filepath", required=True, help="The path to the file dump")
parser.add_argument(
"--custom_metadata",
default="{}",
help="A JSON string of key-value pairs to update the metadata of the documents",
)
parser.add_argument(
"--screen_for_pii",
        action="store_true",
help="A boolean flag to indicate whether to try the PII detection function (using a language model)",
)
parser.add_argument(
"--extract_metadata",
        action="store_true",
help="A boolean flag to indicate whether to try to extract metadata from the document (using a language model)",
)
args = parser.parse_args()
# get the arguments
filepath = args.filepath
custom_metadata = json.loads(args.custom_metadata)
screen_for_pii = args.screen_for_pii
extract_metadata = args.extract_metadata
# initialize the db instance once as a global variable
datastore = await get_datastore()
# process the file dump
await process_file_dump(
filepath, datastore, custom_metadata, screen_for_pii, extract_metadata
)
if __name__ == "__main__":
asyncio.run(main())
|
from models.models import Source
from services.openai import get_chat_completion
import json
from typing import Dict
import os
from loguru import logger
def extract_metadata_from_document(text: str) -> Dict[str, str]:
sources = Source.__members__.keys()
sources_string = ", ".join(sources)
# This prompt is just an example, change it to fit your use case
messages = [
{
"role": "system",
"content": f"""
Given a document from a user, try to extract the following metadata:
- source: string, one of {sources_string}
- url: string or don't specify
- created_at: string or don't specify
- author: string or don't specify
Respond with a JSON containing the extracted metadata in key value pairs. If you don't find a metadata field, don't specify it.
""",
},
{"role": "user", "content": text},
]
# NOTE: Azure Open AI requires deployment id
# Read environment variable - if not set - not used
completion = get_chat_completion(
messages,
"gpt-4",
os.environ.get("OPENAI_METADATA_EXTRACTIONMODEL_DEPLOYMENTID")
) # TODO: change to your preferred model name
logger.info(f"completion: {completion}")
try:
metadata = json.loads(completion)
    except Exception:
metadata = {}
return metadata
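# Hedged illustration (not part of the original service): for an input document such as
# "Posted by Jane Doe on 2023-05-01: quarterly planning notes", the model is expected to
# return a JSON object that json.loads can parse, e.g.
#     {"source": "file", "created_at": "2023-05-01", "author": "Jane Doe"}
# Any completion that cannot be parsed simply results in an empty metadata dict.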
|
import os
from services.openai import get_chat_completion
def screen_text_for_pii(text: str) -> bool:
# This prompt is just an example, change it to fit your use case
messages = [
{
"role": "system",
"content": f"""
You can only respond with the word "True" or "False", where your answer indicates whether the text in the user's message contains PII.
Do not explain your answer, and do not use punctuation.
Your task is to identify whether the text extracted from your company files
contains sensitive PII information that should not be shared with the broader company. Here are some things to look out for:
- An email address that identifies a specific person in either the local-part or the domain
- The postal address of a private residence (must include at least a street name)
- The postal address of a public place (must include either a street name or business name)
- Notes about hiring decisions with mentioned names of candidates. The user will send a document for you to analyze.
""",
},
{"role": "user", "content": text},
]
completion = get_chat_completion(
messages,
deployment_id=os.environ.get("OPENAI_COMPLETIONMODEL_DEPLOYMENTID")
)
if completion.startswith("True"):
return True
return False
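# Hedged usage sketch (not part of the original service): the helper returns a plain bool,
# so callers can gate ingestion on it. Running it requires OPENAI_API_KEY (or the Azure
# deployment variable above) to be set; the example text is synthetic.
if __name__ == "__main__":
    flagged = screen_text_for_pii("Contact me at jane.doe@example.com about the offer.")
    print(f"PII detected: {flagged}")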
|
import os
from io import BufferedReader
from typing import Optional
from fastapi import UploadFile
import mimetypes
from PyPDF2 import PdfReader
import docx2txt
import csv
import pptx
from loguru import logger
from models.models import Document, DocumentMetadata
async def get_document_from_file(
file: UploadFile, metadata: DocumentMetadata
) -> Document:
extracted_text = await extract_text_from_form_file(file)
doc = Document(text=extracted_text, metadata=metadata)
return doc
def extract_text_from_filepath(filepath: str, mimetype: Optional[str] = None) -> str:
"""Return the text content of a file given its filepath."""
if mimetype is None:
# Get the mimetype of the file based on its extension
mimetype, _ = mimetypes.guess_type(filepath)
if not mimetype:
if filepath.endswith(".md"):
mimetype = "text/markdown"
else:
raise Exception("Unsupported file type")
try:
with open(filepath, "rb") as file:
extracted_text = extract_text_from_file(file, mimetype)
except Exception as e:
logger.error(e)
raise e
return extracted_text
def extract_text_from_file(file: BufferedReader, mimetype: str) -> str:
if mimetype == "application/pdf":
# Extract text from pdf using PyPDF2
reader = PdfReader(file)
extracted_text = " ".join([page.extract_text() for page in reader.pages])
elif mimetype == "text/plain" or mimetype == "text/markdown":
# Read text from plain text file
extracted_text = file.read().decode("utf-8")
elif (
mimetype
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
):
# Extract text from docx using docx2txt
extracted_text = docx2txt.process(file)
elif mimetype == "text/csv":
# Extract text from csv using csv module
extracted_text = ""
decoded_buffer = (line.decode("utf-8") for line in file)
reader = csv.reader(decoded_buffer)
for row in reader:
extracted_text += " ".join(row) + "\n"
elif (
mimetype
== "application/vnd.openxmlformats-officedocument.presentationml.presentation"
):
# Extract text from pptx using python-pptx
extracted_text = ""
presentation = pptx.Presentation(file)
for slide in presentation.slides:
for shape in slide.shapes:
if shape.has_text_frame:
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
extracted_text += run.text + " "
extracted_text += "\n"
else:
# Unsupported file type
raise ValueError("Unsupported file type: {}".format(mimetype))
return extracted_text
# Extract text from a file based on its mimetype
async def extract_text_from_form_file(file: UploadFile):
"""Return the text content of a file."""
# get the file body from the upload file object
mimetype = file.content_type
logger.info(f"mimetype: {mimetype}")
logger.info(f"file.file: {file.file}")
logger.info("file: ", file)
file_stream = await file.read()
temp_file_path = "/tmp/temp_file"
# write the file to a temporary location
with open(temp_file_path, "wb") as f:
f.write(file_stream)
try:
extracted_text = extract_text_from_filepath(temp_file_path, mimetype)
except Exception as e:
logger.error(e)
os.remove(temp_file_path)
raise e
# remove file from temp location
os.remove(temp_file_path)
return extracted_text
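# Hedged usage sketch (not part of the original service): extract_text_from_filepath
# infers the mimetype from the file extension, so a local file can be read directly.
# "example.md" is a hypothetical path used only for illustration.
if __name__ == "__main__":
    print(extract_text_from_filepath("example.md")[:200])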
|
from typing import List
import openai
import os
from loguru import logger
from tenacity import retry, wait_random_exponential, stop_after_attempt
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
def get_embeddings(texts: List[str]) -> List[List[float]]:
"""
Embed texts using OpenAI's ada model.
Args:
texts: The list of texts to embed.
Returns:
A list of embeddings, each of which is a list of floats.
Raises:
Exception: If the OpenAI API call fails.
"""
# Call the OpenAI API to get the embeddings
# NOTE: Azure Open AI requires deployment id
deployment = os.environ.get("OPENAI_EMBEDDINGMODEL_DEPLOYMENTID")
response = {}
    if deployment is None:
response = openai.Embedding.create(input=texts, model="text-embedding-ada-002")
else:
response = openai.Embedding.create(input=texts, deployment_id=deployment)
# Extract the embedding data from the response
data = response["data"] # type: ignore
# Return the embeddings as a list of lists of floats
return [result["embedding"] for result in data]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(3))
def get_chat_completion(
messages,
model="gpt-3.5-turbo", # use "gpt-4" for better results
    deployment_id=None,
):
"""
Generate a chat completion using OpenAI's chat completion API.
Args:
messages: The list of messages in the chat history.
        model: The name of the model to use for the completion. Default is gpt-3.5-turbo, which is a fast, cheap and versatile model. Use gpt-4 for higher quality but slower results.
        deployment_id: Optional Azure OpenAI deployment id. If set, the request is sent to that deployment instead of selecting a model by name.
Returns:
A string containing the chat completion.
Raises:
Exception: If the OpenAI API call fails.
"""
# call the OpenAI chat completion API with the given messages
# Note: Azure Open AI requires deployment id
response = {}
    if deployment_id is None:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
)
else:
response = openai.ChatCompletion.create(
            deployment_id=deployment_id,
messages=messages,
)
choices = response["choices"] # type: ignore
completion = choices[0].message.content.strip()
logger.info(f"Completion: {completion}")
return completion
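# Hedged usage sketch (not part of the original service): both helpers call the OpenAI
# API, so OPENAI_API_KEY (or the Azure deployment variables) must be set before running.
if __name__ == "__main__":
    vectors = get_embeddings(["hello world"])
    print(f"Got {len(vectors)} embedding(s) of dimension {len(vectors[0])}")
    reply = get_chat_completion([{"role": "user", "content": "Say hi in one word."}])
    print(reply)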
|
import arrow
from loguru import logger
def to_unix_timestamp(date_str: str) -> int:
"""
Convert a date string to a unix timestamp (seconds since epoch).
Args:
date_str: The date string to convert.
Returns:
The unix timestamp corresponding to the date string.
If the date string cannot be parsed as a valid date format, returns the current unix timestamp and prints a warning.
"""
# Try to parse the date string using arrow, which supports many common date formats
try:
date_obj = arrow.get(date_str)
return int(date_obj.timestamp())
except arrow.parser.ParserError:
# If the parsing fails, return the current unix timestamp and print a warning
logger.info(f"Invalid date format: {date_str}")
return int(arrow.now().timestamp())
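# Hedged usage sketch (not part of the original service): a quick sanity check of the
# conversion above. The expected value assumes the ISO date is interpreted as UTC midnight.
if __name__ == "__main__":
    assert to_unix_timestamp("2023-03-01T00:00:00Z") == 1677628800
    # An unparseable string falls back to the current time rather than raising.
    print(to_unix_timestamp("not-a-date"))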
|
from typing import Dict, List, Optional, Tuple
import uuid
import os
from models.models import Document, DocumentChunk, DocumentChunkMetadata
import tiktoken
from services.openai import get_embeddings
# Global variables
tokenizer = tiktoken.get_encoding(
"cl100k_base"
) # The encoding scheme to use for tokenization
# Constants
CHUNK_SIZE = 200 # The target size of each text chunk in tokens
MIN_CHUNK_SIZE_CHARS = 350 # The minimum size of each text chunk in characters
MIN_CHUNK_LENGTH_TO_EMBED = 5 # Discard chunks shorter than this
EMBEDDINGS_BATCH_SIZE = int(os.environ.get("OPENAI_EMBEDDING_BATCH_SIZE", 128)) # The number of embeddings to request at a time
MAX_NUM_CHUNKS = 10000 # The maximum number of chunks to generate from a text
def get_text_chunks(text: str, chunk_token_size: Optional[int]) -> List[str]:
"""
Split a text into chunks of ~CHUNK_SIZE tokens, based on punctuation and newline boundaries.
Args:
text: The text to split into chunks.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A list of text chunks, each of which is a string of ~CHUNK_SIZE tokens.
"""
# Return an empty list if the text is empty or whitespace
if not text or text.isspace():
return []
# Tokenize the text
tokens = tokenizer.encode(text, disallowed_special=())
# Initialize an empty list of chunks
chunks = []
# Use the provided chunk token size or the default one
chunk_size = chunk_token_size or CHUNK_SIZE
# Initialize a counter for the number of chunks
num_chunks = 0
# Loop until all tokens are consumed
while tokens and num_chunks < MAX_NUM_CHUNKS:
# Take the first chunk_size tokens as a chunk
chunk = tokens[:chunk_size]
# Decode the chunk into text
chunk_text = tokenizer.decode(chunk)
# Skip the chunk if it is empty or whitespace
if not chunk_text or chunk_text.isspace():
# Remove the tokens corresponding to the chunk text from the remaining tokens
tokens = tokens[len(chunk) :]
# Continue to the next iteration of the loop
continue
# Find the last period or punctuation mark in the chunk
last_punctuation = max(
chunk_text.rfind("."),
chunk_text.rfind("?"),
chunk_text.rfind("!"),
chunk_text.rfind("\n"),
)
        # If there is a punctuation mark, and it appears after the first MIN_CHUNK_SIZE_CHARS characters
if last_punctuation != -1 and last_punctuation > MIN_CHUNK_SIZE_CHARS:
# Truncate the chunk text at the punctuation mark
chunk_text = chunk_text[: last_punctuation + 1]
# Remove any newline characters and strip any leading or trailing whitespace
chunk_text_to_append = chunk_text.replace("\n", " ").strip()
if len(chunk_text_to_append) > MIN_CHUNK_LENGTH_TO_EMBED:
# Append the chunk text to the list of chunks
chunks.append(chunk_text_to_append)
# Remove the tokens corresponding to the chunk text from the remaining tokens
tokens = tokens[len(tokenizer.encode(chunk_text, disallowed_special=())) :]
# Increment the number of chunks
num_chunks += 1
# Handle the remaining tokens
if tokens:
remaining_text = tokenizer.decode(tokens).replace("\n", " ").strip()
if len(remaining_text) > MIN_CHUNK_LENGTH_TO_EMBED:
chunks.append(remaining_text)
return chunks
def create_document_chunks(
doc: Document, chunk_token_size: Optional[int]
) -> Tuple[List[DocumentChunk], str]:
"""
Create a list of document chunks from a document object and return the document id.
Args:
doc: The document object to create chunks from. It should have a text attribute and optionally an id and a metadata attribute.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A tuple of (doc_chunks, doc_id), where doc_chunks is a list of document chunks, each of which is a DocumentChunk object with an id, a document_id, a text, and a metadata attribute,
and doc_id is the id of the document object, generated if not provided. The id of each chunk is generated from the document id and a sequential number, and the metadata is copied from the document object.
"""
# Check if the document text is empty or whitespace
if not doc.text or doc.text.isspace():
return [], doc.id or str(uuid.uuid4())
# Generate a document id if not provided
doc_id = doc.id or str(uuid.uuid4())
# Split the document text into chunks
text_chunks = get_text_chunks(doc.text, chunk_token_size)
metadata = (
DocumentChunkMetadata(**doc.metadata.__dict__)
if doc.metadata is not None
else DocumentChunkMetadata()
)
metadata.document_id = doc_id
# Initialize an empty list of chunks for this document
doc_chunks = []
# Assign each chunk a sequential number and create a DocumentChunk object
for i, text_chunk in enumerate(text_chunks):
chunk_id = f"{doc_id}_{i}"
doc_chunk = DocumentChunk(
id=chunk_id,
text=text_chunk,
metadata=metadata,
)
# Append the chunk object to the list of chunks for this document
doc_chunks.append(doc_chunk)
# Return the list of chunks and the document id
return doc_chunks, doc_id
def get_document_chunks(
documents: List[Document], chunk_token_size: Optional[int]
) -> Dict[str, List[DocumentChunk]]:
"""
Convert a list of documents into a dictionary from document id to list of document chunks.
Args:
documents: The list of documents to convert.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A dictionary mapping each document id to a list of document chunks, each of which is a DocumentChunk object
with text, metadata, and embedding attributes.
"""
# Initialize an empty dictionary of lists of chunks
chunks: Dict[str, List[DocumentChunk]] = {}
# Initialize an empty list of all chunks
all_chunks: List[DocumentChunk] = []
# Loop over each document and create chunks
for doc in documents:
doc_chunks, doc_id = create_document_chunks(doc, chunk_token_size)
# Append the chunks for this document to the list of all chunks
all_chunks.extend(doc_chunks)
# Add the list of chunks for this document to the dictionary with the document id as the key
chunks[doc_id] = doc_chunks
# Check if there are no chunks
if not all_chunks:
return {}
# Get all the embeddings for the document chunks in batches, using get_embeddings
embeddings: List[List[float]] = []
for i in range(0, len(all_chunks), EMBEDDINGS_BATCH_SIZE):
# Get the text of the chunks in the current batch
batch_texts = [
chunk.text for chunk in all_chunks[i : i + EMBEDDINGS_BATCH_SIZE]
]
# Get the embeddings for the batch texts
batch_embeddings = get_embeddings(batch_texts)
# Append the batch embeddings to the embeddings list
embeddings.extend(batch_embeddings)
# Update the document chunk objects with the embeddings
for i, chunk in enumerate(all_chunks):
# Assign the embedding from the embeddings list to the chunk object
chunk.embedding = embeddings[i]
return chunks
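# Hedged usage sketch (not part of the original service): get_text_chunks only needs the
# tiktoken tokenizer above, while get_document_chunks additionally calls the OpenAI
# embeddings API and therefore requires OPENAI_API_KEY to be set. The sample text is synthetic.
if __name__ == "__main__":
    sample_text = "First sentence. Second sentence.\n" * 100
    text_chunks = get_text_chunks(sample_text, chunk_token_size=50)
    print(f"Produced {len(text_chunks)} chunks; first chunk: {text_chunks[0][:80]!r}")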
|
from setuptools import setup, find_packages
setup(
name="neuron_explainer",
packages=find_packages(),
version="0.0.1",
author="OpenAI",
install_requires=[
"httpx>=0.22",
"scikit-learn",
"boostedblob>=0.13.0",
"tiktoken",
"blobfile",
"numpy",
"pytest",
"orjson",
],
url="",
description="",
python_requires='>=3.7',
)
|
def standardize_azure_url(url):
"""Make sure url is converted to url format, not an azure path"""
if url.startswith("az://openaipublic/"):
url = url.replace("az://openaipublic/", "https://openaipublic.blob.core.windows.net/")
return url
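# Hedged usage sketch (not part of the original module): the rewrite only touches the
# "az://openaipublic/" prefix; any other URL is returned unchanged. The example blob path
# is illustrative.
if __name__ == "__main__":
    assert (
        standardize_azure_url("az://openaipublic/neuron-explainer/data/collated-activations/0/0.json")
        == "https://openaipublic.blob.core.windows.net/neuron-explainer/data/collated-activations/0/0.json"
    )
    assert standardize_azure_url("https://example.com/x.json") == "https://example.com/x.json"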
|
import asyncio
import contextlib
import os
import random
import traceback
from asyncio import Semaphore
from functools import wraps
from typing import Any, Callable, Optional
import httpx
import orjson
def is_api_error(err: Exception) -> bool:
if isinstance(err, httpx.HTTPStatusError):
response = err.response
error_data = response.json().get("error", {})
error_message = error_data.get("message")
if response.status_code in [400, 404, 415]:
if error_data.get("type") == "idempotency_error":
print(f"Retrying after idempotency error: {error_message} ({response.url})")
return True
else:
# Invalid request
return False
else:
print(f"Retrying after API error: {error_message} ({response.url})")
return True
elif isinstance(err, httpx.ConnectError):
print(f"Retrying after connection error... ({err.request.url})")
return True
elif isinstance(err, httpx.TimeoutException):
print(f"Retrying after a timeout error... ({err.request.url})")
return True
elif isinstance(err, httpx.ReadError):
print(f"Retrying after a read error... ({err.request.url})")
return True
print(f"Retrying after an unexpected error: {repr(err)}")
traceback.print_tb(err.__traceback__)
return True
def exponential_backoff(
retry_on: Callable[[Exception], bool] = lambda err: True
) -> Callable[[Callable], Callable]:
"""
Returns a decorator which retries the wrapped function as long as the specified retry_on
function returns True for the exception, applying exponential backoff with jitter after
failures, up to a retry limit.
"""
init_delay_s = 1.0
max_delay_s = 10.0
# Roughly 30 minutes before we give up.
max_tries = 200
backoff_multiplier = 2.0
jitter = 0.2
def decorate(f: Callable) -> Callable:
assert asyncio.iscoroutinefunction(f)
@wraps(f)
        async def f_retry(*args: Any, **kwargs: Any) -> Any:
delay_s = init_delay_s
for i in range(max_tries):
try:
return await f(*args, **kwargs)
except Exception as err:
if not retry_on(err) or i == max_tries - 1:
raise
jittered_delay = random.uniform(delay_s * (1 - jitter), delay_s * (1 + jitter))
await asyncio.sleep(jittered_delay)
delay_s = min(delay_s * backoff_multiplier, max_delay_s)
return f_retry
return decorate
API_KEY = os.getenv("OPENAI_API_KEY")
assert API_KEY, "Please set the OPENAI_API_KEY environment variable"
API_HTTP_HEADERS = {
"Content-Type": "application/json",
"Authorization": "Bearer " + API_KEY,
}
BASE_API_URL = "https://api.openai.com/v1"
class ApiClient:
"""Performs inference using the OpenAI API. Supports response caching and concurrency limits."""
def __init__(
self,
model_name: str,
# If set, no more than this number of HTTP requests will be made concurrently.
max_concurrent: Optional[int] = None,
# Whether to cache request/response pairs in memory to avoid duplicating requests.
cache: bool = False,
):
self.model_name = model_name
if max_concurrent is not None:
self._concurrency_check: Optional[Semaphore] = Semaphore(max_concurrent)
else:
self._concurrency_check = None
if cache:
self._cache: Optional[dict[str, Any]] = {}
else:
self._cache = None
@exponential_backoff(retry_on=is_api_error)
async def make_request(
self, timeout_seconds: Optional[int] = None, **kwargs: Any
) -> dict[str, Any]:
if self._cache is not None:
key = orjson.dumps(kwargs)
if key in self._cache:
return self._cache[key]
async with contextlib.AsyncExitStack() as stack:
if self._concurrency_check is not None:
await stack.enter_async_context(self._concurrency_check)
http_client = await stack.enter_async_context(
httpx.AsyncClient(timeout=timeout_seconds)
)
# If the request has a "messages" key, it should be sent to the /chat/completions
# endpoint. Otherwise, it should be sent to the /completions endpoint.
url = BASE_API_URL + ("/chat/completions" if "messages" in kwargs else "/completions")
kwargs["model"] = self.model_name
response = await http_client.post(url, headers=API_HTTP_HEADERS, json=kwargs)
# The response json has useful information but the exception doesn't include it, so print it
# out then reraise.
try:
response.raise_for_status()
except Exception as e:
print(response.json())
raise e
if self._cache is not None:
self._cache[key] = response.json()
return response.json()
if __name__ == "__main__":
async def main() -> None:
client = ApiClient(model_name="gpt-3.5-turbo", max_concurrent=1)
print(await client.make_request(prompt="Why did the chicken cross the road?", max_tokens=9))
asyncio.run(main())
|
"""Utilities for formatting activation records into prompts."""
import math
from typing import Optional, Sequence
from neuron_explainer.activations.activations import ActivationRecord
UNKNOWN_ACTIVATION_STRING = "unknown"
def relu(x: float) -> float:
return max(0.0, x)
def calculate_max_activation(activation_records: Sequence[ActivationRecord]) -> float:
"""Return the maximum activation value of the neuron across all the activation records."""
flattened = [
# Relu is used to assume any values less than 0 are indicating the neuron is in the resting
# state. This is a simplifying assumption that works with relu/gelu.
max(relu(x) for x in activation_record.activations)
for activation_record in activation_records
]
return max(flattened)
def normalize_activations(activation_record: list[float], max_activation: float) -> list[int]:
"""Convert raw neuron activations to integers on the range [0, 10]."""
if max_activation <= 0:
return [0 for x in activation_record]
# Relu is used to assume any values less than 0 are indicating the neuron is in the resting
# state. This is a simplifying assumption that works with relu/gelu.
return [min(10, math.floor(10 * relu(x) / max_activation)) for x in activation_record]
def _format_activation_record(
activation_record: ActivationRecord,
max_activation: float,
omit_zeros: bool,
hide_activations: bool = False,
start_index: int = 0,
) -> str:
"""Format neuron activations into a string, suitable for use in prompts."""
tokens = activation_record.tokens
normalized_activations = normalize_activations(activation_record.activations, max_activation)
if omit_zeros:
assert (not hide_activations) and start_index == 0, "Can't hide activations and omit zeros"
tokens = [
token for token, activation in zip(tokens, normalized_activations) if activation > 0
]
normalized_activations = [x for x in normalized_activations if x > 0]
entries = []
assert len(tokens) == len(normalized_activations)
for index, token, activation in zip(range(len(tokens)), tokens, normalized_activations):
activation_string = str(int(activation))
if hide_activations or index < start_index:
activation_string = UNKNOWN_ACTIVATION_STRING
entries.append(f"{token}\t{activation_string}")
return "\n".join(entries)
def format_activation_records(
activation_records: Sequence[ActivationRecord],
max_activation: float,
*,
omit_zeros: bool = False,
start_indices: Optional[list[int]] = None,
hide_activations: bool = False,
) -> str:
"""Format a list of activation records into a string."""
return (
"\n<start>\n"
+ "\n<end>\n<start>\n".join(
[
_format_activation_record(
activation_record,
max_activation,
omit_zeros=omit_zeros,
hide_activations=hide_activations,
start_index=0 if start_indices is None else start_indices[i],
)
for i, activation_record in enumerate(activation_records)
]
)
+ "\n<end>\n"
)
def _format_tokens_for_simulation(tokens: Sequence[str]) -> str:
"""
Format tokens into a string with each token marked as having an "unknown" activation, suitable
for use in prompts.
"""
entries = []
for token in tokens:
entries.append(f"{token}\t{UNKNOWN_ACTIVATION_STRING}")
return "\n".join(entries)
def format_sequences_for_simulation(
all_tokens: Sequence[Sequence[str]],
) -> str:
"""
Format a list of lists of tokens into a string with each token marked as having an "unknown"
activation, suitable for use in prompts.
"""
return (
"\n<start>\n"
+ "\n<end>\n<start>\n".join(
[_format_tokens_for_simulation(tokens) for tokens in all_tokens]
)
+ "\n<end>\n"
)
def non_zero_activation_proportion(
activation_records: Sequence[ActivationRecord], max_activation: float
) -> float:
"""Return the proportion of activation values that aren't zero."""
total_activations_count = sum(
[len(activation_record.activations) for activation_record in activation_records]
)
normalized_activations = [
normalize_activations(activation_record.activations, max_activation)
for activation_record in activation_records
]
non_zero_activations_count = sum(
[len([x for x in activations if x != 0]) for activations in normalized_activations]
)
return non_zero_activations_count / total_activations_count
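# Hedged illustration (not part of the original module): for a single ActivationRecord
# with tokens ["a", "b", "c"], activations [0.0, 1.0, 2.0], and max_activation=2.0,
# normalize_activations yields [0, 5, 10], and format_activation_records returns
# "\n<start>\na\t0\nb\t5\nc\t10\n<end>\n" (one token per line, tab-separated from its
# normalized activation, with <start>/<end> delimiting each record).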
|
# Dataclasses and enums for storing neuron-indexed information about activations. Also, related
# helper functions.
import math
from dataclasses import dataclass, field
from typing import List, Optional, Union
import urllib.request
import blobfile as bf
import boostedblob as bbb
from neuron_explainer.fast_dataclasses import FastDataclass, loads, register_dataclass
from neuron_explainer.azure import standardize_azure_url
@register_dataclass
@dataclass
class ActivationRecord(FastDataclass):
"""Collated lists of tokens and their activations for a single neuron."""
tokens: List[str]
"""Tokens in the text sequence, represented as strings."""
activations: List[float]
"""Raw activation values for the neuron on each token in the text sequence."""
@register_dataclass
@dataclass
class NeuronId(FastDataclass):
"""Identifier for a neuron in an artificial neural network."""
layer_index: int
"""The index of layer the neuron is in. The first layer used during inference has index 0."""
neuron_index: int
"""The neuron's index within in its layer. Indices start from 0 in each layer."""
def _check_slices(
slices_by_split: dict[str, slice],
expected_num_values: int,
) -> None:
"""Assert that the slices are disjoint and fully cover the intended range."""
indices = set()
sum_of_slice_lengths = 0
n_splits = len(slices_by_split.keys())
for s in slices_by_split.values():
subrange = range(expected_num_values)[s]
sum_of_slice_lengths += len(subrange)
indices |= set(subrange)
assert (
sum_of_slice_lengths == expected_num_values
), f"{sum_of_slice_lengths=} != {expected_num_values=}"
stride = n_splits
expected_indices = set.union(
*[set(range(start_index, expected_num_values, stride)) for start_index in range(n_splits)]
)
assert indices == expected_indices, f"{indices=} != {expected_indices=}"
def get_slices_for_splits(
splits: list[str],
num_activation_records_per_split: int,
) -> dict[str, slice]:
"""
Get equal-sized interleaved subsets for each of a list of splits, given the number of elements
to include in each split.
"""
stride = len(splits)
num_activation_records_for_even_splits = num_activation_records_per_split * stride
slices_by_split = {
split: slice(split_index, num_activation_records_for_even_splits, stride)
for split_index, split in enumerate(splits)
}
_check_slices(
slices_by_split=slices_by_split,
expected_num_values=num_activation_records_for_even_splits,
)
return slices_by_split
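# Hedged illustration (not part of the original module): with two splits and two examples
# per split, the interleaved slices cover indices 0..3 exactly once:
#     get_slices_for_splits(["train", "valid"], num_activation_records_per_split=2)
#     == {"train": slice(0, 4, 2), "valid": slice(1, 4, 2)}
# so "train" selects indices {0, 2} and "valid" selects indices {1, 3}.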
@dataclass
class ActivationRecordSliceParams:
"""How to select splits (train, valid, etc.) of activation records."""
n_examples_per_split: Optional[int]
"""The number of examples to include in each split."""
@register_dataclass
@dataclass
class NeuronRecord(FastDataclass):
"""Neuron-indexed activation data, including summary stats and notable activation records."""
neuron_id: NeuronId
"""Identifier for the neuron."""
random_sample: list[ActivationRecord] = field(default_factory=list)
"""
Random activation records for this neuron. The random sample is independent from those used for
other neurons.
"""
random_sample_by_quantile: Optional[list[list[ActivationRecord]]] = None
"""
Random samples of activation records in each of the specified quantiles. None if quantile
tracking is disabled.
"""
quantile_boundaries: Optional[list[float]] = None
"""Boundaries of the quantiles used to generate the random_sample_by_quantile field."""
# Moments of activations
mean: Optional[float] = math.nan
variance: Optional[float] = math.nan
skewness: Optional[float] = math.nan
kurtosis: Optional[float] = math.nan
most_positive_activation_records: list[ActivationRecord] = field(default_factory=list)
"""
Activation records with the most positive figure of merit value for this neuron over all dataset
examples.
"""
@property
def max_activation(self) -> float:
"""Return the maximum activation value over all top-activating activation records."""
return max([max(ar.activations) for ar in self.most_positive_activation_records])
def _get_top_activation_slices(
self, activation_record_slice_params: ActivationRecordSliceParams
) -> dict[str, slice]:
splits = ["train", "calibration", "valid", "test"]
n_examples_per_split = activation_record_slice_params.n_examples_per_split
if n_examples_per_split is None:
n_examples_per_split = len(self.most_positive_activation_records) // len(splits)
assert len(self.most_positive_activation_records) >= n_examples_per_split * len(splits)
return get_slices_for_splits(splits, n_examples_per_split)
def _get_random_activation_slices(
self, activation_record_slice_params: ActivationRecordSliceParams
) -> dict[str, slice]:
splits = ["calibration", "valid", "test"]
n_examples_per_split = activation_record_slice_params.n_examples_per_split
if n_examples_per_split is None:
n_examples_per_split = len(self.random_sample) // len(splits)
# NOTE: this assert could trigger on some old datasets with only 10 random samples, in which case you may have to remove "test" from the set of splits
assert len(self.random_sample) >= n_examples_per_split * len(splits)
return get_slices_for_splits(splits, n_examples_per_split)
def train_activation_records(
self,
activation_record_slice_params: ActivationRecordSliceParams,
) -> list[ActivationRecord]:
"""
Train split, typically used for generating explanations. Consists exclusively of
top-activating records since context window limitations make it difficult to include
random records.
"""
return self.most_positive_activation_records[
self._get_top_activation_slices(activation_record_slice_params)["train"]
]
def calibration_activation_records(
self,
activation_record_slice_params: ActivationRecordSliceParams,
) -> list[ActivationRecord]:
"""
Calibration split, typically used for calibrating neuron simulations. See
http://go/neuron_explanation_methodology for an explanation of calibration. Consists of
top-activating records and random records in a 1:1 ratio.
"""
return (
self.most_positive_activation_records[
self._get_top_activation_slices(activation_record_slice_params)["calibration"]
]
+ self.random_sample[
self._get_random_activation_slices(activation_record_slice_params)["calibration"]
]
)
def valid_activation_records(
self,
activation_record_slice_params: ActivationRecordSliceParams,
) -> list[ActivationRecord]:
"""
Validation split, typically used for evaluating explanations, either automatically with
simulation + correlation coefficient scoring, or manually by humans. Consists of
top-activating records and random records in a 1:1 ratio.
"""
return (
self.most_positive_activation_records[
self._get_top_activation_slices(activation_record_slice_params)["valid"]
]
+ self.random_sample[
self._get_random_activation_slices(activation_record_slice_params)["valid"]
]
)
def test_activation_records(
self,
activation_record_slice_params: ActivationRecordSliceParams,
) -> list[ActivationRecord]:
"""
Test split, typically used for explanation evaluations that can't use the validation split.
Consists of top-activating records and random records in a 1:1 ratio.
"""
return (
self.most_positive_activation_records[
self._get_top_activation_slices(activation_record_slice_params)["test"]
]
+ self.random_sample[
self._get_random_activation_slices(activation_record_slice_params)["test"]
]
)
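# Illustrative sketch of how the split accessors above are typically used (assumes a NeuronRecord
# instance called `neuron_record` with enough top-activating and random records for the requested
# splits):
#
#     params = ActivationRecordSliceParams(n_examples_per_split=5)
#     train = neuron_record.train_activation_records(params)
#     valid = neuron_record.valid_activation_records(params)
#     # `train` holds top-activating records only; `valid` mixes top-activating and random
#     # records in a 1:1 ratio, as described in the docstrings above.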
def neuron_exists(
dataset_path: str, layer_index: Union[str, int], neuron_index: Union[str, int]
) -> bool:
"""Return whether the specified neuron exists."""
file = bf.join(dataset_path, "neurons", str(layer_index), f"{neuron_index}.json")
return bf.exists(file)
def load_neuron(
layer_index: Union[str, int],
neuron_index: Union[str, int],
dataset_path: str = "https://openaipublic.blob.core.windows.net/neuron-explainer/data/collated-activations",
) -> NeuronRecord:
"""Load the NeuronRecord for the specified neuron."""
url = "/".join([dataset_path, str(layer_index), f"{neuron_index}.json"])
url = standardize_azure_url(url)
with urllib.request.urlopen(url) as f:
neuron_record = loads(f.read())
if not isinstance(neuron_record, NeuronRecord):
raise ValueError(
f"Stored data incompatible with current version of NeuronRecord dataclass."
)
return neuron_record
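# Illustrative usage sketch (assumes network access to the public collated-activations dataset and
# that the requested layer/neuron indices exist; adjust them as needed):
#
#     record = load_neuron(layer_index=9, neuron_index=10)
#     print(record.neuron_id, record.max_activation)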
@bbb.ensure_session
async def load_neuron_async(
layer_index: Union[str, int],
neuron_index: Union[str, int],
dataset_path: str = "az://openaipublic/neuron-explainer/data/collated-activations",
) -> NeuronRecord:
"""Async version of load_neuron."""
file = bf.join(dataset_path, str(layer_index), f"{neuron_index}.json")
return await read_neuron_file(file)
@bbb.ensure_session
async def read_neuron_file(neuron_filename: str) -> NeuronRecord:
"""Like load_neuron_async, but takes a raw neuron filename."""
raw_contents = await bbb.read.read_single(neuron_filename)
neuron_record = loads(raw_contents.decode("utf-8"))
if not isinstance(neuron_record, NeuronRecord):
raise ValueError(
f"Stored data incompatible with current version of NeuronRecord dataclass."
)
return neuron_record
def get_sorted_neuron_indices(dataset_path: str, layer_index: Union[str, int]) -> List[int]:
"""Returns the indices of all neurons in this layer, in ascending order."""
layer_dir = bf.join(dataset_path, "neurons", str(layer_index))
return sorted(
[int(f.split(".")[0]) for f in bf.listdir(layer_dir) if f.split(".")[0].isnumeric()]
)
def get_sorted_layers(dataset_path: str) -> List[str]:
"""
Return the indices of all layers in this dataset, in ascending numerical order, as strings.
"""
return [
str(x)
for x in sorted(
[int(x) for x in bf.listdir(bf.join(dataset_path, "neurons")) if x.isnumeric()]
)
]
|
from dataclasses import dataclass
from typing import List, Union
import blobfile as bf
from neuron_explainer.fast_dataclasses import FastDataclass, loads, register_dataclass
from neuron_explainer.azure import standardize_azure_url
import urllib.request
@register_dataclass
@dataclass
class TokensAndWeights(FastDataclass):
tokens: List[str]
strengths: List[float]
@register_dataclass
@dataclass
class WeightBasedSummaryOfNeuron(FastDataclass):
input_positive: TokensAndWeights
input_negative: TokensAndWeights
output_positive: TokensAndWeights
output_negative: TokensAndWeights
def load_token_weight_connections_of_neuron(
layer_index: Union[str, int],
neuron_index: Union[str, int],
dataset_path: str = "https://openaipublic.blob.core.windows.net/neuron-explainer/data/related-tokens/weight-based",
) -> WeightBasedSummaryOfNeuron:
"""Load the TokenLookupTableSummaryOfNeuron for the specified neuron."""
url = "/".join([dataset_path, str(layer_index), f"{neuron_index}.json"])
url = standardize_azure_url(url)
with urllib.request.urlopen(url) as f:
return loads(f.read(), backwards_compatible=False)
@register_dataclass
@dataclass
class TokenLookupTableSummaryOfNeuron(FastDataclass):
"""List of tokens and the average activations of a given neuron in response to each
respective token. These are selected from among the tokens in the vocabulary with the
highest average activations across an internet text dataset, with the highest activations
first."""
tokens: List[str]
average_activations: List[float]
def load_token_lookup_table_connections_of_neuron(
layer_index: Union[str, int],
neuron_index: Union[str, int],
dataset_path: str = "https://openaipublic.blob.core.windows.net/neuron-explainer/data/related-tokens/activation-based",
) -> TokenLookupTableSummaryOfNeuron:
"""Load the TokenLookupTableSummaryOfNeuron for the specified neuron."""
url = "/".join([dataset_path, str(layer_index), f"{neuron_index}.json"])
url = standardize_azure_url(url)
with urllib.request.urlopen(url) as f:
return loads(f.read(), backwards_compatible=False)
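# Illustrative usage sketch (assumes network access to the public related-tokens data and that the
# requested layer/neuron indices exist):
#
#     weight_summary = load_token_weight_connections_of_neuron(9, 10)
#     lookup_summary = load_token_lookup_table_connections_of_neuron(9, 10)
#     print(weight_summary.input_positive.tokens[:5])
#     print(lookup_summary.tokens[:5], lookup_summary.average_activations[:5])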
|
from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
from neuron_explainer.explanations.prompt_builder import HarmonyMessage, PromptFormat, Role
from neuron_explainer.explanations.simulator import (
ExplanationNeuronSimulator,
ExplanationTokenByTokenSimulator,
)
def test_make_explanation_simulation_prompt_if_format() -> None:
expected_prompt = """We're studying neurons in a neural network.
Each neuron looks for some particular thing in a short document.
Look at summary of what the neuron does, and try to predict how it will fire on each token.
The activation format is token<tab>activation, activations go from 0 to 10, "unknown" indicates an unknown activation. Most activations will be 0.
Neuron 1
Explanation of neuron 1 behavior: the main thing this neuron does is find vowels
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d unknown
e 10
f 0
<end>
Neuron 2
Explanation of neuron 2 behavior: the main thing this neuron does is find EXPLANATION<|endofprompt|>
Activations:
<start>
0 unknown
1 unknown
2 unknown
<end>
"""
prompt = ExplanationNeuronSimulator(
model_name="text-davinci-003",
explanation="EXPLANATION",
few_shot_example_set=FewShotExampleSet.TEST,
prompt_format=PromptFormat.INSTRUCTION_FOLLOWING,
).make_simulation_prompt(
tokens=[str(x) for x in range(3)],
)
assert prompt == expected_prompt
def test_make_explanation_simulation_prompt_harmony_format() -> None:
expected_prompt = [
HarmonyMessage(
role=Role.SYSTEM,
content="""We're studying neurons in a neural network.
Each neuron looks for some particular thing in a short document.
Look at summary of what the neuron does, and try to predict how it will fire on each token.
The activation format is token<tab>activation, activations go from 0 to 10, "unknown" indicates an unknown activation. Most activations will be 0.
""",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 1
Explanation of neuron 1 behavior: the main thing this neuron does is find vowels""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content="""
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d unknown
e 10
f 0
<end>
""",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 2
Explanation of neuron 2 behavior: the main thing this neuron does is find EXPLANATION""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content="""
Activations:
<start>
0 unknown
1 unknown
2 unknown
<end>
""",
),
]
prompt = ExplanationNeuronSimulator(
model_name="gpt-4",
explanation="EXPLANATION",
few_shot_example_set=FewShotExampleSet.TEST,
prompt_format=PromptFormat.HARMONY_V4,
).make_simulation_prompt(
tokens=[str(x) for x in range(3)],
)
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
for actual_message, expected_message in zip(prompt, expected_prompt):
assert actual_message["role"] == expected_message["role"]
assert actual_message["content"] == expected_message["content"]
assert prompt == expected_prompt
def test_make_token_by_token_simulation_prompt_if_format() -> None:
expected_prompt = """We're studying neurons in a neural network. Each neuron looks for some particular thing in a short document. Look at an explanation of what the neuron does, and try to predict its activations on a particular token.
The activation format is token<tab>activation, and activations range from 0 to 10. Most activations will be 0.
Neuron 1
Explanation of neuron 1 behavior: the main thing this neuron does is find vowels
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
Now, we're going predict the activation of a new neuron on a single token, following the same rules as the examples above. Activations still range from 0 to 10.
Neuron 2
Explanation of neuron 2 behavior: the main thing this neuron does is find numbers and nothing else
Text:
ghi
Last token in the text:
i
Last token activation, considering the token in the context in which it appeared in the text:
10
Neuron 3
Explanation of neuron 3 behavior: the main thing this neuron does is find numbers and nothing else
Text:
01
Last token in the text:
1
Last token activation, considering the token in the context in which it appeared in the text:
<|endofprompt|>"""
prompt = ExplanationTokenByTokenSimulator(
model_name="text-davinci-003",
explanation="EXPLANATION",
few_shot_example_set=FewShotExampleSet.TEST,
prompt_format=PromptFormat.INSTRUCTION_FOLLOWING,
).make_single_token_simulation_prompt(
tokens=[str(x) for x in range(3)],
explanation="numbers and nothing else",
token_index_to_score=1,
)
assert prompt == expected_prompt
def test_make_token_by_token_simulation_prompt_harmony_format() -> None:
expected_prompt = [
HarmonyMessage(
role=Role.SYSTEM,
content="""We're studying neurons in a neural network. Each neuron looks for some particular thing in a short document. Look at an explanation of what the neuron does, and try to predict its activations on a particular token.
The activation format is token<tab>activation, and activations range from 0 to 10. Most activations will be 0.
""",
),
HarmonyMessage(
role=Role.USER,
content="""Neuron 1
Explanation of neuron 1 behavior: the main thing this neuron does is find vowels
""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content="""Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
""",
),
HarmonyMessage(
role=Role.SYSTEM,
content="Now, we're going predict the activation of a new neuron on a single token, following the same rules as the examples above. Activations still range from 0 to 10.",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 2
Explanation of neuron 2 behavior: the main thing this neuron does is find numbers and nothing else
Text:
ghi
Last token in the text:
i
Last token activation, considering the token in the context in which it appeared in the text:
""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content="""10
""",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 3
Explanation of neuron 3 behavior: the main thing this neuron does is find numbers and nothing else
Text:
01
Last token in the text:
1
Last token activation, considering the token in the context in which it appeared in the text:
""",
),
]
prompt = ExplanationTokenByTokenSimulator(
model_name="gpt-4",
explanation="EXPLANATION",
few_shot_example_set=FewShotExampleSet.TEST,
prompt_format=PromptFormat.HARMONY_V4,
).make_single_token_simulation_prompt(
tokens=[str(x) for x in range(3)],
explanation="numbers and nothing else",
token_index_to_score=1,
)
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
for actual_message, expected_message in zip(prompt, expected_prompt):
assert actual_message["role"] == expected_message["role"]
assert actual_message["content"] == expected_message["content"]
assert prompt == expected_prompt
|
import json
import os
from dataclasses import dataclass
from neuron_explainer.activations.activations import ActivationRecord
@dataclass(frozen=True)
class Puzzle:
"""A puzzle is a ground truth explanation, a collection of sentences (stored as ActivationRecords) with activations
    according to that explanation, and a collection of false explanations."""
name: str
explanation: str
activation_records: list[ActivationRecord]
false_explanations: list[str]
def convert_puzzle_to_tokenized_sentences(puzzle: Puzzle) -> list[list[str]]:
"""Converts a puzzle to a list of tokenized sentences."""
return [record.tokens for record in puzzle.activation_records]
def convert_puzzle_dict_to_puzzle(puzzle_dict: dict) -> Puzzle:
"""Converts a json dictionary representation of a puzzle to the Puzzle class."""
puzzle_activation_records = []
for sentence in puzzle_dict["sentences"]:
# Token-activation pairs are listed as either a string or a list of a string and a float. If it is a list, the float is the activation.
# If it is only a string, the activation is assumed to be 0. This is useful for readability and reducing redundancy in the data.
tokens = [t[0] if type(t) is list else t for t in sentence]
assert all([type(t) is str for t in tokens]), "All tokens must be strings"
activations = [float(t[1]) if type(t) is list else 0.0 for t in sentence]
assert all([type(t) is float for t in activations]), "All activations must be floats"
puzzle_activation_records.append(ActivationRecord(tokens=tokens, activations=activations))
return Puzzle(
name=puzzle_dict["name"],
explanation=puzzle_dict["explanation"],
activation_records=puzzle_activation_records,
false_explanations=puzzle_dict["false_explanations"],
)
PUZZLES_BY_NAME: dict[str, Puzzle] = dict()
script_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(script_dir, "puzzles.json"), "r") as f:
puzzle_dicts = json.loads(f.read())
for name in puzzle_dicts.keys():
PUZZLES_BY_NAME[name] = convert_puzzle_dict_to_puzzle(puzzle_dicts[name])
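# Illustrative usage sketch (assumes puzzles.json sits next to this file and defines at least one
# puzzle; the puzzle name used here is hypothetical):
#
#     puzzle = PUZZLES_BY_NAME["some_puzzle_name"]
#     sentences = convert_puzzle_to_tokenized_sentences(puzzle)
#     print(puzzle.explanation, len(sentences))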
|
from __future__ import annotations
from enum import Enum
from typing import TypedDict, Union
import tiktoken
HarmonyMessage = TypedDict(
"HarmonyMessage",
{
"role": str,
"content": str,
},
)
class PromptFormat(str, Enum):
"""
Different ways of formatting the components of a prompt into the format accepted by the relevant
API server endpoint.
"""
NONE = "none"
"""Suitable for use with models that don't use special tokens for instructions."""
INSTRUCTION_FOLLOWING = "instruction_following"
"""Suitable for IF models that use <|endofprompt|>."""
HARMONY_V4 = "harmony_v4"
"""
Suitable for Harmony models that use a structured turn-taking role+content format. Generates a
list of HarmonyMessage dicts that can be sent to the /chat/completions endpoint.
"""
@classmethod
def from_string(cls, s: str) -> PromptFormat:
for prompt_format in cls:
if prompt_format.value == s:
return prompt_format
raise ValueError(f"{s} is not a valid PromptFormat")
class Role(str, Enum):
"""See https://platform.openai.com/docs/guides/chat"""
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
class PromptBuilder:
"""Class for accumulating components of a prompt and then formatting them into an output."""
def __init__(self) -> None:
self._messages: list[HarmonyMessage] = []
def add_message(self, role: Role, message: str) -> None:
self._messages.append(HarmonyMessage(role=role, content=message))
def prompt_length_in_tokens(self, prompt_format: PromptFormat) -> int:
# TODO(sbills): Make the model/encoding configurable. This implementation assumes GPT-4.
encoding = tiktoken.get_encoding("cl100k_base")
if prompt_format == PromptFormat.HARMONY_V4:
# Approximately-correct implementation adapted from this documentation:
# https://platform.openai.com/docs/guides/chat/introduction
num_tokens = 0
for message in self._messages:
num_tokens += (
4 # every message follows <|im_start|>{role/name}\n{content}<|im_end|>\n
)
num_tokens += len(encoding.encode(message["content"], allowed_special="all"))
num_tokens += 2 # every reply is primed with <|im_start|>assistant
return num_tokens
else:
prompt_str = self.build(prompt_format)
assert isinstance(prompt_str, str)
return len(encoding.encode(prompt_str, allowed_special="all"))
def build(
self, prompt_format: PromptFormat, *, allow_extra_system_messages: bool = False
) -> Union[str, list[HarmonyMessage]]:
"""
Validates the messages added so far (reasonable alternation of assistant vs. user, etc.)
and returns either a regular string (maybe with <|endofprompt|> tokens) or a list of
HarmonyMessages suitable for use with the /chat/completions endpoint.
The `allow_extra_system_messages` parameter allows the caller to specify that the prompt
should be allowed to contain system messages after the very first one.
"""
# Create a deep copy of the messages so we can modify it and so that the caller can't
# modify the internal state of this object.
messages = [message.copy() for message in self._messages]
expected_next_role = Role.SYSTEM
for message in messages:
role = message["role"]
assert role == expected_next_role or (
allow_extra_system_messages and role == Role.SYSTEM
), f"Expected message from {expected_next_role} but got message from {role}"
if role == Role.SYSTEM:
expected_next_role = Role.USER
elif role == Role.USER:
expected_next_role = Role.ASSISTANT
elif role == Role.ASSISTANT:
expected_next_role = Role.USER
if prompt_format == PromptFormat.INSTRUCTION_FOLLOWING:
last_user_message = None
for message in messages:
if message["role"] == Role.USER:
last_user_message = message
assert last_user_message is not None
last_user_message["content"] += "<|endofprompt|>"
if prompt_format == PromptFormat.HARMONY_V4:
return messages
elif prompt_format in [PromptFormat.NONE, PromptFormat.INSTRUCTION_FOLLOWING]:
return "".join(message["content"] for message in messages)
else:
raise ValueError(f"Unknown prompt format: {prompt_format}")
|
"""Uses API calls to generate explanations of neuron behavior."""
from __future__ import annotations
import logging
import re
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Optional, Sequence, Union
from neuron_explainer.activations.activation_records import (
calculate_max_activation,
format_activation_records,
non_zero_activation_proportion,
)
from neuron_explainer.activations.activations import ActivationRecord
from neuron_explainer.api_client import ApiClient
from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
from neuron_explainer.explanations.prompt_builder import (
HarmonyMessage,
PromptBuilder,
PromptFormat,
Role,
)
from neuron_explainer.explanations.token_space_few_shot_examples import (
TokenSpaceFewShotExampleSet,
)
logger = logging.getLogger(__name__)
# TODO(williamrs): This prefix may not work well for some things, like predicting the next token.
# Try other options like "this neuron activates for".
EXPLANATION_PREFIX = "the main thing this neuron does is find"
def _split_numbered_list(text: str) -> list[str]:
"""Split a numbered list into a list of strings."""
lines = re.split(r"\n\d+\.", text)
# Strip the leading whitespace from each line.
return [line.lstrip() for line in lines]
def _remove_final_period(text: str) -> str:
"""Strip a final period or period-space from a string."""
if text.endswith("."):
return text[:-1]
elif text.endswith(". "):
return text[:-2]
return text
class ContextSize(int, Enum):
TWO_K = 2049
FOUR_K = 4097
@classmethod
def from_int(cls, i: int) -> ContextSize:
for context_size in cls:
if context_size.value == i:
return context_size
raise ValueError(f"{i} is not a valid ContextSize")
HARMONY_V4_MODELS = ["gpt-3.5-turbo", "gpt-4"]
class NeuronExplainer(ABC):
"""
Abstract base class for Explainer classes that generate explanations from subclass-specific
input data.
"""
def __init__(
self,
model_name: str,
prompt_format: PromptFormat = PromptFormat.HARMONY_V4,
# This parameter lets us adjust the length of the prompt when we're generating explanations
# using older models with shorter context windows. In the future we can use it to experiment
# with longer context windows.
context_size: ContextSize = ContextSize.FOUR_K,
max_concurrent: Optional[int] = 10,
cache: bool = False,
):
if prompt_format == PromptFormat.HARMONY_V4:
assert model_name in HARMONY_V4_MODELS
elif prompt_format in [PromptFormat.NONE, PromptFormat.INSTRUCTION_FOLLOWING]:
assert model_name not in HARMONY_V4_MODELS
else:
raise ValueError(f"Unhandled prompt format {prompt_format}")
self.model_name = model_name
self.prompt_format = prompt_format
self.context_size = context_size
self.client = ApiClient(model_name=model_name, max_concurrent=max_concurrent, cache=cache)
async def generate_explanations(
self,
*,
num_samples: int = 5,
max_tokens: int = 60,
temperature: float = 1.0,
top_p: float = 1.0,
**prompt_kwargs: Any,
) -> list[Any]:
"""Generate explanations based on subclass-specific input data."""
prompt = self.make_explanation_prompt(max_tokens_for_completion=max_tokens, **prompt_kwargs)
generate_kwargs: dict[str, Any] = {
"n": num_samples,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
}
if self.prompt_format == PromptFormat.HARMONY_V4:
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
generate_kwargs["messages"] = prompt
else:
assert isinstance(prompt, str)
generate_kwargs["prompt"] = prompt
response = await self.client.make_request(**generate_kwargs)
logger.debug("response in generate_explanations is %s", response)
if self.prompt_format == PromptFormat.HARMONY_V4:
explanations = [x["message"]["content"] for x in response["choices"]]
elif self.prompt_format in [PromptFormat.NONE, PromptFormat.INSTRUCTION_FOLLOWING]:
explanations = [x["text"] for x in response["choices"]]
else:
raise ValueError(f"Unhandled prompt format {self.prompt_format}")
return self.postprocess_explanations(explanations, prompt_kwargs)
@abstractmethod
def make_explanation_prompt(self, **kwargs: Any) -> Union[str, list[HarmonyMessage]]:
"""
Create a prompt to send to the API to generate one or more explanations.
A prompt can be a simple string, or a list of HarmonyMessages, depending on the PromptFormat
used by this instance.
"""
...
def postprocess_explanations(
self, completions: list[str], prompt_kwargs: dict[str, Any]
) -> list[Any]:
"""Postprocess the completions returned by the API into a list of explanations."""
return completions # no-op by default
def _prompt_is_too_long(
self, prompt_builder: PromptBuilder, max_tokens_for_completion: int
) -> bool:
# We'll get a context size error if the prompt itself plus the maximum number of tokens for
# the completion is longer than the context size.
prompt_length = prompt_builder.prompt_length_in_tokens(self.prompt_format)
if prompt_length + max_tokens_for_completion > self.context_size.value:
print(
f"Prompt is too long: {prompt_length} + {max_tokens_for_completion} > "
f"{self.context_size.value}"
)
return True
return False
class TokenActivationPairExplainer(NeuronExplainer):
"""
Generate explanations of neuron behavior using a prompt with lists of token/activation pairs.
"""
def __init__(
self,
model_name: str,
prompt_format: PromptFormat = PromptFormat.HARMONY_V4,
# This parameter lets us adjust the length of the prompt when we're generating explanations
# using older models with shorter context windows. In the future we can use it to experiment
# with 8k+ context windows.
context_size: ContextSize = ContextSize.FOUR_K,
few_shot_example_set: FewShotExampleSet = FewShotExampleSet.ORIGINAL,
repeat_non_zero_activations: bool = True,
max_concurrent: Optional[int] = 10,
cache: bool = False,
):
super().__init__(
model_name=model_name,
prompt_format=prompt_format,
max_concurrent=max_concurrent,
cache=cache,
)
self.context_size = context_size
self.few_shot_example_set = few_shot_example_set
self.repeat_non_zero_activations = repeat_non_zero_activations
def make_explanation_prompt(self, **kwargs: Any) -> Union[str, list[HarmonyMessage]]:
original_kwargs = kwargs.copy()
all_activation_records: Sequence[ActivationRecord] = kwargs.pop("all_activation_records")
max_activation: float = kwargs.pop("max_activation")
kwargs.setdefault("numbered_list_of_n_explanations", None)
numbered_list_of_n_explanations: Optional[int] = kwargs.pop(
"numbered_list_of_n_explanations"
)
if numbered_list_of_n_explanations is not None:
assert numbered_list_of_n_explanations > 0, numbered_list_of_n_explanations
# This parameter lets us dynamically shrink the prompt if our initial attempt to create it
# results in something that's too long. It's only implemented for the 4k context size.
kwargs.setdefault("omit_n_activation_records", 0)
omit_n_activation_records: int = kwargs.pop("omit_n_activation_records")
max_tokens_for_completion: int = kwargs.pop("max_tokens_for_completion")
assert not kwargs, f"Unexpected kwargs: {kwargs}"
prompt_builder = PromptBuilder()
prompt_builder.add_message(
Role.SYSTEM,
"We're studying neurons in a neural network. Each neuron looks for some particular "
"thing in a short document. Look at the parts of the document the neuron activates for "
"and summarize in a single sentence what the neuron is looking for. Don't list "
"examples of words.\n\nThe activation format is token<tab>activation. Activation "
"values range from 0 to 10. A neuron finding what it's looking for is represented by a "
"non-zero activation value. The higher the activation value, the stronger the match.",
)
few_shot_examples = self.few_shot_example_set.get_examples()
num_omitted_activation_records = 0
for i, few_shot_example in enumerate(few_shot_examples):
few_shot_activation_records = few_shot_example.activation_records
if self.context_size == ContextSize.TWO_K:
# If we're using a 2k context window, we only have room for one activation record
# per few-shot example. (Two few-shot examples with one activation record each seems
# to work better than one few-shot example with two activation records, in local
# testing.)
few_shot_activation_records = few_shot_activation_records[:1]
elif (
self.context_size == ContextSize.FOUR_K
and num_omitted_activation_records < omit_n_activation_records
):
# Drop the last activation record for this few-shot example to save tokens, assuming
# there are at least two activation records.
if len(few_shot_activation_records) > 1:
print(f"Warning: omitting activation record from few-shot example {i}")
few_shot_activation_records = few_shot_activation_records[:-1]
num_omitted_activation_records += 1
self._add_per_neuron_explanation_prompt(
prompt_builder,
few_shot_activation_records,
i,
calculate_max_activation(few_shot_example.activation_records),
numbered_list_of_n_explanations=numbered_list_of_n_explanations,
explanation=few_shot_example.explanation,
)
self._add_per_neuron_explanation_prompt(
prompt_builder,
# If we're using a 2k context window, we only have room for two of the activation
# records.
all_activation_records[:2]
if self.context_size == ContextSize.TWO_K
else all_activation_records,
len(few_shot_examples),
max_activation,
numbered_list_of_n_explanations=numbered_list_of_n_explanations,
explanation=None,
)
# If the prompt is too long *and* we omitted the specified number of activation records, try
# again, omitting one more. (If we didn't make the specified number of omissions, we're out
# of opportunities to omit records, so we just return the prompt as-is.)
if (
self._prompt_is_too_long(prompt_builder, max_tokens_for_completion)
and num_omitted_activation_records == omit_n_activation_records
):
original_kwargs["omit_n_activation_records"] = omit_n_activation_records + 1
return self.make_explanation_prompt(**original_kwargs)
return prompt_builder.build(self.prompt_format)
def _add_per_neuron_explanation_prompt(
self,
prompt_builder: PromptBuilder,
activation_records: Sequence[ActivationRecord],
index: int,
max_activation: float,
# When set, this indicates that the prompt should solicit a numbered list of the given
# number of explanations, rather than a single explanation.
numbered_list_of_n_explanations: Optional[int],
explanation: Optional[str], # None means this is the end of the full prompt.
) -> None:
max_activation = calculate_max_activation(activation_records)
user_message = f"""
Neuron {index + 1}
Activations:{format_activation_records(activation_records, max_activation, omit_zeros=False)}"""
# We repeat the non-zero activations only if it was requested and if the proportion of
# non-zero activations isn't too high.
if (
self.repeat_non_zero_activations
and non_zero_activation_proportion(activation_records, max_activation) < 0.2
):
user_message += (
f"\nSame activations, but with all zeros filtered out:"
f"{format_activation_records(activation_records, max_activation, omit_zeros=True)}"
)
if numbered_list_of_n_explanations is None:
user_message += f"\nExplanation of neuron {index + 1} behavior:"
assistant_message = ""
# For the IF format, we want <|endofprompt|> to come before the explanation prefix.
if self.prompt_format == PromptFormat.INSTRUCTION_FOLLOWING:
assistant_message += f" {EXPLANATION_PREFIX}"
else:
user_message += f" {EXPLANATION_PREFIX}"
prompt_builder.add_message(Role.USER, user_message)
if explanation is not None:
assistant_message += f" {explanation}."
if assistant_message:
prompt_builder.add_message(Role.ASSISTANT, assistant_message)
else:
if explanation is None:
# For the final neuron, we solicit a numbered list of explanations.
prompt_builder.add_message(
Role.USER,
f"""\nHere are {numbered_list_of_n_explanations} possible explanations for neuron {index + 1} behavior, each beginning with "{EXPLANATION_PREFIX}":\n1. {EXPLANATION_PREFIX}""",
)
else:
# For the few-shot examples, we only present one explanation, but we present it as a
# numbered list.
prompt_builder.add_message(
Role.USER,
f"""\nHere is 1 possible explanation for neuron {index + 1} behavior, beginning with "{EXPLANATION_PREFIX}":\n1. {EXPLANATION_PREFIX}""",
)
prompt_builder.add_message(Role.ASSISTANT, f" {explanation}.")
def postprocess_explanations(
self, completions: list[str], prompt_kwargs: dict[str, Any]
) -> list[Any]:
"""Postprocess the explanations returned by the API"""
numbered_list_of_n_explanations = prompt_kwargs.get("numbered_list_of_n_explanations")
if numbered_list_of_n_explanations is None:
return completions
else:
all_explanations = []
for completion in completions:
for explanation in _split_numbered_list(completion):
if explanation.startswith(EXPLANATION_PREFIX):
explanation = explanation[len(EXPLANATION_PREFIX) :]
all_explanations.append(explanation.strip())
return all_explanations
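# Illustrative usage sketch (a sketch only; requires valid API credentials for ApiClient and, inside
# an async function, a list of ActivationRecord objects here called `activation_records`):
#
#     explainer = TokenActivationPairExplainer(
#         model_name="gpt-4",
#         prompt_format=PromptFormat.HARMONY_V4,
#     )
#     explanations = await explainer.generate_explanations(
#         all_activation_records=activation_records,
#         max_activation=calculate_max_activation(activation_records),
#         num_samples=1,
#     )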
class TokenSpaceRepresentationExplainer(NeuronExplainer):
"""
Generate explanations of arbitrary lists of tokens which disproportionately activate a
particular neuron. These lists of tokens can be generated in various ways. As an example, in one
set of experiments, we compute the average activation for each neuron conditional on each token
that appears in an internet text corpus. We then sort the tokens by their average activation,
and show 50 of the top 100 tokens. Other techniques that could be used include taking the top
tokens in the logit lens or tuned lens representations of a neuron.
"""
def __init__(
self,
model_name: str,
prompt_format: PromptFormat = PromptFormat.HARMONY_V4,
context_size: ContextSize = ContextSize.FOUR_K,
few_shot_example_set: TokenSpaceFewShotExampleSet = TokenSpaceFewShotExampleSet.ORIGINAL,
use_few_shot: bool = False,
output_numbered_list: bool = False,
max_concurrent: Optional[int] = 10,
cache: bool = False,
):
super().__init__(
model_name=model_name,
prompt_format=prompt_format,
context_size=context_size,
max_concurrent=max_concurrent,
cache=cache,
)
self.use_few_shot = use_few_shot
self.output_numbered_list = output_numbered_list
if self.use_few_shot:
assert few_shot_example_set is not None
self.few_shot_examples: Optional[TokenSpaceFewShotExampleSet] = few_shot_example_set
else:
self.few_shot_examples = None
self.prompt_prefix = (
"We're studying neurons in a neural network. Each neuron looks for some particular "
"kind of token (which can be a word, or part of a word). Look at the tokens the neuron "
"activates for (listed below) and summarize in a single sentence what the neuron is "
"looking for. Don't list examples of words."
)
def make_explanation_prompt(self, **kwargs: Any) -> Union[str, list[HarmonyMessage]]:
tokens: list[str] = kwargs.pop("tokens")
max_tokens_for_completion = kwargs.pop("max_tokens_for_completion")
assert not kwargs, f"Unexpected kwargs: {kwargs}"
        # Note that this does not preserve the precise tokens: wrapping each token in quotes and
        # joining with ", " means the model may tokenize a token differently from the original,
        # e.g. a token without a leading space may become its leading-space variant.
# TODO(dan): Try out other variants, including "\n".join(...) and ",".join(...)
stringified_tokens = ", ".join([f"'{t}'" for t in tokens])
prompt_builder = PromptBuilder()
prompt_builder.add_message(Role.SYSTEM, self.prompt_prefix)
if self.use_few_shot:
self._add_few_shot_examples(prompt_builder)
self._add_neuron_specific_prompt(prompt_builder, stringified_tokens, explanation=None)
if self._prompt_is_too_long(prompt_builder, max_tokens_for_completion):
raise ValueError(f"Prompt too long: {prompt_builder.build(self.prompt_format)}")
else:
return prompt_builder.build(self.prompt_format)
def _add_few_shot_examples(self, prompt_builder: PromptBuilder) -> None:
"""
Append few-shot examples to the prompt. Each one consists of a comma-delimited list of
        tokens and corresponding explanations, as defined in
        neuron_explainer/explanations/token_space_few_shot_examples.py.
"""
assert self.few_shot_examples is not None
few_shot_example_list = self.few_shot_examples.get_examples()
if self.output_numbered_list:
raise NotImplementedError("Numbered list output not supported for few-shot examples")
else:
for few_shot_example in few_shot_example_list:
self._add_neuron_specific_prompt(
prompt_builder,
", ".join([f"'{t}'" for t in few_shot_example.tokens]),
explanation=few_shot_example.explanation,
)
def _add_neuron_specific_prompt(
self,
prompt_builder: PromptBuilder,
stringified_tokens: str,
explanation: Optional[str],
) -> None:
"""
Append a neuron-specific prompt to the prompt builder. The prompt consists of a list of
tokens followed by either an explanation (if one is passed, for few shot examples) or by
the beginning of a completion, to be completed by the model with an explanation.
"""
user_message = f"\n\n\n\nTokens:\n{stringified_tokens}\n\nExplanation:\n"
assistant_message = ""
looking_for = "This neuron is looking for"
if self.prompt_format == PromptFormat.INSTRUCTION_FOLLOWING:
# We want <|endofprompt|> to come before "This neuron is looking for" in the IF format.
assistant_message += looking_for
else:
user_message += looking_for
if self.output_numbered_list:
start_of_list = "\n1."
if self.prompt_format == PromptFormat.INSTRUCTION_FOLLOWING:
assistant_message += start_of_list
else:
user_message += start_of_list
if explanation is not None:
assistant_message += f"{explanation}."
prompt_builder.add_message(Role.USER, user_message)
if assistant_message:
prompt_builder.add_message(Role.ASSISTANT, assistant_message)
def postprocess_explanations(
self, completions: list[str], prompt_kwargs: dict[str, Any]
) -> list[str]:
if self.output_numbered_list:
# Each list in the top-level list will have multiple explanations (multiple strings).
all_explanations = []
for completion in completions:
for explanation in _split_numbered_list(completion):
if explanation.startswith(EXPLANATION_PREFIX):
explanation = explanation[len(EXPLANATION_PREFIX) :]
all_explanations.append(explanation.strip())
return all_explanations
else:
# Each element in the top-level list will be an explanation as a string.
return [_remove_final_period(explanation) for explanation in completions]
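# Illustrative usage sketch (a sketch only; requires valid API credentials and, inside an async
# function, a list of the neuron's top tokens here called `top_tokens`):
#
#     explainer = TokenSpaceRepresentationExplainer(
#         model_name="gpt-4",
#         use_few_shot=True,
#         few_shot_example_set=TokenSpaceFewShotExampleSet.ORIGINAL,
#     )
#     explanations = await explainer.generate_explanations(tokens=top_tokens, num_samples=1)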
|
# Dataclasses and enums for storing neuron explanations, their scores, and related data. Also,
# related helper functions.
from __future__ import annotations
import json
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import blobfile as bf
import boostedblob as bbb
from neuron_explainer.activations.activations import NeuronId
from neuron_explainer.fast_dataclasses import FastDataclass, loads, register_dataclass
class ActivationScale(str, Enum):
"""Which "units" are stored in the expected_activations/distribution_values fields of a
SequenceSimulation.
This enum identifies whether the values represent real activations of the neuron or something
else. Different scales are not necessarily related by a linear transformation.
"""
NEURON_ACTIVATIONS = "neuron_activations"
"""Values represent real activations of the neuron."""
SIMULATED_NORMALIZED_ACTIVATIONS = "simulated_normalized_activations"
"""
Values represent simulated activations of the neuron, normalized to the range [0, 10]. This
scale is arbitrary and should not be interpreted as a neuron activation.
"""
@register_dataclass
@dataclass
class SequenceSimulation(FastDataclass):
"""The result of a simulation of neuron activations on one text sequence."""
tokens: list[str]
"""The sequence of tokens that was simulated."""
expected_activations: list[float]
"""Expected value of the possibly-normalized activation for each token in the sequence."""
activation_scale: ActivationScale
"""What scale is used for values in the expected_activations field."""
distribution_values: list[list[float]]
"""
For each token in the sequence, a list of values from the discrete distribution of activations
    produced from simulation. Tokens are included here if and only if they are in the top K=15
    tokens predicted by the simulator.
May be transformed to another unit by calibration. When we simulate a neuron, we produce a
discrete distribution with values in the arbitrary discretized space of the neuron, e.g. 10%
chance of 0, 70% chance of 1, 20% chance of 2. Which we store as distribution_values =
[0, 1, 2], distribution_probabilities = [0.1, 0.7, 0.2]. When we transform the distribution to
the real activation units, we can correspondingly transform the values of this distribution
to get a distribution in the units of the neuron. e.g. if the mapping from the discretized space
to the real activation unit of the neuron is f(x) = x/2, then the distribution becomes 10%
chance of 0, 70% chance of 0.5, 20% chance of 1. Which we store as distribution_values =
[0, 0.5, 1], distribution_probabilities = [0.1, 0.7, 0.2].
"""
distribution_probabilities: list[list[float]]
"""
For each token in the sequence, the probability of the corresponding value in
distribution_values.
"""
uncalibrated_simulation: Optional["SequenceSimulation"] = None
"""The result of the simulation before calibration."""
@register_dataclass
@dataclass
class ScoredSequenceSimulation(FastDataclass):
"""
SequenceSimulation result with a score (for that sequence only) and ground truth activations.
"""
simulation: SequenceSimulation
"""The result of a simulation of neuron activations."""
true_activations: List[float]
"""Ground truth activations on the sequence (not normalized)"""
ev_correlation_score: float
"""
Correlation coefficient between the expected values of the normalized activations from the
simulation and the unnormalized true activations of the neuron on the text sequence.
"""
rsquared_score: Optional[float] = None
"""R^2 of the simulated activations."""
absolute_dev_explained_score: Optional[float] = None
"""
Score based on absolute difference between real and simulated activations.
    absolute_dev_explained_score = 1 - mean(abs(real - predicted)) / mean(abs(real))
"""
@register_dataclass
@dataclass
class ScoredSimulation(FastDataclass):
"""Result of scoring a neuron simulation on multiple sequences."""
scored_sequence_simulations: List[ScoredSequenceSimulation]
"""ScoredSequenceSimulation for each sequence"""
ev_correlation_score: Optional[float] = None
"""
Correlation coefficient between the expected values of the normalized activations from the
simulation and the unnormalized true activations on a dataset created from all score_results.
(Note that this is not equivalent to averaging across sequences.)
"""
rsquared_score: Optional[float] = None
"""R^2 of the simulated activations."""
absolute_dev_explained_score: Optional[float] = None
"""
Score based on absolute difference between real and simulated activations.
    absolute_dev_explained_score = 1 - mean(abs(real - predicted)) / mean(abs(real)).
"""
def get_preferred_score(self) -> Optional[float]:
"""
This method may return None in cases where the score is undefined, for example if the
normalized activations were all zero, yielding a correlation coefficient of NaN.
"""
return self.ev_correlation_score
@register_dataclass
@dataclass
class ScoredExplanation(FastDataclass):
"""Simulator parameters and the results of scoring it on multiple sequences"""
explanation: str
"""The explanation used for simulation."""
scored_simulation: ScoredSimulation
"""Result of scoring the neuron simulator on multiple sequences."""
def get_preferred_score(self) -> Optional[float]:
"""
This method may return None in cases where the score is undefined, for example if the
normalized activations were all zero, yielding a correlation coefficient of NaN.
"""
return self.scored_simulation.get_preferred_score()
@register_dataclass
@dataclass
class NeuronSimulationResults(FastDataclass):
"""Simulation results and scores for a neuron."""
neuron_id: NeuronId
scored_explanations: list[ScoredExplanation]
def load_neuron_explanations(
explanations_path: str, layer_index: Union[str, int], neuron_index: Union[str, int]
) -> Optional[NeuronSimulationResults]:
"""Load scored explanations for the specified neuron."""
file = bf.join(explanations_path, str(layer_index), f"{neuron_index}.jsonl")
if not bf.exists(file):
return None
with bf.BlobFile(file) as f:
for line in f:
return loads(line)
return None
@bbb.ensure_session
async def load_neuron_explanations_async(
explanations_path: str, layer_index: Union[str, int], neuron_index: Union[str, int]
) -> Optional[NeuronSimulationResults]:
"""Load scored explanations for the specified neuron, asynchronously."""
return await read_explanation_file(
bf.join(explanations_path, str(layer_index), f"{neuron_index}.jsonl")
)
@bbb.ensure_session
async def read_file(filename: str) -> Optional[str]:
"""Read the contents of the given file as a string, asynchronously."""
try:
raw_contents = await bbb.read.read_single(filename)
except FileNotFoundError:
print(f"Could not read {filename}")
return None
lines = []
for line in raw_contents.decode("utf-8").split("\n"):
if len(line) > 0:
lines.append(line)
assert len(lines) == 1, filename
return lines[0]
@bbb.ensure_session
async def read_explanation_file(explanation_filename: str) -> Optional[NeuronSimulationResults]:
"""Load scored explanations from the given filename, asynchronously."""
line = await read_file(explanation_filename)
return loads(line) if line is not None else None
@bbb.ensure_session
async def read_json_file(filename: str) -> Optional[dict]:
"""Read the contents of the given file as a JSON object, asynchronously."""
line = await read_file(filename)
return json.loads(line) if line is not None else None
def get_numerical_subdirs(dataset_path: str) -> list[str]:
"""Return the names of all numbered subdirectories in the specified directory.
Used to get all layer directories in an explanation directory.
"""
return [
str(x)
for x in sorted(
[
int(x)
for x in bf.listdir(dataset_path)
if bf.isdir(bf.join(dataset_path, x)) and x.isnumeric()
]
)
]
def get_sorted_neuron_indices_from_explanations(
explanations_path: str, layer: Union[str, int]
) -> list[int]:
"""Return the indices of all neurons in this layer, in ascending order."""
layer_dir = bf.join(explanations_path, str(layer))
return sorted(
[int(f.split(".")[0]) for f in bf.listdir(layer_dir) if f.split(".")[0].isnumeric()]
)
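# Illustrative usage sketch (assumes `explanations_path` points at a directory of scored
# explanations laid out as <layer_index>/<neuron_index>.jsonl):
#
#     results = load_neuron_explanations(explanations_path, 9, 10)
#     if results is not None:
#         for scored in results.scored_explanations:
#             print(scored.explanation, scored.get_preferred_score())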
|
from __future__ import annotations
import asyncio
import logging
from typing import Any, Callable, Coroutine, Sequence
import numpy as np
from neuron_explainer.activations.activations import ActivationRecord
from neuron_explainer.explanations.calibrated_simulator import (
CalibratedNeuronSimulator,
LinearCalibratedNeuronSimulator,
)
from neuron_explainer.explanations.explanations import (
ScoredSequenceSimulation,
ScoredSimulation,
SequenceSimulation,
)
from neuron_explainer.explanations.simulator import ExplanationNeuronSimulator, NeuronSimulator
def flatten_list(list_of_lists: Sequence[Sequence[Any]]) -> list[Any]:
return [item for sublist in list_of_lists for item in sublist]
def correlation_score(
real_activations: Sequence[float] | np.ndarray,
predicted_activations: Sequence[float] | np.ndarray,
) -> float:
return np.corrcoef(real_activations, predicted_activations)[0, 1]
def score_from_simulation(
real_activations: ActivationRecord,
simulation: SequenceSimulation,
score_function: Callable[[Sequence[float] | np.ndarray, Sequence[float] | np.ndarray], float],
) -> float:
return score_function(real_activations.activations, simulation.expected_activations)
def rsquared_score_from_sequences(
real_activations: Sequence[float] | np.ndarray,
predicted_activations: Sequence[float] | np.ndarray,
) -> float:
return float(
1
- np.mean(np.square(np.array(real_activations) - np.array(predicted_activations)))
/ np.mean(np.square(np.array(real_activations)))
)
def absolute_dev_explained_score_from_sequences(
real_activations: Sequence[float] | np.ndarray,
predicted_activations: Sequence[float] | np.ndarray,
) -> float:
return float(
1
- np.mean(np.abs(np.array(real_activations) - np.array(predicted_activations)))
/ np.mean(np.abs(np.array(real_activations)))
)
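# Worked example for the three scoring functions above (values chosen purely for illustration):
#
#     real = [0.0, 2.0, 4.0]
#     predicted = [0.0, 1.0, 4.0]
#     correlation_score(real, predicted)                          # ~0.96
#     rsquared_score_from_sequences(real, predicted)              # 1 - (1/3)/(20/3) = 0.95
#     absolute_dev_explained_score_from_sequences(real, predicted)  # 1 - (1/3)/2 ~ 0.83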
async def make_explanation_simulator(
explanation: str,
calibration_activation_records: Sequence[ActivationRecord],
model_name: str,
calibrated_simulator_class: type[CalibratedNeuronSimulator] = LinearCalibratedNeuronSimulator,
) -> CalibratedNeuronSimulator:
"""
Make a simulator that uses an explanation to predict activations and calibrates it on the given
activation records.
"""
simulator = ExplanationNeuronSimulator(model_name, explanation)
calibrated_simulator = calibrated_simulator_class(simulator)
await calibrated_simulator.calibrate(calibration_activation_records)
return calibrated_simulator
async def _simulate_and_score_sequence(
simulator: NeuronSimulator, activations: ActivationRecord
) -> ScoredSequenceSimulation:
"""Score an explanation of a neuron by how well it predicts activations on a sentence."""
simulation = await simulator.simulate(activations.tokens)
logging.debug(simulation)
rsquared_score = score_from_simulation(activations, simulation, rsquared_score_from_sequences)
absolute_dev_explained_score = score_from_simulation(
activations, simulation, absolute_dev_explained_score_from_sequences
)
scored_sequence_simulation = ScoredSequenceSimulation(
simulation=simulation,
true_activations=activations.activations,
ev_correlation_score=score_from_simulation(activations, simulation, correlation_score),
rsquared_score=rsquared_score,
absolute_dev_explained_score=absolute_dev_explained_score,
)
return scored_sequence_simulation
def aggregate_scored_sequence_simulations(
scored_sequence_simulations: list[ScoredSequenceSimulation],
) -> ScoredSimulation:
"""
Aggregate a list of scored sequence simulations. The logic for doing this is non-trivial for EV
scores, since we want to calculate the correlation over all activations from all sequences at
once rather than simply averaging per-sequence correlations.
"""
all_true_activations: list[float] = []
all_expected_values: list[float] = []
for scored_sequence_simulation in scored_sequence_simulations:
all_true_activations.extend(scored_sequence_simulation.true_activations or [])
all_expected_values.extend(scored_sequence_simulation.simulation.expected_activations)
ev_correlation_score = (
correlation_score(all_true_activations, all_expected_values)
if len(all_true_activations) > 0
else None
)
rsquared_score = rsquared_score_from_sequences(all_true_activations, all_expected_values)
absolute_dev_explained_score = absolute_dev_explained_score_from_sequences(
all_true_activations, all_expected_values
)
return ScoredSimulation(
scored_sequence_simulations=scored_sequence_simulations,
ev_correlation_score=ev_correlation_score,
rsquared_score=rsquared_score,
absolute_dev_explained_score=absolute_dev_explained_score,
)
async def simulate_and_score(
simulator: NeuronSimulator,
activation_records: Sequence[ActivationRecord],
) -> ScoredSimulation:
"""
Score an explanation of a neuron by how well it predicts activations on the given text
sequences.
"""
scored_sequence_simulations = await asyncio.gather(
*[
_simulate_and_score_sequence(
simulator,
activation_record,
)
for activation_record in activation_records
]
)
return aggregate_scored_sequence_simulations(scored_sequence_simulations)
async def make_simulator_and_score(
make_simulator: Coroutine[None, None, NeuronSimulator],
activation_records: Sequence[ActivationRecord],
) -> ScoredSimulation:
"""Chain together creating the simulator and using it to score activation records."""
simulator = await make_simulator
return await simulate_and_score(simulator, activation_records)
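# Illustrative end-to-end scoring sketch (a sketch only; requires API credentials and, inside an
# async function, calibration and validation ActivationRecords obtained elsewhere, e.g. from a
# NeuronRecord's calibration/valid splits; the explanation text is made up):
#
#     simulator = await make_explanation_simulator(
#         explanation="words related to dates",
#         calibration_activation_records=calibration_records,
#         model_name="text-davinci-003",
#     )
#     scored = await simulate_and_score(simulator, valid_records)
#     print(scored.get_preferred_score())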
|
from dataclasses import dataclass
from enum import Enum
from typing import List
from neuron_explainer.fast_dataclasses import FastDataclass
@dataclass
class Example(FastDataclass):
"""
An example list of tokens as strings corresponding to top token space inputs of a neuron, with a
string explanation of the neuron's behavior on these tokens.
"""
tokens: List[str]
explanation: str
class TokenSpaceFewShotExampleSet(Enum):
"""Determines which few-shot examples to use when sampling explanations."""
ORIGINAL = "original"
TEST = "test"
def get_examples(self) -> list[Example]:
"""Returns regular examples for use in a few-shot prompt."""
if self is TokenSpaceFewShotExampleSet.ORIGINAL:
return ORIGINAL_EXAMPLES
elif self is TokenSpaceFewShotExampleSet.TEST:
return TEST_EXAMPLES
else:
raise ValueError(f"Unhandled example set: {self}")
ORIGINAL_EXAMPLES = [
Example(
tokens=[
"actual",
" literal",
" actual",
" hyper",
" real",
" EX",
" Real",
"^",
"Full",
" full",
" optical",
" style",
"any",
"ALL",
"extreme",
" miniature",
" Optical",
" faint",
"~",
" Physical",
" REAL",
"*",
"virtual",
"TYPE",
" technical",
"otally",
" physic",
"Type",
"<",
"images",
"atic",
" sheer",
" Style",
" partial",
" natural",
"Hyper",
" Any",
" theoretical",
"|",
" ultimate",
"oing",
" constant",
"ANY",
"antically",
"ishly",
" ex",
" visual",
"special",
"omorphic",
"visual",
],
explanation=" adjectives related to being real, or to physical properties and evidence",
),
Example(
tokens=[
"cephal",
"aeus",
" coma",
"bered",
"abetes",
"inflamm",
"rugged",
"alysed",
"azine",
"hered",
"cells",
"aneously",
"fml",
"igm",
"culosis",
"iani",
"CTV",
"disabled",
"heric",
"ulo",
"geoning",
"awi",
"translation",
"iral",
"govtrack",
"mson",
"cloth",
"nesota",
" Dise",
" Lyme",
" dementia",
"agn",
" reversible",
" susceptibility",
"esthesia",
"orf",
" inflamm",
" Obesity",
" tox",
" Disorders",
"uberty",
"blind",
"ALTH",
"avier",
" Immunity",
" Hurt",
"ulet",
"ueless",
" sluggish",
"rosis",
],
explanation=" words related to physical medical conditions",
),
Example(
tokens=[
" January",
"terday",
"cember",
" April",
" July",
"September",
"December",
"Thursday",
"quished",
"November",
"Tuesday",
"uesday",
" Sept",
"ruary",
" March",
";;;;;;;;;;;;",
" Monday",
"Wednesday",
" Saturday",
" Wednesday",
"Reloaded",
"aturday",
" August",
"Feb",
"Sunday",
"Reviewed",
"uggest",
" Dhabi",
"ACTED",
"tten",
"Year",
"August",
"alogue",
"MX",
" Janeiro",
"yss",
" Leilan",
" Fiscal",
" referen",
"semb",
"eele",
"wcs",
"detail",
"ertation",
" Reborn",
" Sunday",
"itially",
"aturdays",
" Dise",
"essage",
],
explanation=" nouns related to time and dates",
),
]
TEST_EXAMPLES = [
Example(
tokens=[
"these",
" are",
" tokens",
],
explanation=" this is a test explanation",
),
]
|
"""
Code for calibrating simulations of neuron behavior. Calibration refers to a process of mapping from
a space of predicted activation values (e.g. [0, 10]) to the real activation distribution for a
neuron.
See http://go/neuron_explanation_methodology for a description of the calibration step. Calibration
is necessary for simulating neurons in the context of ablate-to-simulation, but it can be skipped
when using correlation scoring. (Calibration may still improve scoring quality, at least for
non-linear calibration methods.)
"""
from __future__ import annotations
import asyncio
from abc import abstractmethod
from typing import Optional, Sequence
import numpy as np
from neuron_explainer.activations.activations import ActivationRecord
from neuron_explainer.explanations.explanations import ActivationScale
from neuron_explainer.explanations.simulator import NeuronSimulator, SequenceSimulation
from sklearn import linear_model
class CalibratedNeuronSimulator(NeuronSimulator):
"""
Wrap a NeuronSimulator and calibrate it to map from the predicted activation space to the
actual neuron activation space.
"""
def __init__(self, uncalibrated_simulator: NeuronSimulator):
self.uncalibrated_simulator = uncalibrated_simulator
@classmethod
async def create(
cls,
uncalibrated_simulator: NeuronSimulator,
calibration_activation_records: Sequence[ActivationRecord],
) -> CalibratedNeuronSimulator:
"""
Create and calibrate a calibrated simulator (so initialization and calibration can be done
in one call).
"""
calibrated_simulator = cls(uncalibrated_simulator)
await calibrated_simulator.calibrate(calibration_activation_records)
return calibrated_simulator
async def calibrate(self, calibration_activation_records: Sequence[ActivationRecord]) -> None:
"""
Determine parameters to map from the predicted activation space to the real neuron
activation space, based on a calibration set.
Use when simulated sequences haven't already been produced on the calibration set.
"""
simulations = await asyncio.gather(
*[
self.uncalibrated_simulator.simulate(activations.tokens)
for activations in calibration_activation_records
]
)
self.calibrate_from_simulations(calibration_activation_records, simulations)
def calibrate_from_simulations(
self,
calibration_activation_records: Sequence[ActivationRecord],
simulations: Sequence[SequenceSimulation],
) -> None:
"""
Determine parameters to map from the predicted activation space to the real neuron
activation space, based on a calibration set.
Use when simulated sequences have already been produced on the calibration set.
"""
flattened_activations = []
flattened_simulated_activations: list[float] = []
for activations, simulation in zip(calibration_activation_records, simulations):
flattened_activations.extend(activations.activations)
flattened_simulated_activations.extend(simulation.expected_activations)
self._calibrate_from_flattened_activations(
np.array(flattened_activations), np.array(flattened_simulated_activations)
)
@abstractmethod
def _calibrate_from_flattened_activations(
self,
true_activations: np.ndarray,
uncalibrated_activations: np.ndarray,
) -> None:
"""
Determine parameters to map from the predicted activation space to the real neuron
activation space, based on a calibration set.
Take numpy arrays of all true activations and all uncalibrated activations on the
calibration set over all sequences.
"""
@abstractmethod
def apply_calibration(self, values: Sequence[float]) -> list[float]:
"""Apply the learned calibration to a sequence of values."""
async def simulate(self, tokens: Sequence[str]) -> SequenceSimulation:
uncalibrated_seq_simulation = await self.uncalibrated_simulator.simulate(tokens)
calibrated_activations = self.apply_calibration(
uncalibrated_seq_simulation.expected_activations
)
calibrated_distribution_values = [
self.apply_calibration(dv) for dv in uncalibrated_seq_simulation.distribution_values
]
return SequenceSimulation(
tokens=uncalibrated_seq_simulation.tokens,
expected_activations=calibrated_activations,
activation_scale=ActivationScale.NEURON_ACTIVATIONS,
distribution_values=calibrated_distribution_values,
distribution_probabilities=uncalibrated_seq_simulation.distribution_probabilities,
uncalibrated_simulation=uncalibrated_seq_simulation,
)
class UncalibratedNeuronSimulator(CalibratedNeuronSimulator):
"""Pass through the activations without trying to calibrate."""
def __init__(self, uncalibrated_simulator: NeuronSimulator):
super().__init__(uncalibrated_simulator)
async def calibrate(self, calibration_activation_records: Sequence[ActivationRecord]) -> None:
pass
def _calibrate_from_flattened_activations(
self,
true_activations: np.ndarray,
uncalibrated_activations: np.ndarray,
) -> None:
pass
def apply_calibration(self, values: Sequence[float]) -> list[float]:
return values if isinstance(values, list) else list(values)
class LinearCalibratedNeuronSimulator(CalibratedNeuronSimulator):
"""Find a linear mapping from uncalibrated activations to true activations.
Should not change ev_correlation_score because it is invariant to linear transformations.
"""
def __init__(self, uncalibrated_simulator: NeuronSimulator):
super().__init__(uncalibrated_simulator)
self._regression: Optional[linear_model.LinearRegression] = None
def _calibrate_from_flattened_activations(
self,
true_activations: np.ndarray,
uncalibrated_activations: np.ndarray,
) -> None:
self._regression = linear_model.LinearRegression()
self._regression.fit(uncalibrated_activations.reshape(-1, 1), true_activations)
def apply_calibration(self, values: Sequence[float]) -> list[float]:
if self._regression is None:
raise ValueError("Must call calibrate() before apply_calibration")
if len(values) == 0:
return []
return self._regression.predict(np.reshape(np.array(values), (-1, 1))).tolist()
class PercentileMatchingCalibratedNeuronSimulator(CalibratedNeuronSimulator):
"""
Map the nth percentile of the uncalibrated activations to the nth percentile of the true
activations for all n.
This will match the distribution of true activations on the calibration set, but will be
overconfident outside of the calibration set.
"""
def __init__(self, uncalibrated_simulator: NeuronSimulator):
super().__init__(uncalibrated_simulator)
self._uncalibrated_activations: Optional[np.ndarray] = None
self._true_activations: Optional[np.ndarray] = None
def _calibrate_from_flattened_activations(
self,
true_activations: np.ndarray,
uncalibrated_activations: np.ndarray,
) -> None:
self._uncalibrated_activations = np.sort(uncalibrated_activations)
self._true_activations = np.sort(true_activations)
def apply_calibration(self, values: Sequence[float]) -> list[float]:
if self._true_activations is None or self._uncalibrated_activations is None:
raise ValueError("Must call calibrate() before apply_calibration")
if len(values) == 0:
return []
return np.interp(
np.array(values), self._uncalibrated_activations, self._true_activations
).tolist()
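# Editor's note: minimal usage sketch, not part of the original library. It
# assumes ExplanationNeuronSimulator (from neuron_explainer.explanations.simulator),
# working API credentials, and a held-out collection of ActivationRecords; the
# model name and explanation text below are placeholders only.
async def _example_linear_calibration(
    calibration_records: Sequence[ActivationRecord],
) -> SequenceSimulation:
    from neuron_explainer.explanations.simulator import ExplanationNeuronSimulator
    uncalibrated = ExplanationNeuronSimulator(
        model_name="text-davinci-003",  # placeholder model name
        explanation="words related to Canada",  # placeholder explanation
    )
    # create() builds the calibrated wrapper and fits the linear mapping in one call.
    simulator = await LinearCalibratedNeuronSimulator.create(
        uncalibrated, calibration_records
    )
    # The returned simulation reports expected activations on the neuron's own scale.
    return await simulator.simulate(["Toronto", " is", " in", " Canada"])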
|
import asyncio
from typing import Any
from neuron_explainer.explanations.explainer import (
TokenActivationPairExplainer,
TokenSpaceRepresentationExplainer,
)
from neuron_explainer.explanations.few_shot_examples import TEST_EXAMPLES, FewShotExampleSet
from neuron_explainer.explanations.prompt_builder import HarmonyMessage, PromptFormat, Role
from neuron_explainer.explanations.token_space_few_shot_examples import (
TokenSpaceFewShotExampleSet,
)
def setup_module(unused_module: Any) -> None:
# Make sure we have an event loop, since the attempt to create the Semaphore in
    # ApiClient will fail without it.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
def test_if_formatting() -> None:
expected_prompt = """We're studying neurons in a neural network. Each neuron looks for some particular thing in a short document. Look at the parts of the document the neuron activates for and summarize in a single sentence what the neuron is looking for. Don't list examples of words.
The activation format is token<tab>activation. Activation values range from 0 to 10. A neuron finding what it's looking for is represented by a non-zero activation value. The higher the activation value, the stronger the match.
Neuron 1
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
Explanation of neuron 1 behavior: the main thing this neuron does is find vowels.
Neuron 2
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
Explanation of neuron 2 behavior:<|endofprompt|> the main thing this neuron does is find"""
explainer = TokenActivationPairExplainer(
model_name="text-davinci-003",
prompt_format=PromptFormat.INSTRUCTION_FOLLOWING,
few_shot_example_set=FewShotExampleSet.TEST,
)
prompt = explainer.make_explanation_prompt(
all_activation_records=TEST_EXAMPLES[0].activation_records,
max_activation=1.0,
max_tokens_for_completion=20,
)
assert prompt == expected_prompt
def test_harmony_format() -> None:
expected_prompt = [
HarmonyMessage(
role=Role.SYSTEM,
content="""We're studying neurons in a neural network. Each neuron looks for some particular thing in a short document. Look at the parts of the document the neuron activates for and summarize in a single sentence what the neuron is looking for. Don't list examples of words.
The activation format is token<tab>activation. Activation values range from 0 to 10. A neuron finding what it's looking for is represented by a non-zero activation value. The higher the activation value, the stronger the match.""",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 1
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
Explanation of neuron 1 behavior: the main thing this neuron does is find""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content=" vowels.",
),
HarmonyMessage(
role=Role.USER,
content="""
Neuron 2
Activations:
<start>
a 10
b 0
c 0
<end>
<start>
d 0
e 10
f 0
<end>
Explanation of neuron 2 behavior: the main thing this neuron does is find""",
),
]
explainer = TokenActivationPairExplainer(
model_name="gpt-4",
prompt_format=PromptFormat.HARMONY_V4,
few_shot_example_set=FewShotExampleSet.TEST,
)
prompt = explainer.make_explanation_prompt(
all_activation_records=TEST_EXAMPLES[0].activation_records,
max_activation=1.0,
max_tokens_for_completion=20,
)
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
for actual_message, expected_message in zip(prompt, expected_prompt):
assert actual_message["role"] == expected_message["role"]
assert actual_message["content"] == expected_message["content"]
assert prompt == expected_prompt
def test_token_space_explainer_if_formatting() -> None:
expected_prompt = """We're studying neurons in a neural network. Each neuron looks for some particular kind of token (which can be a word, or part of a word). Look at the tokens the neuron activates for (listed below) and summarize in a single sentence what the neuron is looking for. Don't list examples of words.
Tokens:
'these', ' are', ' tokens'
Explanation:
This neuron is looking for this is a test explanation.
Tokens:
'foo', 'bar', 'baz'
Explanation:
<|endofprompt|>This neuron is looking for"""
explainer = TokenSpaceRepresentationExplainer(
model_name="text-davinci-002",
prompt_format=PromptFormat.INSTRUCTION_FOLLOWING,
use_few_shot=True,
few_shot_example_set=TokenSpaceFewShotExampleSet.TEST,
)
prompt = explainer.make_explanation_prompt(
tokens=["foo", "bar", "baz"],
max_tokens_for_completion=20,
)
assert prompt == expected_prompt
def test_token_space_explainer_harmony_formatting() -> None:
expected_prompt = [
HarmonyMessage(
role=Role.SYSTEM,
content="We're studying neurons in a neural network. Each neuron looks for some particular kind of token (which can be a word, or part of a word). Look at the tokens the neuron activates for (listed below) and summarize in a single sentence what the neuron is looking for. Don't list examples of words.",
),
HarmonyMessage(
role=Role.USER,
content="""
Tokens:
'these', ' are', ' tokens'
Explanation:
This neuron is looking for""",
),
HarmonyMessage(
role=Role.ASSISTANT,
content=" this is a test explanation.",
),
HarmonyMessage(
role=Role.USER,
content="""
Tokens:
'foo', 'bar', 'baz'
Explanation:
This neuron is looking for""",
),
]
explainer = TokenSpaceRepresentationExplainer(
model_name="gpt-4",
prompt_format=PromptFormat.HARMONY_V4,
use_few_shot=True,
few_shot_example_set=TokenSpaceFewShotExampleSet.TEST,
)
prompt = explainer.make_explanation_prompt(
tokens=["foo", "bar", "baz"],
max_tokens_for_completion=20,
)
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
for actual_message, expected_message in zip(prompt, expected_prompt):
assert actual_message["role"] == expected_message["role"]
assert actual_message["content"] == expected_message["content"]
assert prompt == expected_prompt
|
# Few-shot examples for generating and simulating neuron explanations.
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional
from neuron_explainer.activations.activations import ActivationRecord
from neuron_explainer.fast_dataclasses import FastDataclass
@dataclass
class Example(FastDataclass):
activation_records: List[ActivationRecord]
explanation: str
first_revealed_activation_indices: List[int]
"""
For each activation record, the index of the first token for which the activation value in the
prompt should be an actual number rather than "unknown".
Examples all start with the activations rendered as "unknown", then transition to revealing
specific normalized activation values. The goal is to lead the model to predict that activation
sequences will eventually transition to predicting specific activation values instead of just
"unknown". This lets us cheat and get predictions of activation values for every token in a
single round of inference by having the activations in the sequence we're predicting always be
"unknown" in the prompt: the model will always think that maybe the next token will be a real
activation.
"""
token_index_to_score: Optional[int] = None
"""
If the prompt is used as an example for one-token-at-a-time scoring, this is the index of the
token to score.
"""
class FewShotExampleSet(Enum):
"""Determines which few-shot examples to use when sampling explanations."""
ORIGINAL = "original"
NEWER = "newer"
TEST = "test"
@classmethod
def from_string(cls, string: str) -> FewShotExampleSet:
for example_set in FewShotExampleSet:
if example_set.value == string:
return example_set
raise ValueError(f"Unrecognized example set: {string}")
def get_examples(self) -> list[Example]:
"""Returns regular examples for use in a few-shot prompt."""
if self is FewShotExampleSet.ORIGINAL:
return ORIGINAL_EXAMPLES
elif self is FewShotExampleSet.NEWER:
return NEWER_EXAMPLES
elif self is FewShotExampleSet.TEST:
return TEST_EXAMPLES
else:
raise ValueError(f"Unhandled example set: {self}")
def get_single_token_prediction_example(self) -> Example:
"""
Returns an example suitable for use in a subprompt for predicting a single token's
normalized activation, for use with the "one token at a time" scoring approach.
"""
if self is FewShotExampleSet.NEWER:
return NEWER_SINGLE_TOKEN_EXAMPLE
elif self is FewShotExampleSet.TEST:
return TEST_SINGLE_TOKEN_EXAMPLE
else:
raise ValueError(f"Unhandled example set: {self}")
TEST_EXAMPLES = [
Example(
activation_records=[
ActivationRecord(
tokens=["a", "b", "c"],
activations=[1.0, 0.0, 0.0],
),
ActivationRecord(
tokens=["d", "e", "f"],
activations=[0.0, 1.0, 0.0],
),
],
explanation="vowels",
first_revealed_activation_indices=[0, 1],
),
]
TEST_SINGLE_TOKEN_EXAMPLE = Example(
activation_records=[
ActivationRecord(
activations=[0.0, 0.0, 1.0],
tokens=["g", "h", "i"],
),
],
first_revealed_activation_indices=[],
token_index_to_score=2,
explanation="test explanation",
)
ORIGINAL_EXAMPLES = [
Example(
activation_records=[
ActivationRecord(
tokens=[
"t",
"urt",
"ur",
"ro",
" is",
" fab",
"ulously",
" funny",
" and",
" over",
" the",
" top",
" as",
" a",
" '",
"very",
" sneaky",
"'",
" but",
"ler",
" who",
" excel",
"s",
" in",
" the",
" art",
" of",
" impossible",
" disappearing",
"/",
"re",
"app",
"earing",
" acts",
],
activations=[
-0.71,
-1.85,
-2.39,
-2.58,
-1.34,
-1.92,
-1.69,
-0.84,
-1.25,
-1.75,
-1.42,
-1.47,
-1.51,
-0.8,
-1.89,
-1.56,
-1.63,
0.44,
-1.87,
-2.55,
-2.09,
-1.76,
-1.33,
-0.88,
-1.63,
-2.39,
-2.63,
-0.99,
2.83,
-1.11,
-1.19,
-1.33,
4.24,
-1.51,
],
),
ActivationRecord(
tokens=[
"esc",
"aping",
" the",
" studio",
" ,",
" pic",
"col",
"i",
" is",
" warm",
"ly",
" affecting",
" and",
" so",
" is",
" this",
" ad",
"roit",
"ly",
" minimalist",
" movie",
" .",
],
activations=[
-0.69,
4.12,
1.83,
-2.28,
-0.28,
-0.79,
-2.2,
-2.03,
-1.77,
-1.71,
-2.44,
1.6,
-1,
-0.38,
-1.93,
-2.09,
-1.63,
-1.94,
-1.82,
-1.64,
-1.32,
-1.92,
],
),
],
first_revealed_activation_indices=[10, 3],
explanation="present tense verbs ending in 'ing'",
),
Example(
activation_records=[
ActivationRecord(
tokens=[
"as",
" sac",
"char",
"ine",
" movies",
" go",
" ,",
" this",
" is",
" likely",
" to",
" cause",
" massive",
" cardiac",
" arrest",
" if",
" taken",
" in",
" large",
" doses",
" .",
],
activations=[
-0.14,
-1.37,
-0.68,
-2.27,
-1.46,
-1.11,
-0.9,
-2.48,
-2.07,
-3.49,
-2.16,
-1.79,
-0.23,
-0.04,
4.46,
-1.02,
-2.26,
-2.95,
-1.49,
-1.46,
-0.6,
],
),
ActivationRecord(
tokens=[
"shot",
" perhaps",
" '",
"art",
"istically",
"'",
" with",
" handheld",
" cameras",
" and",
" apparently",
" no",
" movie",
" lights",
" by",
" jo",
"aquin",
" b",
"aca",
"-",
"as",
"ay",
" ,",
" the",
" low",
"-",
"budget",
" production",
" swings",
" annoy",
"ingly",
" between",
" vert",
"igo",
" and",
" opacity",
" .",
],
activations=[
-0.09,
-3.53,
-0.72,
-2.36,
-1.05,
-1.12,
-2.49,
-2.14,
-1.98,
-1.59,
-2.62,
-2,
-2.73,
-2.87,
-3.23,
-1.11,
-2.23,
-0.97,
-2.28,
-2.37,
-1.5,
-2.81,
-1.73,
-3.14,
-2.61,
-1.7,
-3.08,
-4,
-0.71,
-2.48,
-1.39,
-1.96,
-1.09,
4.37,
-0.74,
-0.5,
-0.62,
],
),
],
first_revealed_activation_indices=[5, 20],
explanation="words related to physical medical conditions",
),
Example(
activation_records=[
ActivationRecord(
tokens=[
"the",
" sense",
" of",
" together",
"ness",
" in",
" our",
" town",
" is",
" strong",
" .",
],
activations=[
0,
0,
0,
1,
2,
0,
0.23,
0.5,
0,
0,
0,
],
),
ActivationRecord(
tokens=[
"a",
" buoy",
"ant",
" romantic",
" comedy",
" about",
" friendship",
" ,",
" love",
" ,",
" and",
" the",
" truth",
" that",
" we",
"'re",
" all",
" in",
" this",
" together",
" .",
],
activations=[
-0.15,
-2.33,
-1.4,
-2.17,
-2.53,
-0.85,
0.23,
-1.89,
0.09,
-0.47,
-0.5,
-0.58,
-0.87,
0.22,
0.58,
1.34,
0.98,
2.21,
2.84,
1.7,
-0.89,
],
),
],
first_revealed_activation_indices=[0, 10],
explanation="phrases related to community",
),
]
NEWER_EXAMPLES = [
Example(
activation_records=[
ActivationRecord(
tokens=[
"The",
" editors",
" of",
" Bi",
"opol",
"ym",
"ers",
" are",
" delighted",
" to",
" present",
" the",
" ",
"201",
"8",
" Murray",
" Goodman",
" Memorial",
" Prize",
" to",
" Professor",
" David",
" N",
".",
" Ber",
"atan",
" in",
" recognition",
" of",
" his",
" seminal",
" contributions",
" to",
" bi",
"oph",
"ysics",
" and",
" their",
" impact",
" on",
" our",
" understanding",
" of",
" charge",
" transport",
" in",
" biom",
"olecules",
".\n\n",
"In",
"aug",
"ur",
"ated",
" in",
" ",
"200",
"7",
" in",
" honor",
" of",
" the",
" Bi",
"opol",
"ym",
"ers",
" Found",
"ing",
" Editor",
",",
" the",
" prize",
" is",
" awarded",
" for",
" outstanding",
" accomplishments",
],
activations=[
0,
0.01,
0.01,
0,
0,
0,
-0.01,
0,
-0.01,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3.39,
0.12,
0,
-0.01,
0,
0,
0,
0,
-0,
0,
-0,
0,
0,
-0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0,
0,
0,
-0.01,
0,
0.41,
0,
0,
0,
-0.01,
0,
0,
0,
0,
0,
],
),
# We sometimes exceed the max context size when this is included :(
# ActivationRecord(
# tokens=[
# " We",
# " are",
# " proud",
# " of",
# " our",
# " national",
# " achievements",
# " in",
# " mastering",
# " all",
# " aspects",
# " of",
# " the",
# " fuel",
# " cycle",
# ".",
# " The",
# " current",
# " international",
# " interest",
# " in",
# " closing",
# " the",
# " fuel",
# " cycle",
# " is",
# " a",
# " vind",
# "ication",
# " of",
# " Dr",
# ".",
# " B",
# "hab",
# "ha",
# "’s",
# " pioneering",
# " vision",
# " and",
# " genius",
# ],
# activations=[
# -0,
# -0,
# 0,
# -0,
# -0,
# 0,
# 0,
# 0,
# -0,
# 0,
# 0,
# -0,
# 0,
# -0.01,
# 0,
# 0,
# -0,
# -0,
# 0,
# 0,
# 0,
# -0,
# -0,
# -0.01,
# 0,
# 0,
# -0,
# 0,
# 0,
# 0,
# 0,
# 0,
# -0,
# 0,
# 0,
# 0,
# 2.15,
# 0,
# 0,
# 0.03,
# ],
# ),
],
first_revealed_activation_indices=[7], # , 19],
explanation="language related to something being groundbreaking",
),
Example(
activation_records=[
ActivationRecord(
tokens=[
'{"',
"widget",
"Class",
'":"',
"Variant",
"Matrix",
"Widget",
'","',
"back",
"order",
"Message",
'":"',
"Back",
"ordered",
'","',
"back",
"order",
"Message",
"Single",
"Variant",
'":"',
"This",
" item",
" is",
" back",
"ordered",
'.","',
"ordered",
"Selection",
'":',
"true",
',"',
"product",
"Variant",
"Id",
'":',
"0",
',"',
"variant",
"Id",
"Field",
'":"',
"product",
"196",
"39",
"_V",
"ariant",
"Id",
'","',
"back",
"order",
"To",
"Message",
"Single",
"Variant",
'":"',
"This",
" item",
" is",
" back",
"ordered",
" and",
" is",
" expected",
" by",
" {",
"0",
"}.",
'","',
"low",
"Price",
'":',
"999",
"9",
".",
"0",
',"',
"attribute",
"Indexes",
'":[',
'],"',
"productId",
'":',
"196",
"39",
',"',
"price",
"V",
"ariance",
'":',
"true",
',"',
],
activations=[
-0.03,
0,
0,
0,
4.2,
0,
0,
0,
0,
-0,
0,
-0,
0,
0,
0,
0,
-0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0,
0,
0,
0,
0,
0,
0,
-0.03,
0,
0,
0,
0,
-0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0,
-0,
0,
0,
0,
0.01,
-0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0.02,
0,
0,
0,
0,
0,
1.24,
0,
0,
0,
],
),
ActivationRecord(
tokens=[
"A",
" regular",
" look",
" at",
" the",
" ups",
" and",
" downs",
" of",
" variant",
" covers",
" in",
" the",
" comics",
" industry",
"…\n\n",
"Here",
" are",
" the",
" Lego",
" variant",
" sketch",
" covers",
" by",
" Leon",
"el",
" Cast",
"ell",
"ani",
" for",
" a",
" variety",
" of",
" Marvel",
" titles",
",",
],
activations=[
0,
-0.01,
0,
0,
0,
0,
0,
0,
0,
6.52,
0,
0,
0,
-0,
0,
0,
0,
0,
0,
0,
1.62,
0,
0,
0,
0,
0,
0,
0,
0,
0,
-0,
0,
0,
0,
-0,
0,
],
),
],
first_revealed_activation_indices=[2, 8],
explanation="the word “variant” and other words with the same ”vari” root",
),
]
NEWER_SINGLE_TOKEN_EXAMPLE = Example(
activation_records=[
ActivationRecord(
tokens=[
"B",
"10",
" ",
"111",
" MON",
"DAY",
",",
" F",
"EB",
"RU",
"ARY",
" ",
"11",
",",
" ",
"201",
"9",
" DON",
"ATE",
"fake higher scoring token", # See below.
],
activations=[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.37,
# This fake activation makes the previous token's activation normalize to 8, which
# might help address overconfidence in "10" activations for the one-token-at-a-time
# scoring prompt. This value and the associated token don't actually appear anywhere
# in the prompt.
0.45,
],
),
],
first_revealed_activation_indices=[],
token_index_to_score=18,
explanation="instances of the token 'ate' as part of another word",
)
|
"""Uses API calls to simulate neuron activations based on an explanation."""
from __future__ import annotations
import asyncio
import logging
from abc import ABC, abstractmethod
from collections import OrderedDict
from enum import Enum
from typing import Any, Optional, Sequence, Union
import numpy as np
from neuron_explainer.activations.activation_records import (
calculate_max_activation,
format_activation_records,
format_sequences_for_simulation,
normalize_activations,
)
from neuron_explainer.activations.activations import ActivationRecord
from neuron_explainer.api_client import ApiClient
from neuron_explainer.explanations.explainer import EXPLANATION_PREFIX
from neuron_explainer.explanations.explanations import ActivationScale, SequenceSimulation
from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
from neuron_explainer.explanations.prompt_builder import (
HarmonyMessage,
PromptBuilder,
PromptFormat,
Role,
)
logger = logging.getLogger(__name__)
# Our prompts use normalized activation values, which map any range of positive activations to the
# integers from 0 to 10.
MAX_NORMALIZED_ACTIVATION = 10
VALID_ACTIVATION_TOKENS_ORDERED = list(str(i) for i in range(MAX_NORMALIZED_ACTIVATION + 1))
VALID_ACTIVATION_TOKENS = set(VALID_ACTIVATION_TOKENS_ORDERED)
class SimulationType(str, Enum):
"""How to simulate neuron activations. Values correspond to subclasses of NeuronSimulator."""
ALL_AT_ONCE = "all_at_once"
"""
Use a single prompt with <unknown> tokens; calculate EVs using logprobs.
Implemented by ExplanationNeuronSimulator.
"""
ONE_AT_A_TIME = "one_at_a_time"
"""
Use a separate prompt for each token being simulated; calculate EVs using logprobs.
Implemented by ExplanationTokenByTokenSimulator.
"""
@classmethod
def from_string(cls, s: str) -> SimulationType:
for simulation_type in SimulationType:
if simulation_type.value == s:
return simulation_type
raise ValueError(f"Invalid simulation type: {s}")
def compute_expected_value(
norm_probabilities_by_distribution_value: OrderedDict[int, float]
) -> float:
"""
Given a map from distribution values (integers on the range [0, 10]) to normalized
probabilities, return an expected value for the distribution.
"""
return np.dot(
np.array(list(norm_probabilities_by_distribution_value.keys())),
np.array(list(norm_probabilities_by_distribution_value.values())),
)
def parse_top_logprobs(top_logprobs: dict[str, float]) -> OrderedDict[int, float]:
"""
Given a map from tokens to logprobs, return a map from distribution values (integers on the
range [0, 10]) to unnormalized probabilities (in the sense that they may not sum to 1).
"""
probabilities_by_distribution_value = OrderedDict()
for token, logprob in top_logprobs.items():
if token in VALID_ACTIVATION_TOKENS:
token_as_int = int(token)
probabilities_by_distribution_value[token_as_int] = np.exp(logprob)
return probabilities_by_distribution_value
def compute_predicted_activation_stats_for_token(
top_logprobs: dict[str, float],
) -> tuple[OrderedDict[int, float], float]:
probabilities_by_distribution_value = parse_top_logprobs(top_logprobs)
total_p_of_distribution_values = sum(probabilities_by_distribution_value.values())
norm_probabilities_by_distribution_value = OrderedDict(
{
distribution_value: p / total_p_of_distribution_values
for distribution_value, p in probabilities_by_distribution_value.items()
}
)
expected_value = compute_expected_value(norm_probabilities_by_distribution_value)
return (
norm_probabilities_by_distribution_value,
expected_value,
)
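# Worked example (editor's illustration): if the top logprobs decode to
# probabilities 0.25 for "0", 0.5 for "5", and 0.25 for "10" (non-integer
# tokens are dropped by parse_top_logprobs), the normalized distribution is
# {0: 0.25, 5: 0.5, 10: 0.25} and compute_expected_value returns
# 0 * 0.25 + 5 * 0.5 + 10 * 0.25 = 5.0.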
# Adapted from tether/tether/core/encoder.py.
def convert_to_byte_array(s: str) -> bytearray:
byte_array = bytearray()
assert s.startswith("bytes:"), s
s = s[6:]
while len(s) > 0:
if s[0] == "\\":
# Hex encoding.
assert s[1] == "x"
assert len(s) >= 4
byte_array.append(int(s[2:4], 16))
s = s[4:]
else:
# Regular ascii encoding.
byte_array.append(ord(s[0]))
s = s[1:]
return byte_array
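# Example (editor's illustration): convert_to_byte_array(r"bytes:\xe2\x80")
# returns bytearray(b"\xe2\x80"), and convert_to_byte_array("bytes:ab")
# returns bytearray(b"ab").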
def handle_byte_encoding(
response_tokens: Sequence[str], merged_response_index: int
) -> tuple[str, int]:
"""
Handle the case where the current token is a sequence of bytes. This may involve merging
multiple response tokens into a single token.
"""
response_token = response_tokens[merged_response_index]
if response_token.startswith("bytes:"):
byte_array = bytearray()
while True:
byte_array = convert_to_byte_array(response_token) + byte_array
try:
# If we can decode the byte array as utf-8, then we're done.
response_token = byte_array.decode("utf-8")
break
except UnicodeDecodeError:
# If not, then we need to merge the previous response token into the byte
# array.
merged_response_index -= 1
response_token = response_tokens[merged_response_index]
return response_token, merged_response_index
def was_token_split(current_token: str, response_tokens: Sequence[str], start_index: int) -> bool:
"""
Return whether current_token (a token from the subject model) was split into multiple tokens by
the simulator model (as represented by the tokens in response_tokens). start_index is the index
in response_tokens at which to begin looking backward to form a complete token. It is usually
the first token *before* the delimiter that separates the token from the normalized activation,
barring some unusual cases.
This mainly happens if the subject model uses a different tokenizer than the simulator model.
But it can also happen in cases where Unicode characters are split. This function handles both
cases.
"""
merged_response_tokens = ""
merged_response_index = start_index
while len(merged_response_tokens) < len(current_token):
response_token = response_tokens[merged_response_index]
response_token, merged_response_index = handle_byte_encoding(
response_tokens, merged_response_index
)
merged_response_tokens = response_token + merged_response_tokens
merged_response_index -= 1
# It's possible that merged_response_tokens is longer than current_token at this point,
# since the between-lines delimiter may have been merged into the original token. But it
# should always be the case that merged_response_tokens ends with current_token.
assert merged_response_tokens.endswith(current_token)
num_merged_tokens = start_index - merged_response_index
token_was_split = num_merged_tokens > 1
if token_was_split:
logger.debug(
"Warning: token from the subject model was split into 2+ tokens by the simulator model."
)
return token_was_split
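# Example (editor's illustration): with current_token=" impossible",
# response_tokens=[" imp", "ossible"], and start_index=1, the loop merges
# "ossible" and " imp" back into " impossible"; two response tokens were
# consumed, so was_token_split returns True.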
def parse_simulation_response(
response: dict[str, Any],
prompt_format: PromptFormat,
tokens: Sequence[str],
) -> SequenceSimulation:
"""
Parse an API response to a simulation prompt.
Args:
response: response from the API
prompt_format: how the prompt was formatted
tokens: list of tokens as strings in the sequence where the neuron is being simulated
"""
choice = response["choices"][0]
if prompt_format == PromptFormat.HARMONY_V4:
text = choice["message"]["content"]
elif prompt_format in [
PromptFormat.NONE,
PromptFormat.INSTRUCTION_FOLLOWING,
]:
text = choice["text"]
else:
raise ValueError(f"Unhandled prompt format {prompt_format}")
response_tokens = choice["logprobs"]["tokens"]
choice["logprobs"]["token_logprobs"]
top_logprobs = choice["logprobs"]["top_logprobs"]
token_text_offset = choice["logprobs"]["text_offset"]
# This only works because the sequence "<start>" tokenizes into multiple tokens if it appears in
# a text sequence in the prompt.
scoring_start = text.rfind("<start>")
expected_values = []
original_sequence_tokens: list[str] = []
distribution_values: list[list[float]] = []
distribution_probabilities: list[list[float]] = []
for i in range(2, len(response_tokens)):
if len(original_sequence_tokens) == len(tokens):
# Make sure we haven't hit some sort of off-by-one error.
# TODO(sbills): Generalize this to handle different tokenizers.
reached_end = response_tokens[i + 1] == "<" and response_tokens[i + 2] == "end"
assert reached_end, f"{response_tokens[i-3:i+3]}"
break
if token_text_offset[i] >= scoring_start:
# We're looking for the first token after a tab. This token should be the text
# "unknown" if hide_activations=True or a normalized activation (0-10) otherwise.
# If it isn't, that means that the tab is not appearing as a delimiter, but rather
# as a token, in which case we should move on to the next response token.
if response_tokens[i - 1] == "\t":
if response_tokens[i] != "unknown":
logger.debug("Ignoring tab token that is not followed by an 'unknown' token.")
continue
# j represents the index of the token in a "token<tab>activation" line, barring
# one of the unusual cases handled below.
j = i - 2
current_token = tokens[len(original_sequence_tokens)]
if current_token == response_tokens[j] or was_token_split(
current_token, response_tokens, j
):
# We're in the normal case where the tokenization didn't throw off the
# formatting or in the token-was-split case, which we handle the usual way.
current_top_logprobs = top_logprobs[i]
(
norm_probabilities_by_distribution_value,
expected_value,
) = compute_predicted_activation_stats_for_token(
current_top_logprobs,
)
current_distribution_values = list(
norm_probabilities_by_distribution_value.keys()
)
current_distribution_probabilities = list(
norm_probabilities_by_distribution_value.values()
)
else:
# We're in a case where the tokenization resulted in a newline being folded into
# the token. We can't do our usual prediction of activation stats for the token,
# since the model did not observe the original token. Instead, we use dummy
# values. See the TODO elsewhere in this file about coming up with a better
# prompt format that avoids this situation.
newline_folded_into_token = "\n" in response_tokens[j]
assert (
newline_folded_into_token
), f"`{current_token=}` {response_tokens[j-3:j+3]=}"
logger.debug(
"Warning: newline before a token<tab>activation line was folded into the token"
)
current_distribution_values = []
current_distribution_probabilities = []
expected_value = 0.0
original_sequence_tokens.append(current_token)
distribution_values.append([float(v) for v in current_distribution_values])
distribution_probabilities.append(current_distribution_probabilities)
expected_values.append(expected_value)
return SequenceSimulation(
tokens=original_sequence_tokens,
expected_activations=expected_values,
activation_scale=ActivationScale.SIMULATED_NORMALIZED_ACTIVATIONS,
distribution_values=distribution_values,
distribution_probabilities=distribution_probabilities,
)
class NeuronSimulator(ABC):
"""Abstract base class for simulating neuron behavior."""
@abstractmethod
async def simulate(self, tokens: Sequence[str]) -> SequenceSimulation:
"""Simulate the behavior of a neuron based on an explanation."""
...
class ExplanationNeuronSimulator(NeuronSimulator):
"""
Simulate neuron behavior based on an explanation.
This class uses a few-shot prompt with examples of other explanations and activations. This
prompt allows us to score all of the tokens at once using a nifty trick involving logprobs.
"""
def __init__(
self,
model_name: str,
explanation: str,
max_concurrent: Optional[int] = 10,
few_shot_example_set: FewShotExampleSet = FewShotExampleSet.ORIGINAL,
prompt_format: PromptFormat = PromptFormat.INSTRUCTION_FOLLOWING,
cache: bool = False,
):
self.api_client = ApiClient(
model_name=model_name, max_concurrent=max_concurrent, cache=cache
)
self.explanation = explanation
self.few_shot_example_set = few_shot_example_set
self.prompt_format = prompt_format
async def simulate(
self,
tokens: Sequence[str],
) -> SequenceSimulation:
prompt = self.make_simulation_prompt(tokens)
generate_kwargs: dict[str, Any] = {
"max_tokens": 0,
"echo": True,
"logprobs": 15,
}
if self.prompt_format == PromptFormat.HARMONY_V4:
assert isinstance(prompt, list)
assert isinstance(prompt[0], dict) # Really a HarmonyMessage
generate_kwargs["messages"] = prompt
else:
assert isinstance(prompt, str)
generate_kwargs["prompt"] = prompt
response = await self.api_client.make_request(**generate_kwargs)
logger.debug("response in score_explanation_by_activations is %s", response)
result = parse_simulation_response(response, self.prompt_format, tokens)
logger.debug("result in score_explanation_by_activations is %s", result)
return result
# TODO(sbills): The current token<tab>activation format can result in improper tokenization.
# In particular, if the token is itself a tab, we may get a single "\t\t" token rather than two
# "\t" tokens. Consider using a separator that does not appear in any multi-character tokens.
def make_simulation_prompt(self, tokens: Sequence[str]) -> Union[str, list[HarmonyMessage]]:
"""Create a few-shot prompt for predicting neuron activations for the given tokens."""
# TODO(sbills): The prompts in this file are subtly different from the ones in explainer.py.
# Consider reconciling them.
prompt_builder = PromptBuilder()
prompt_builder.add_message(
Role.SYSTEM,
"""We're studying neurons in a neural network.
Each neuron looks for some particular thing in a short document.
Look at the summary of what the neuron does, and try to predict how it will fire on each token.
The activation format is token<tab>activation, activations go from 0 to 10, "unknown" indicates an unknown activation. Most activations will be 0.
""",
)
few_shot_examples = self.few_shot_example_set.get_examples()
for i, example in enumerate(few_shot_examples):
prompt_builder.add_message(
Role.USER,
f"\n\nNeuron {i + 1}\nExplanation of neuron {i + 1} behavior: {EXPLANATION_PREFIX} "
f"{example.explanation}",
)
formatted_activation_records = format_activation_records(
example.activation_records,
calculate_max_activation(example.activation_records),
start_indices=example.first_revealed_activation_indices,
)
prompt_builder.add_message(
Role.ASSISTANT, f"\nActivations: {formatted_activation_records}\n"
)
prompt_builder.add_message(
Role.USER,
f"\n\nNeuron {len(few_shot_examples) + 1}\nExplanation of neuron "
f"{len(few_shot_examples) + 1} behavior: {EXPLANATION_PREFIX} "
f"{self.explanation.strip()}",
)
prompt_builder.add_message(
Role.ASSISTANT, f"\nActivations: {format_sequences_for_simulation([tokens])}"
)
return prompt_builder.build(self.prompt_format)
class ExplanationTokenByTokenSimulator(NeuronSimulator):
"""
Simulate neuron behavior based on an explanation.
Unlike ExplanationNeuronSimulator, this class uses one few-shot prompt per token to calculate
expected activations. This is slower. This class gets a one-token completion and calculates an
expected value from that token's logprobs.
"""
def __init__(
self,
model_name: str,
explanation: str,
max_concurrent: Optional[int] = 10,
few_shot_example_set: FewShotExampleSet = FewShotExampleSet.NEWER,
prompt_format: PromptFormat = PromptFormat.INSTRUCTION_FOLLOWING,
cache: bool = False,
):
assert (
few_shot_example_set != FewShotExampleSet.ORIGINAL
), "This simulator doesn't support the ORIGINAL few-shot example set."
self.api_client = ApiClient(
model_name=model_name, max_concurrent=max_concurrent, cache=cache
)
self.explanation = explanation
self.few_shot_example_set = few_shot_example_set
self.prompt_format = prompt_format
async def simulate(
self,
tokens: Sequence[str],
) -> SequenceSimulation:
responses_by_token = await asyncio.gather(
*[
self._get_activation_stats_for_single_token(tokens, self.explanation, token_index)
for token_index in range(len(tokens))
]
)
expected_values, distribution_values, distribution_probabilities = [], [], []
for response in responses_by_token:
activation_logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
(
norm_probabilities_by_distribution_value,
expected_value,
) = compute_predicted_activation_stats_for_token(
activation_logprobs,
)
distribution_values.append(
[float(v) for v in norm_probabilities_by_distribution_value.keys()]
)
distribution_probabilities.append(
list(norm_probabilities_by_distribution_value.values())
)
expected_values.append(expected_value)
result = SequenceSimulation(
tokens=list(tokens), # SequenceSimulation expects List type
expected_activations=expected_values,
activation_scale=ActivationScale.SIMULATED_NORMALIZED_ACTIVATIONS,
distribution_values=distribution_values,
distribution_probabilities=distribution_probabilities,
)
logger.debug("result in score_explanation_by_activations is %s", result)
return result
async def _get_activation_stats_for_single_token(
self,
tokens: Sequence[str],
explanation: str,
token_index_to_score: int,
) -> dict:
prompt = self.make_single_token_simulation_prompt(
tokens,
explanation,
token_index_to_score=token_index_to_score,
)
return await self.api_client.make_request(
prompt=prompt, max_tokens=1, echo=False, logprobs=15
)
def _add_single_token_simulation_subprompt(
self,
prompt_builder: PromptBuilder,
activation_record: ActivationRecord,
neuron_index: int,
explanation: str,
token_index_to_score: int,
end_of_prompt: bool,
) -> None:
trimmed_activation_record = ActivationRecord(
tokens=activation_record.tokens[: token_index_to_score + 1],
activations=activation_record.activations[: token_index_to_score + 1],
)
prompt_builder.add_message(
Role.USER,
f"""
Neuron {neuron_index}
Explanation of neuron {neuron_index} behavior: {EXPLANATION_PREFIX} {explanation.strip()}
Text:
{"".join(trimmed_activation_record.tokens)}
Last token in the text:
{trimmed_activation_record.tokens[-1]}
Last token activation, considering the token in the context in which it appeared in the text:
""",
)
if not end_of_prompt:
normalized_activations = normalize_activations(
trimmed_activation_record.activations, calculate_max_activation([activation_record])
)
prompt_builder.add_message(
Role.ASSISTANT, str(normalized_activations[-1]) + ("" if end_of_prompt else "\n\n")
)
def make_single_token_simulation_prompt(
self,
tokens: Sequence[str],
explanation: str,
token_index_to_score: int,
) -> Union[str, list[HarmonyMessage]]:
"""Make a few-shot prompt for predicting the neuron's activation on a single token."""
assert explanation != ""
prompt_builder = PromptBuilder()
prompt_builder.add_message(
Role.SYSTEM,
"""We're studying neurons in a neural network. Each neuron looks for some particular thing in a short document. Look at an explanation of what the neuron does, and try to predict its activations on a particular token.
The activation format is token<tab>activation, and activations range from 0 to 10. Most activations will be 0.
""",
)
few_shot_examples = self.few_shot_example_set.get_examples()
for i, example in enumerate(few_shot_examples):
prompt_builder.add_message(
Role.USER,
f"Neuron {i + 1}\nExplanation of neuron {i + 1} behavior: {EXPLANATION_PREFIX} "
f"{example.explanation}\n",
)
formatted_activation_records = format_activation_records(
example.activation_records,
calculate_max_activation(example.activation_records),
start_indices=None,
)
prompt_builder.add_message(
Role.ASSISTANT,
f"Activations: {formatted_activation_records}\n\n",
)
prompt_builder.add_message(
Role.SYSTEM,
"Now, we're going predict the activation of a new neuron on a single token, "
"following the same rules as the examples above. Activations still range from 0 to 10.",
)
single_token_example = self.few_shot_example_set.get_single_token_prediction_example()
assert single_token_example.token_index_to_score is not None
self._add_single_token_simulation_subprompt(
prompt_builder,
single_token_example.activation_records[0],
len(few_shot_examples) + 1,
explanation,
token_index_to_score=single_token_example.token_index_to_score,
end_of_prompt=False,
)
activation_record = ActivationRecord(
tokens=list(tokens[: token_index_to_score + 1]), # ActivationRecord expects List type.
            activations=[0.0] * len(tokens),  # Placeholder values; unused when end_of_prompt=True.
)
self._add_single_token_simulation_subprompt(
prompt_builder,
activation_record,
len(few_shot_examples) + 2,
explanation,
token_index_to_score,
end_of_prompt=True,
)
return prompt_builder.build(self.prompt_format, allow_extra_system_messages=True)
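# Editor's note: illustrative helper, not part of the original module. It shows
# how a SimulationType value maps onto the two simulator implementations above;
# extra constructor arguments (few-shot example set, prompt format, caching) are
# left at their defaults for brevity.
def _simulator_for_type(
    simulation_type: SimulationType, model_name: str, explanation: str
) -> NeuronSimulator:
    if simulation_type is SimulationType.ALL_AT_ONCE:
        return ExplanationNeuronSimulator(model_name, explanation)
    elif simulation_type is SimulationType.ONE_AT_A_TIME:
        return ExplanationTokenByTokenSimulator(model_name, explanation)
    else:
        raise ValueError(f"Unhandled simulation type: {simulation_type}")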
|
# Utilities for dataclasses that are very fast to serialize and deserialize, with limited data
# validation. Fields must not be tuples, since they get serialized and then deserialized as lists.
#
# The unit tests for this library show how to use it.
import json
from dataclasses import dataclass, field, fields, is_dataclass
from functools import partial
from typing import Any, Union
import orjson
dataclasses_by_name = {}
dataclasses_by_fieldnames = {}
@dataclass
class FastDataclass:
dataclass_name: str = field(init=False)
def __post_init__(self) -> None:
self.dataclass_name = self.__class__.__name__
def register_dataclass(cls): # type: ignore
assert is_dataclass(cls), "Only dataclasses can be registered."
dataclasses_by_name[cls.__name__] = cls
name_set = frozenset(f.name for f in fields(cls) if f.name != "dataclass_name")
dataclasses_by_fieldnames[name_set] = cls
return cls
def dumps(obj: Any) -> bytes:
return orjson.dumps(obj, option=orjson.OPT_SERIALIZE_NUMPY)
def _object_hook(d: Any, backwards_compatible: bool = True) -> Any:
# If d is a list, recurse.
if isinstance(d, list):
return [_object_hook(x, backwards_compatible=backwards_compatible) for x in d]
# If d is not a dict, return it as is.
if not isinstance(d, dict):
return d
cls = None
if "dataclass_name" in d:
if d["dataclass_name"] in dataclasses_by_name:
cls = dataclasses_by_name[d["dataclass_name"]]
else:
assert backwards_compatible, (
f"Dataclass {d['dataclass_name']} not found, set backwards_compatible=True if you "
f"are okay with that."
)
# Load objects created without dataclass_name set.
else:
# Try our best to find a dataclass if backwards_compatible is True.
if backwards_compatible:
d_fields = frozenset(d.keys())
if d_fields in dataclasses_by_fieldnames:
cls = dataclasses_by_fieldnames[d_fields]
elif len(d_fields) > 0:
# Check if the fields are a subset of a dataclass (if the dataclass had extra fields
# added since the data was created). Note that this will fail if fields were removed
# from the dataclass.
for key, possible_cls in dataclasses_by_fieldnames.items():
if d_fields.issubset(key):
cls = possible_cls
break
else:
print(f"Could not find dataclass for {d_fields} {cls}")
new_d = {
k: _object_hook(v, backwards_compatible=backwards_compatible)
for k, v in d.items()
if k != "dataclass_name"
}
if cls is not None:
return cls(**new_d)
else:
return new_d
def loads(s: Union[str, bytes], backwards_compatible: bool = True) -> Any:
return json.loads(
s,
object_hook=partial(_object_hook, backwards_compatible=backwards_compatible),
)
|
from .fast_dataclasses import FastDataclass, dumps, loads, register_dataclass
__all__ = ["FastDataclass", "dumps", "loads", "register_dataclass"]
|
from dataclasses import dataclass
import pytest
from .fast_dataclasses import FastDataclass, dumps, loads, register_dataclass
# Inheritance is a bit tricky with our setup. dataclass_name must be set for instances of these
# classes to serialize and deserialize correctly, but if it's given a default value, then subclasses
# can't have any fields that don't have default values, because of how constructors are generated
# for dataclasses (fields with no default value can't follow those with default values). To work
# around this, we set dataclass_name in __post_init__ on the base class, which is called after the
# constructor. The implementation does the right thing for both the base class and the subclass.
@register_dataclass
@dataclass
class DataclassC(FastDataclass):
ints: list[int]
@register_dataclass
@dataclass
class DataclassC_ext(DataclassC):
s: str
@register_dataclass
@dataclass
class DataclassB(FastDataclass):
str_to_c: dict[str, DataclassC]
cs: list[DataclassC]
@register_dataclass
@dataclass
class DataclassA(FastDataclass):
floats: list[float]
strings: list[str]
bs: list[DataclassB]
@register_dataclass
@dataclass
class DataclassD(FastDataclass):
s1: str
s2: str = "default"
def test_dataclasses() -> None:
a = DataclassA(
floats=[1.0, 2.0],
strings=["a", "b"],
bs=[
DataclassB(
str_to_c={"a": DataclassC(ints=[1, 2]), "b": DataclassC(ints=[3, 4])},
cs=[DataclassC(ints=[5, 6]), DataclassC_ext(ints=[7, 8], s="s")],
),
DataclassB(
str_to_c={"c": DataclassC_ext(ints=[9, 10], s="t"), "d": DataclassC(ints=[11, 12])},
cs=[DataclassC(ints=[13, 14]), DataclassC(ints=[15, 16])],
),
],
)
assert loads(dumps(a)) == a
def test_c_and_c_ext() -> None:
c_ext = DataclassC_ext(ints=[3, 4], s="s")
assert loads(dumps(c_ext)) == c_ext
c = DataclassC(ints=[1, 2])
assert loads(dumps(c)) == c
def test_bad_serialized_data() -> None:
assert type(loads(dumps(DataclassC(ints=[3, 4])))) == DataclassC
assert type(loads('{"ints": [3, 4]}', backwards_compatible=False)) == dict
assert type(loads('{"ints": [3, 4], "dataclass_name": "DataclassC"}')) == DataclassC
with pytest.raises(TypeError):
loads('{"ints": [3, 4], "bogus_extra_field": "foo", "dataclass_name": "DataclassC"}')
with pytest.raises(TypeError):
loads('{"ints_field_is_missing": [3, 4], "dataclass_name": "DataclassC"}')
assert type(loads('{"s1": "test"}', backwards_compatible=False)) == dict
assert type(loads('{"s1": "test"}', backwards_compatible=True)) == DataclassD
|
# %%
import logging
from flask import Flask, request
from flask_cors import CORS
import json
import urllib.request
def load_az_json(url):
with urllib.request.urlopen(url) as f:
return json.load(f)
def start(
dev: bool = False,
host_name: str = "0.0.0.0",
port: int = 80,
):
app = Flask("interpretability chat")
app.logger.setLevel(logging.INFO)
# app.logger.disabled = True
CORS(app)
@app.after_request
def after_request(response):
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add(
"Access-Control-Allow-Headers", "Content-Type,Authorization"
)
response.headers.add(
"Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"
)
return response
@app.route("/load_az", methods=["GET", "POST"])
async def load_az():
args = request.get_json()
path = args["path"]
result = load_az_json(path)
return result
app.run(debug=dev, host=host_name, port=port, use_reloader=False)
def main(dev: bool = True, host_name: str = "0.0.0.0", port: int = 8000):
start(dev=dev, host_name=host_name, port=port)
if __name__ == "__main__":
main()
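# Example request (editor's illustration): with the server started via main(),
# i.e. listening on port 8000, and a reachable JSON URL in place of the
# placeholder below:
#
#   curl -X POST http://localhost:8000/load_az \
#        -H "Content-Type: application/json" \
#        -d '{"path": "https://example.com/data.json"}'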
|
import multiprocessing
import os
import sys
import subprocess
from distutils import sysconfig
from distutils.command.build import build as DistutilsBuild
from setuptools import setup
def build_common(dynamic_library_extension, cmake_arg_list=None):
# On OSX CMake's FindPythonLibs is flaky; we need to supply lib and include
# dirs otherwise it sometimes fails to pull up the correct versions (see
# https://cmake.org/Bug/view.php?id=14809)
def find_python_library():
for var in ['LIBPL', 'LIBDIR']:
python_library = os.path.join(sysconfig.get_config_var(var), 'libpython{}.{}'.format(sysconfig.get_python_version(), dynamic_library_extension))
if os.path.exists(python_library):
return python_library
cores_to_use = max(1, multiprocessing.cpu_count() - 1)
cmake_arg_list = cmake_arg_list if cmake_arg_list is not None else []
python_library = find_python_library()
python_include = sysconfig.get_python_inc()
if python_library is not None:
cmake_arg_list.append('-DPYTHON_LIBRARY={}'.format(python_library))
cmake_arg_list.append('-DPYTHON_INCLUDE_DIR={}'.format(python_include))
subprocess.check_call(['cmake', '-DCMAKE_BUILD_TYPE=Release', '-DBUILD_PYTHON=ON', '-DBUILD_JAVA=OFF', '-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable)] + cmake_arg_list, cwd='doom_py')
subprocess.check_call(['make', '-j', str(cores_to_use)], cwd='doom_py')
subprocess.check_call(['rm', '-f', 'vizdoom.so'], cwd='doom_py')
subprocess.check_call(['ln', '-s', 'bin/python/vizdoom.so', 'vizdoom.so'], cwd='doom_py')
def build_osx():
build_common('dylib', cmake_arg_list=['-DOSX_COCOA_BACKEND=OFF'])
# Symlink to the correct vizdoom binary
subprocess.check_call(['rm', '-f', 'bin/vizdoom'], cwd='doom_py')
subprocess.check_call(['ln', '-s', 'vizdoom.app/Contents/MacOS/vizdoom', 'bin/vizdoom'], cwd='doom_py')
def build_linux():
build_common('so')
def build_windows():
# THIS IS UNTESTED
build_common('dll')
if sys.platform.startswith("darwin"):
platname = "osx"
build_func = build_osx
elif sys.platform.startswith("linux"):
platname = "linux"
build_func = build_linux
elif sys.platform.startswith("win"):
platname = "win"
build_func = build_windows
else:
raise RuntimeError("Unrecognized platform: {}".format(sys.platform))
# For building Doom
class BuildDoom(DistutilsBuild):
def run(self):
try:
build_func()
except subprocess.CalledProcessError as e:
if platname == 'osx':
library_str = "doom_py requires boost, boost-python, sdl2 on OSX (installable via 'brew install boost boost-python sdl2')"
elif platname == 'linux':
library_str = "Try running 'apt-get install -y python-numpy cmake zlib1g-dev libjpeg-dev libboost-all-dev gcc libsdl2-dev wget unzip'"
else:
library_str = ''
sys.stderr.write("\033[1m" + "\nCould not build doom-py: %s. (HINT: are you sure cmake is installed? You might also be missing a library. %s\n\n" % (e, library_str) + "\033[0m")
raise
DistutilsBuild.run(self)
setup(name='doom-py',
version='0.0.14',
description='Python bindings to ViZDoom',
url='https://github.com/openai/doom-py',
author='OpenAI Community',
author_email='[email protected]',
packages=['doom_py'],
cmdclass={'build': BuildDoom},
setup_requires=['numpy'],
install_requires=['numpy'],
tests_require=['nose2'],
classifiers=['License :: OSI Approved :: MIT License'],
include_package_data=True,
)
|
from doom_py.vizdoom import *
import os
class Loader():
"""
    This class converts file names to full paths to be imported
    by the DoomGame.
"""
def get_vizdoom_path(self):
package_directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(package_directory, 'bin/vizdoom')
def get_freedoom_path(self):
package_directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(package_directory, 'scenarios/freedoom2.wad')
def get_scenario_path(self, name):
package_directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(package_directory, 'scenarios/{}'.format(name))
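# Example usage (editor's illustration; assumes a DoomGame instance named `game`
# from the vizdoom bindings and that 'basic.wad' ships in the scenarios folder):
#
#   loader = Loader()
#   game.set_vizdoom_path(loader.get_vizdoom_path())
#   game.set_doom_game_path(loader.get_freedoom_path())
#   game.set_doom_scenario_path(loader.get_scenario_path('basic.wad'))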
|
#!/usr/bin/python
#####################################################################
# This script presents how to run some scenarios.
# Configuration is loaded from "../../examples/config/<SCENARIO_NAME>.cfg" file.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import DoomGame, ScreenResolution
from random import choice
import itertools as it
from time import sleep
game = DoomGame()
# Choose scenario config file you wish to watch.
# Don't load two configs because the second will overwrite the first one.
# Multiple config files are OK, but combining these ones doesn't make much sense.
game.load_config("../../examples/config/basic.cfg")
#game.load_config("../../examples/config/deadly_corridor.cfg")
#game.load_config("../../examples/config/deathmatch.cfg")
#game.load_config("../../examples/config/defend_the_center.cfg")
#game.load_config("../../examples/config/defend_the_line.cfg")
#game.load_config("../../examples/config/health_gathering.cfg")
#game.load_config("../../examples/config/my_way_home.cfg")
#game.load_config("../../examples/config/predict_position.cfg")
#game.load_config("../../examples/config/take_cover.cfg")
# Makes the screen bigger to see more details.
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.init()
# Creates all possible actions depending on how many buttons there are.
actions_num = game.get_available_buttons_size()
actions = []
for perm in it.product([False, True], repeat=actions_num):
actions.append(list(perm))
episodes = 10
sleep_time = 0.028
for i in range(episodes):
print("Episode #" +str(i+1))
    # Not needed for the first episode, but the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
        # Gets the state so we can do something with it.
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
        # Makes a random action and saves the reward.
        r = game.make_action(choice(actions))
        # Makes a "prolonged" action and skips frames:
# skiprate = 3
# r = game.make_action(choice(actions), skiprate)
# The same could be achieved with:
# game.set_action(choice(actions))
# skiprate = 3
# game.advance_action(skiprate)
# r = game.get_last_reward()
print("State #" +str(s.number))
print("Game Variables:", misc)
print("Performed action:",game.get_last_action())
print("Last Reward:",r)
print("=====================")
# Sleep some time because processing is too fast to watch.
if sleep_time>0:
sleep(sleep_time)
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
|
#!/usr/bin/python
#####################################################################
# This script presents how to make use of game variables to implement
# shaping using the health_guided.wad scenario.
# The health_guided scenario is just like health_gathering
# (see "../../scenarios/README.md"), but for each collected medkit, global
# variable number 1 in the ACS script (corresponding to USER1) is increased
# by 100.0. It is not considered part of the reward, but it may
# reduce learning time.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
#
#####################################################################
from __future__ import print_function
from vizdoom import *
from random import choice
import itertools as it
from time import sleep
import cv2
game = DoomGame()
# Choose scenario config file you wish to watch.
# Don't load two configs because the second will overwrite the first one.
# Multiple config files are OK, but combining these ones doesn't make much sense.
game.load_config("../../examples/config/health_gathering.cfg")
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.init()
# Creates all possible actions.
actions_num = game.get_available_buttons_size()
actions = []
for perm in it.product([False, True], repeat=actions_num):
actions.append(list(perm))
episodes = 10
sleep_time = 0.028
last_total_shaping_reward = 0
for i in range(episodes):
print("Episode #" + str(i+1))
    # Not needed for the first episode, but the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
        # Gets the state so we can do something with it.
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
        # Makes a random action and saves the reward.
r = game.make_action(choice(actions))
# Retrieve the shaping reward
sr = doom_fixed_to_double(game.get_game_variable(GameVariable.USER1))
sr = sr - last_total_shaping_reward
last_total_shaping_reward += sr
print("State #" +str(s.number))
print("Health:", misc[0])
print("Last Reward:", r)
print("Last Shaping Reward:", sr)
print("=====================")
# Sleep some time because processing is too fast to watch.
if sleep_time>0:
sleep(sleep_time)
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
game.close()
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from random import choice
from time import sleep
from time import time
game = DoomGame()
game.set_vizdoom_path("../../bin/vizdoom")
game.set_doom_game_path("../../scenarios/freedoom2.wad")
#game.set_doom_game_path("../../scenarios/doom2.wad") # Not provided with environment due to licences.
game.set_doom_map("map01")
game.set_screen_resolution(ScreenResolution.RES_640X480)
# Adds delta buttons that will be allowed and set the maximum allowed value (optional).
game.add_available_button(Button.MOVE_FORWARD_BACKWARD_DELTA, 10)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 5)
game.add_available_button(Button.TURN_LEFT_RIGHT_DELTA, 5)
game.add_available_button(Button.LOOK_UP_DOWN_DELTA)
# For normal buttons (binary) all values other than 0 are interpreted as pushed.
# For delta buttons values determine a precision/speed.
#
# For TURN_LEFT_RIGHT_DELTA and LOOK_UP_DOWN_DELTA value is the angle (in degrees)
# of which the viewing angle will change.
#
# For MOVE_FORWARD_BACKWARD_DELTA, MOVE_LEFT_RIGHT_DELTA, MOVE_UP_DOWN_DELTA (rarely used)
# value is the speed of movement in a given direction (100 is close to the maximum speed).
action = [100, 10, 10, 1]
# If a button's absolute value exceeds its maximum, the value is clamped to the maximum while keeping its original sign.
# Delta buttons in spectator modes correspond to mouse movements.
# Maximum allowed values also apply to spectator modes.
# game.add_game_args("+freelook 1") # Use this to enable looking around with the mouse.
# game.set_mode(Mode.SPECTATOR)
game.set_window_visible(True)
game.init()
episodes = 10
sleep_time = 0.028
for i in range(episodes):
print("Episode #" + str(i+1))
game.new_episode()
while not game.is_episode_finished():
s = game.get_state()
r = game.make_action(action)
t = game.get_episode_time()
action[0] = t % 100 - 50
action[1] = t % 100 - 50
action[2] = t % 100 - 50
if not t % 25:
action[3] = -action[3]
print("State #" + str(s.number))
print("=====================")
if sleep_time>0:
sleep(sleep_time)
print("Episode finished.")
print("************************")
game.close()
|
#!/usr/bin/python
#####################################################################
# This script presents SPECTATOR mode. In SPECTATOR mode you play and
# your agent can learn from it.
# Configuration is loaded from "../../examples/config/<SCENARIO_NAME>.cfg" file.
#
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import *
from time import sleep
game = DoomGame()
# Choose scenario config file you wish to watch.
# Don't load two configs because the second will overwrite the first one.
# Multiple config files are OK, but combining these ones doesn't make much sense.
#game.load_config("../../examples/config/basic.cfg")
#game.load_config("../../examples/config/deadly_corridor.cfg")
game.load_config("../../examples/config/deathmatch.cfg")
#game.load_config("../../examples/config/defend_the_center.cfg")
#game.load_config("../../examples/config/defend_the_line.cfg")
#game.load_config("../../examples/config/health_gathering.cfg")
#game.load_config("../../examples/config/my_way_home.cfg")
#game.load_config("../../examples/config/predict_position.cfg")
#game.load_config("../../examples/config/take_cover.cfg")
# Enables freelook in engine
game.add_game_args("+freelook 1")
game.set_screen_resolution(ScreenResolution.RES_640X480)
# Enables spectator mode, so you can play. Sounds strange, but it is the agent who is supposed to watch, not you.
game.set_window_visible(True)
game.set_mode(Mode.SPECTATOR)
game.init()
episodes = 10
print("")
for i in range(episodes):
print("Episode #" +str(i+1))
game.new_episode()
while not game.is_episode_finished():
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
game.advance_action()
a = game.get_last_action()
r = game.get_last_reward()
print("state #"+str(s.number))
print("game variables: ", misc)
print("action:", a)
print("reward:",r)
print("=====================")
print("episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
sleep(2.0)
game.close()
|
#!/usr/bin/python
#####################################################################
# This script tests performance in frames per second.
# Change iters, resolution, window visibility, use get_state or not.
# It should give you some idea how fast the framework can work on
# your hardware. The test involves copying the state to make it more
# similar to any reasonable usage. Comment out the line with get_state
# to exclude the copying process.
#####################################################################
from __future__ import print_function
from vizdoom import *
from random import choice
from vizdoom import ScreenResolution as res
from time import time
# Some options:
resolution = res.RES_320X240
screen_format = ScreenFormat.DEPTH_BUFFER8
iterations = 10000
game = DoomGame()
game.load_config("../../examples/config/basic.cfg")
game.set_screen_resolution(resolution)
game.set_screen_format(screen_format)
game.set_window_visible(False)
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
left = actions[0]
right = actions[1]
shoot = actions[2]
idle = [False,False,False]
start = time()
print("Checking FPS rating. It may take some time. Be patient.")
for i in range(iterations):
if game.is_episode_finished():
game.new_episode()
# Copying happens here
s = game.get_state()
game.make_action(choice(actions))
end=time()
t = end-start
print("Results:")
print("Iterations:", iterations)
print("Resolution:", resolution)
print("time:",round(t,3))
print("fps: ",round(iterations/t,2))
game.close()
|
#!/usr/bin/python
#####################################################################
# This script presents different formats of the screen buffer.
# OpenCV is used here to display images, install it or remove any
# references to cv2
# Configuration is loaded from "../../examples/config/basic.cfg" file.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import *
from time import sleep
from time import time
from random import choice
import cv2
game = DoomGame()
# Use other config file if you wish.
game.load_config("../../examples/config/basic.cfg")
#game.set_window_visible(False)
# Just uncomment the desired format. The last uncommented one will be applied.
# Formats with C were omitted because they are not cv2 friendly.
#game.set_screen_format(ScreenFormat.RGB24)
#game.set_screen_format(ScreenFormat.ARGB32)
#game.set_screen_format(ScreenFormat.GRAY8)
# This is most fun. It looks best if you invert the colors.
game.set_screen_format(ScreenFormat.DEPTH_BUFFER8)
# These formats can be used, but they do not make much sense for cv2; you'll just get mixed-up colors.
#game.set_screen_format(ScreenFormat.BGR24)
#game.set_screen_format(ScreenFormat.RGBA32)
#game.set_screen_format(ScreenFormat.BGRA32)
#game.set_screen_format(ScreenFormat.ABGR32)
# This one makes no sense in particular.
#game.set_screen_format(ScreenFormat.DOOM_256_COLORS)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
episodes = 10
# sleep time in ms
sleep_time = 20
for i in range(episodes):
print("Episode #" +str(i+1))
# Not needed for the first episode but the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state and possibly does something with it
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
# Gray8 shape is not cv2 compliant
if game.get_screen_format() in [ScreenFormat.GRAY8, ScreenFormat.DEPTH_BUFFER8]:
img = img.reshape(img.shape[1],img.shape[2],1)
# Display the image here!
cv2.imshow('Doom Buffer',img)
cv2.waitKey(sleep_time)
# Makes a random action and saves the reward.
r = game.make_action(choice(actions))
print("State #" +str(s.number))
print("Game Variables:", misc)
print("Last Reward:",r)
print("=====================")
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
cv2.destroyAllWindows()
|
#!/usr/bin/python
#####################################################################
# This script presents how to run deterministic episodes by setting
# the seed. After setting the seed every episode will look the same
# (provided the agent behaves deterministically, of course).
# Configuration is loaded from "../../examples/config/<SCENARIO_NAME>.cfg" file.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
#
# Game variables from state and last reward are printed.
#
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import *
from random import choice
import itertools as it
from time import sleep
game = DoomGame()
# Choose the scenario config file you wish to watch.
# Don't load two configs because the second will override the first one.
# Multiple config files are OK, but combining these particular ones doesn't make much sense.
game.load_config("../../examples/config/basic.cfg")
#game.load_config("../../examples/config/deadly_corridor.cfg")
#game.load_config("../../examples/config/defend_the_center.cfg")
#game.load_config("../../examples/config/defend_the_line.cfg")
#game.load_config("../../examples/config/health_gathering.cfg")
#game.load_config("../../examples/config/my_way_home.cfg")
#game.load_config("../../examples/config/predict_position.cfg")
game.set_screen_resolution(ScreenResolution.RES_640X480)
seed = 1234
# Sets the seed. It could be after init as well.
game.set_seed(seed)
game.init()
# Creates all possible actions depending on how many buttons there are.
actions_num = game.get_available_buttons_size()
actions = []
for perm in it.product([False, True], repeat=actions_num):
actions.append(list(perm))
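# For example, with 3 available buttons this produces 2**3 == 8 actions,
# from [False, False, False] up to [True, True, True].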
episodes = 10
sleep_time = 0.028
for i in range(episodes):
print("Episode #" + str(i+1))
# Seed can be changed anytime. It will affect next episodes.
# game.set_seed(seed)
game.new_episode()
while not game.is_episode_finished():
# Gets the state and possibly does something with it
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
# Check which action you chose!
r = game.make_action(choice(actions))
print("State #" + str(s.number))
print("Game Variables:", misc)
print("Last Reward:", r)
print("Seed:", game.get_seed())
print("=====================")
# Sleep some time because processing is too fast to watch.
if sleep_time>0:
sleep(sleep_time)
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
game.close()
|
#!/usr/bin/python
#####################################################################
# This script presents how to use the most basic features of the environment.
# It configures the engine, and makes the agent perform random actions.
# It also gets current state and reward earned with the action.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import DoomGame
from vizdoom import Mode
from vizdoom import Button
from vizdoom import GameVariable
from vizdoom import ScreenFormat
from vizdoom import ScreenResolution
# Or just use from vizdoom import *
from random import choice
from time import sleep
from time import time
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used, in-code configuration will still work. Note that the most recent changes will add to previous ones.
#game.load_config("../../examples/config/basic.cfg")
# Sets path to the vizdoom engine executable which will be spawned as a separate process. Default is "./vizdoom".
game.set_vizdoom_path("../../bin/vizdoom")
# Sets path to doom2 iwad resource file which contains the actual doom game. Default is "./doom2.wad".
game.set_doom_game_path("../../scenarios/freedoom2.wad")
#game.set_doom_game_path("../../scenarios/doom2.wad") # Not provided with environment due to licences.
# Sets path to additional resources iwad file which is basically your scenario iwad.
# If not specified default doom2 maps will be used and it's pretty much useless... unless you want to play doom.
game.set_doom_scenario_path("../../scenarios/basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# Sets the screen buffer format. Not used here but now you can change it. Default is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_crosshair(False)
game.set_render_weapon(True)
game.set_render_decals(False)
game.set_render_particles(False)
# Adds buttons that will be allowed.
game.add_available_button(Button.MOVE_LEFT)
game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.ATTACK)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 200 tics (actions)
game.set_episode_timeout(200)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the living reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Define some actions. Each list entry corresponds to declared buttons:
# MOVE_LEFT, MOVE_RIGHT, ATTACK
# 5 more combinations are naturally possible but only 3 are included for transparency when watching.
actions = [[True,False,False],[False,True,False],[False,False,True]]
# Run this many episodes
episodes = 10
# Sets time that will pause the engine after each action.
# Without this everything would go too fast for you to keep track of what's happening.
# The value is quite arbitrary; it's nice to watch with my hardware setup.
sleep_time = 0.028
for i in range(episodes):
print("Episode #" + str(i+1))
# Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state
s = game.get_state()
# Makes a random action and remembers the reward.
r = game.make_action(choice(actions))
# Prints state's game variables. Printing the image is quite pointless.
print("State #" + str(s.number))
print("Game variables:", s.game_variables[0])
print("Reward:", r)
print("=====================")
if sleep_time>0:
sleep(sleep_time)
# Check how the episode went.
print("Episode finished.")
print("total reward:", game.get_total_reward())
print("************************")
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from random import choice
game = DoomGame()
game.set_vizdoom_path("../../bin/vizdoom")
# Use CIG example config or Your own.
game.load_config("../../examples/config/cig.cfg")
# Select game and map You want to use.
game.set_doom_game_path("../../scenarios/freedoom2.wad")
#game.set_doom_game_path("../../scenarios/doom2.wad") # Not provided with environment due to licences
game.set_doom_map("map01") # Limited deathmatch.
#game.set_doom_map("map02") # Full deathmatch.
# Start multiplayer game only with Your AI (with options that will be used in the competition, details in cig_host example).
game.add_game_args("-host 1 -deathmatch +timelimit 10.0 "
"+sv_forcerespawn 1 +sv_noautoaim 1 +sv_respawnprotect 1 +sv_spawnfarthest 1")
# Name Your AI.
game.add_game_args("+name AI")
# Multiplayer requires the use of asynchronous modes, but when playing only with bots, synchronous modes can also be used.
game.set_mode(Mode.PLAYER)
# game.set_window_visible(false)
game.init()
# Three example actions
actions = [[1,0,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0,0],[0,0,1,0,0,0,0,0,0]]
# Add bots (file examples/bots.cfg must be placed in the same directory as the Doom executable file).
bots_number = 7
for i in range(bots_number):
game.send_game_command("addbot")
# Play until the game (episode) is over.
while not game.is_episode_finished():
if game.is_player_dead():
# Use this to respawn immediately after death, new state will be available.
game.respawn_player()
# Or observe the game until automatic respawn.
#game.advance_action();
#continue;
s = game.get_state()
# Analyze the state.
game.make_action(choice(actions))
# Make your action.
print("Frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.close()
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from random import choice
game = DoomGame()
# Use CIG example config or Your own.
game.load_config("../../examples/config/cig.cfg")
# Select game and map You want to use.
game.set_doom_game_path("../../scenarios/freedoom2.wad")
#game.set_doom_game_path("../../scenarios/doom2.wad") # Not provided with environment due to licences
game.set_doom_map("map01") # Limited deathmatch.
#game.set_doom_map("map02") # Full deathmatch.
# Host game with options that will be used in the competition.
#
game.add_game_args("-host 2 " # This machine will function as a host for a multiplayer game with this many players (including this machine). It will wait for other machines to connect using the -join parameter and then start the game when everyone is connected.
"-deathmatch " # Deathmatch rules are used for the game.
"+timelimit 10.0 " # The game (episode) will end after this many minutes have elapsed.
"+sv_forcerespawn 1 " # Players will respawn automatically after they die.
"+sv_noautoaim 1 " # Autoaim is disabled for all players.
"+sv_respawnprotect 1 " # Players will be invulnerable for two second after spawning.
"+sv_spawnfarthest 1 " # Players will be spawned as far as possible from any other players.
"+vizdoom_nocheat 1") # Disables depth buffer and the ability to use commands that could interfere with multiplayer game.
# Name Your AI.
game.add_game_args("+name AI")
# Multiplayer requires the use of asynchronous modes.
game.set_mode(Mode.ASYNC_PLAYER)
# game.set_window_visible(false)
game.init()
# Three example actions
actions = [[1,0,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0,0],[0,0,1,0,0,0,0,0,0]]
# Play until the game (episode) is over.
while not game.is_episode_finished():
if game.is_player_dead():
# Use this to respawn immediately after death, new state will be available.
game.respawn_player()
# Or observe the game until automatic respawn.
#game.advance_action();
#continue;
s = game.get_state()
# Analyze the state.
game.make_action(choice(actions))
# Make your action.
print("Frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.close()
|
#!/usr/bin/python
from __future__ import print_function
import itertools as it
import pickle
from random import sample, randint, random
from time import time
from vizdoom import *
import cv2
import numpy as np
import theano
from lasagne.init import GlorotUniform, Constant
from lasagne.layers import Conv2DLayer, InputLayer, DenseLayer, MaxPool2DLayer, get_output, get_all_params, \
get_all_param_values, set_all_param_values
from lasagne.nonlinearities import rectify
from lasagne.objectives import squared_error
from lasagne.updates import rmsprop
from theano import tensor
from tqdm import *
from time import sleep
# Q-learning settings:
replay_memory_size = 10000
discount_factor = 0.99
start_epsilon = float(1.0)
end_epsilon = float(0.1)
epsilon = start_epsilon
static_epsilon_steps = 5000
epsilon_decay_steps = 20000
epsilon_decay_stride = (start_epsilon - end_epsilon) / epsilon_decay_steps
# Max reward is about 100 (for killing) so it'll be normalized
reward_scale = 0.01
# Some of the network's and learning settings:
learning_rate = 0.00001
batch_size = 32
epochs = 20
training_steps_per_epoch = 5000
test_episodes_per_epoch = 100
# Other parameters
skiprate = 7
downsampled_x = 60
downsampled_y = int(2/3.0*downsampled_x)
episodes_to_watch = 10
# Where to save and load network's weights.
params_savefile = "basic_params"
params_loadfile = None
# Function for converting images
def convert(img):
img = img[0].astype(np.float32) / 255.0
img = cv2.resize(img, (downsampled_x, downsampled_y))
return img
# Replay memory:
class ReplayMemory:
def __init__(self, capacity):
state_shape = (capacity, 1, downsampled_y, downsampled_x)
self.s1 = np.zeros(state_shape, dtype=np.float32)
self.s2 = np.zeros(state_shape, dtype=np.float32)
self.a = np.zeros(capacity, dtype=np.int32)
self.r = np.zeros(capacity, dtype=np.float32)
self.nonterminal = np.zeros(capacity, dtype=np.bool_)
self.size = 0
self.capacity = capacity
self.oldest_index = 0
def add_transition(self, s1, action, s2, reward):
self.s1[self.oldest_index, 0] = s1
if s2 is None:
self.nonterminal[self.oldest_index] = False
else:
self.s2[self.oldest_index, 0] = s2
self.nonterminal[self.oldest_index] = True
self.a[self.oldest_index] = action
self.r[self.oldest_index] = reward
self.oldest_index = (self.oldest_index + 1) % self.capacity
self.size = min(self.size + 1, self.capacity)
def get_sample(self, sample_size):
i = sample(range(0, self.size), sample_size)
return self.s1[i], self.s2[i], self.a[i], self.r[i], self.nonterminal[i]
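# Note: the memory behaves as a ring buffer; once `capacity` transitions are
# stored, the oldest entries are overwritten (oldest_index wraps around),
# while `size` is capped at `capacity` so sampling never reads empty slots.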
# Creates the network:
def create_network(available_actions_num):
# Creates the input variables
s1 = tensor.tensor4("States")
a = tensor.vector("Actions", dtype="int32")
q2 = tensor.vector("Next State best Q-Value")
r = tensor.vector("Rewards")
nonterminal = tensor.vector("Nonterminal", dtype="int8")
# Creates the input layer of the network.
dqn = InputLayer(shape=[None, 1, downsampled_y, downsampled_x], input_var=s1)
# Adds 3 convolutional layers, each followed by a max pooling layer.
dqn = Conv2DLayer(dqn, num_filters=32, filter_size=[8, 8],
nonlinearity=rectify, W=GlorotUniform("relu"),
b=Constant(.1))
dqn = MaxPool2DLayer(dqn, pool_size=[2, 2])
dqn = Conv2DLayer(dqn, num_filters=64, filter_size=[4, 4],
nonlinearity=rectify, W=GlorotUniform("relu"),
b=Constant(.1))
dqn = MaxPool2DLayer(dqn, pool_size=[2, 2])
dqn = Conv2DLayer(dqn, num_filters=64, filter_size=[3, 3],
nonlinearity=rectify, W=GlorotUniform("relu"),
b=Constant(.1))
dqn = MaxPool2DLayer(dqn, pool_size=[2, 2])
# Adds a single fully connected layer.
dqn = DenseLayer(dqn, num_units=512, nonlinearity=rectify, W=GlorotUniform("relu"),
b=Constant(.1))
# Adds a single fully connected layer which is the output layer.
# (no nonlinearity as it is for approximating an arbitrary real function)
dqn = DenseLayer(dqn, num_units=available_actions_num, nonlinearity=None)
# Theano stuff
q = get_output(dqn)
# Only q for the chosen actions is updated more or less according to following formula:
# target Q(s,a,t) = r + gamma * max Q(s2,_,t+1)
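# In other words, for each transition in the minibatch only the Q-value of the
# action actually taken is pushed towards r + gamma * max_a' Q(s2, a'); the q2
# input already holds that maximum and `nonterminal` zeroes the bootstrap term
# for terminal transitions.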
target_q = tensor.set_subtensor(q[tensor.arange(q.shape[0]), a], r + discount_factor * nonterminal * q2)
loss = squared_error(q, target_q).mean()
# Updates the parameters according to the computed gradient using rmsprop.
params = get_all_params(dqn, trainable=True)
updates = rmsprop(loss, params, learning_rate)
# Compiles theano functions
print "Compiling the network ..."
function_learn = theano.function([s1, q2, a, r, nonterminal], loss, updates=updates, name="learn_fn")
function_get_q_values = theano.function([s1], q, name="eval_fn")
function_get_best_action = theano.function([s1], tensor.argmax(q), name="test_fn")
print "Network compiled."
# Returns Theano objects for the net and functions.
# We wouldn't need the net anymore but it is nice to save your model.
return dqn, function_learn, function_get_q_values, function_get_best_action
# Creates and initializes the environment.
print "Initializing doom..."
game = DoomGame()
game.load_config("../../examples/config/learning.cfg")
game.init()
print "Doom initialized."
# Creates all possible actions.
n = game.get_available_buttons_size()
actions = []
for perm in it.product([0, 1], repeat=n):
actions.append(list(perm))
# Creates replay memory which will store the transitions
memory = ReplayMemory(capacity=replay_memory_size)
net, learn, get_q_values, get_best_action = create_network(len(actions))
# Loads the network's parameters if the loadfile was specified
if params_loadfile is not None:
params = pickle.load(open(params_loadfile, "rb"))
set_all_param_values(net, params)
# Makes an action according to epsilon greedy policy and performs a single backpropagation on the network.
def perform_learning_step():
# Checks the state and downsamples it.
s1 = convert(game.get_state().image_buffer)
# With probability epsilon makes a random action.
if random() <= epsilon:
a = randint(0, len(actions) - 1)
else:
# Chooses the best action according to the network.
a = get_best_action(s1.reshape([1, 1, downsampled_y, downsampled_x]))
reward = game.make_action(actions[a], skiprate + 1)
reward *= reward_scale
if game.is_episode_finished():
s2 = None
else:
s2 = convert(game.get_state().image_buffer)
# Remember the transition that was just experienced.
memory.add_transition(s1, a, s2, reward)
# Gets a single, random minibatch from the replay memory and learns from it.
if memory.size > batch_size:
s1, s2, a, reward, nonterminal = memory.get_sample(batch_size)
q2 = np.max(get_q_values(s2), axis=1)
loss = learn(s1, q2, a, reward, nonterminal)
else:
loss = 0
return loss
print "Starting the training!"
steps = 0
for epoch in range(epochs):
print "\nEpoch", epoch
train_time = 0
train_episodes_finished = 0
train_loss = []
train_rewards = []
train_start = time()
print "\nTraining ..."
game.new_episode()
for learning_step in tqdm(range(training_steps_per_epoch)):
# Learning and action is here.
train_loss.append(perform_learning_step())
# If the episode finished, save the total reward and start a new episode.
if game.is_episode_finished():
r = game.get_total_reward()
train_rewards.append(r)
game.new_episode()
train_episodes_finished += 1
steps += 1
if steps > static_epsilon_steps:
epsilon = max(end_epsilon, epsilon - epsilon_decay_stride)
train_end = time()
train_time = train_end - train_start
mean_loss = np.mean(train_loss)
print(train_episodes_finished, "training episodes played.")
print("Training results:")
train_rewards = np.array(train_rewards)
print "mean:", train_rewards.mean(), "std:", train_rewards.std(), "max:", train_rewards.max(), "min:", train_rewards.min(), "mean_loss:", mean_loss, "epsilon:", epsilon
print "t:", str(round(train_time, 2)) + "s"
# Testing
test_episode = []
test_rewards = []
test_start = time()
print "Testing..."
for test_episode in tqdm(range(test_episodes_per_epoch)):
game.new_episode()
while not game.is_episode_finished():
state = convert(game.get_state().image_buffer).reshape([1, 1, downsampled_y, downsampled_x])
best_action_index = get_best_action(state)
game.make_action(actions[best_action_index], skiprate + 1)
r = game.get_total_reward()
test_rewards.append(r)
test_end = time()
test_time = test_end - test_start
print "Test results:"
test_rewards = np.array(test_rewards)
print "mean:", test_rewards.mean(), "std:", test_rewards.std(), "max:", test_rewards.max(), "min:", test_rewards.min()
print "t:", str(round(test_time, 2)) + "s"
if params_savefile:
print "Saving network weigths to:", params_savefile
pickle.dump(get_all_param_values(net), open(params_savefile, "w"))
print "========================="
print "Training finished! Time to watch!"
game.close()
game.set_window_visible(True)
game.set_mode(Mode.ASYNC_PLAYER)
game.init()
# Sleeping time between episodes, for convenience.
episode_sleep = 0.5
for i in range(episodes_to_watch):
game.new_episode()
while not game.is_episode_finished():
state = convert(game.get_state().image_buffer).reshape([1, 1, downsampled_y, downsampled_x])
best_action_index = get_best_action(state)
game.set_action(actions[best_action_index])
for i in range(skiprate+1):
game.advance_action()
sleep(episode_sleep)
r = game.get_total_reward()
print "Total reward: ", r
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from random import choice
game = DoomGame()
# Use CIG example config or Your own.
game.load_config("../../examples/config/cig.cfg")
# Select game and map You want to use.
game.set_doom_game_path("../../scenarios/freedoom2.wad")
#game.set_doom_game_path("../../scenarios/doom2.wad") # Not provided with environment due to licences
game.set_doom_map("map01") # Limited deathmatch.
#game.set_doom_map("map02") # Full deathmatch.
# Join existing game.
game.add_game_args("-join 127.0.0.1") # Connect to a host for a multiplayer game.
# Name Your AI.
game.add_game_args("+name AI")
# Multiplayer requires the use of asynchronous modes.
game.set_mode(Mode.ASYNC_PLAYER)
# game.set_window_visible(false)
game.init()
# Three example actions
actions = [[1,0,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0,0],[0,0,1,0,0,0,0,0,0]]
# Play until the game (episode) is over.
while not game.is_episode_finished():
if game.is_player_dead():
# Use this to respawn immediately after death, new state will be available.
game.respawn_player()
# Or observe the game until automatic respawn.
#game.advance_action();
#continue;
s = game.get_state()
# Analyze the state.
game.make_action(choice(actions))
# Make your action.
print("Frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.close()
|
import os
import pkg_resources
from setuptools import setup, find_packages
setup(
name="clip",
py_modules=["clip"],
version="1.0",
description="",
author="OpenAI",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
include_package_data=True,
extras_require={'dev': ['pytest']},
)
|
from clip.clip import tokenize as _tokenize, load as _load, available_models as _available_models
import re
import string
dependencies = ["torch", "torchvision", "ftfy", "regex", "tqdm"]
# For compatibility (cannot include special characters in function name)
model_functions = { model: re.sub(f'[{string.punctuation}]', '_', model) for model in _available_models()}
def _create_hub_entrypoint(model):
def entrypoint(**kwargs):
return _load(model, **kwargs)
entrypoint.__doc__ = f"""Loads the {model} CLIP model
Parameters
----------
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The {model} CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
return entrypoint
def tokenize():
return _tokenize
_entrypoints = {model_functions[model]: _create_hub_entrypoint(model) for model in _available_models()}
globals().update(_entrypoints) |
from .clip import *
|
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.flatten(start_dim=2).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x[:1], key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x.squeeze(0)
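# Note: only the first token (the spatial mean) is used as the query, so the
# pooling returns a single attention-weighted embedding per image.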
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisionTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
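# For example, with context_length == 3 the resulting additive mask is
# [[0., -inf, -inf],
#  [0.,   0., -inf],
#  [0.,   0.,   0.]]
# so each text token can attend only to itself and to earlier positions.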
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
|
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(opened_file, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def _node_get(node: torch._C.Node, key: str):
"""Gets attributes of a node which is polymorphic over return type.
From https://github.com/pytorch/pytorch/pull/82628
"""
sel = node.kindOf(key)
return getattr(node, sel)(key)
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(_node_get(node, "value")).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if _node_get(inputs[i].node(), "value") == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
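# Illustrative usage (shapes assume the default context_length of 77):
#   tokens = tokenize(["a diagram", "a dog"])  # -> tensor of shape [2, 77]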
|
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
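# For example, printable bytes in the three chosen ranges (e.g. b"A") map to
# themselves, while the remaining bytes (including the space and control bytes)
# are remapped to code points starting at 256, which keeps the mapping reversible.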
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
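# For example, get_pairs(("l", "o", "w", "e", "r</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}.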
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
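# For example, bpe("lower") starts from ("l", "o", "w", "e", "r</w>") and then
# repeatedly merges the adjacent pair with the lowest merge rank until no pair
# is left in bpe_ranks; the exact result depends on the learned merges.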
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize('model_name', clip.available_models())
def test_consistency(model_name):
device = "cpu"
jit_model, transform = clip.load(model_name, device=device, jit=True)
py_model, _ = clip.load(model_name, device=device, jit=False)
image = transform(Image.open("CLIP.png")).unsqueeze(0).to(device)
text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
with torch.no_grad():
logits_per_image, _ = jit_model(image, text)
jit_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
logits_per_image, _ = py_model(image, text)
py_probs = logits_per_image.softmax(dim=-1).cpu().numpy()
assert np.allclose(jit_probs, py_probs, atol=0.01, rtol=0.1)
|
from setuptools import setup, find_packages
setup(
name="jcm",
version="0.1",
packages=find_packages(),
package_dir={"jcm": "jcm"},
install_requires=[
"wandb",
"clean-fid",
"torchvision",
"torch",
"tensorflow",
"tensorboard",
"absl-py",
"flax",
"jax==0.4.10",
"dm-haiku",
"optax",
"diffrax",
"ml-collections",
"requests",
"scikit-image",
"termcolor",
"mpi4py",
"smart-open[all]",
"azure-identity",
"azure-storage-blob",
"pandas",
"seaborn",
"tqdm",
"huggingface_hub",
"h5py",
"flaxmodels",
"torch-fidelity",
],
)
|
# Code modified from https://github.com/GaParmar/clean-fid/blob/main/cleanfid/fid.py
# Original license below:
# MIT License
#
# Copyright (c) 2021 Gaurav Parmar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from cleanfid import fid
import torchvision
import numpy as np
import flax
import logging
from . import checkpoints
import tqdm
import time
import jax
import os
import io
import blobfile
import json
import uuid
import requests
import torch_fidelity
class ResizeDataset(torch.utils.data.Dataset):
"""
A placeholder Dataset that enables parallelizing the resize operation
using multiple CPU cores
files: list of images as np arrays with values in [0, 255]
fn_resize: function that takes an np_array as input [0,255]
"""
def __init__(self, files, mode, size=(299, 299), fdir=None):
self.files = files
self.fdir = fdir
self.transforms = torchvision.transforms.ToTensor()
self.size = size
self.fn_resize = fid.build_resizer(mode)
self.custom_image_tranform = lambda x: x
def __len__(self):
return len(self.files)
def __getitem__(self, i):
img_np = self.files[i]
# apply a custom image transform before resizing the image to 299x299
img_np = self.custom_image_tranform(img_np)
# fn_resize expects a np array and returns a np array
img_resized = self.fn_resize(img_np)
# ToTensor() converts to [0,1] only if input in uint8
if img_resized.dtype == "uint8":
img_t = self.transforms(np.array(img_resized)) * 255
elif img_resized.dtype == "float32":
img_t = self.transforms(img_resized)
else:
raise ValueError(f"Unexpected dtype after resize: {img_resized.dtype}")
return img_t
class TorchDataset(torch.utils.data.Dataset):
"""
A placeholder Dataset that enables parallelizing the resize operation
using multiple CPU cores
files: list of images as np arrays with values in [0, 255]
fn_resize: function that takes an np_array as input [0,255]
"""
def __init__(self, files, mode, size=(299, 299), fdir=None):
self.files = files
self.fdir = fdir
self.transforms = torchvision.transforms.ToTensor()
self.size = size
self.fn_resize = fid.build_resizer(mode)
self.custom_image_tranform = lambda x: x
def __len__(self):
return len(self.files)
def __getitem__(self, i):
img_np = self.files[i]
# apply a custom image transform before resizing the image to 299x299
img_np = self.custom_image_tranform(img_np)
# fn_resize expects a np array and returns a np array
img_resized = self.fn_resize(img_np)
# ToTensor() converts to [0,1] only if input in uint8
assert img_resized.dtype == "uint8"
img_t = (self.transforms(np.array(img_resized)) * 255).to(torch.uint8)
return img_t
def compute_fid(
samples,
feat_model,
dataset_name="cifar10",
dataset_res=32,
dataset_split="train",
batch_size=1024,
num_workers=12,
mode="legacy_tensorflow",
device=torch.device("cuda:0"),
):
dataset = ResizeDataset(samples, mode=mode)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
l_feats = []
for batch in tqdm.tqdm(dataloader):
l_feats.append(fid.get_batch_features(batch, feat_model, device))
np_feats = np.concatenate(l_feats)
mu = np.mean(np_feats, axis=0)
sigma = np.cov(np_feats, rowvar=False)
ref_mu, ref_sigma = fid.get_reference_statistics(
dataset_name, dataset_res, mode=mode, seed=0, split=dataset_split
)
score = fid.frechet_distance(mu, sigma, ref_mu, ref_sigma)
return score
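# Usage sketch (illustration only; not part of the original file). `samples` is
# assumed to be a uint8 array of shape (N, H, W, 3) with values in [0, 255]:
#
#     feat_model = fid.build_feature_extractor("legacy_tensorflow", torch.device("cuda:0"))
#     score = compute_fid(samples, feat_model, dataset_name="cifar10", dataset_res=32,
#                         dataset_split="train", mode="legacy_tensorflow")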
def compute_all_metrics(
samples,
dataset_name="cifar10-train",
mode="legacy_tensorflow",
batch_size=1024,
num_workers=12,
):
dataset = TorchDataset(samples, mode=mode)
metrics_dict = torch_fidelity.calculate_metrics(
input1=dataset,
input2=dataset_name,
cuda=True,
isc=True,
fid=True,
verbose=False,
)
return metrics_dict
def get_samples_from_ckpt(folder, ckpt):
files = list(
blobfile.glob(os.path.join(folder, f"ckpt_{ckpt}_host_*", "samples_*.npz"))
)
all_samples = []
for file in files:
with blobfile.BlobFile(file, "rb") as fin:
all_samples.append(np.load(fin)["samples"])
if len(all_samples) >= 1:
all_samples = np.concatenate(all_samples)
else:
all_samples = np.zeros((0, 32, 32, 3), dtype=np.uint8)
return all_samples
def get_fids(folder, ckpt_range, mode, device):
ckpts = []
fids = []
feat_model = fid.build_feature_extractor(mode, device)
for ckpt in ckpt_range:
ckpts.append(ckpt)
print("Loading samples from ckpt", ckpt)
data = get_samples_from_ckpt(folder, ckpt)
print(f"data.shape: {data.shape}")
fids.append(
compute_fid(
data[:50000],
mode="legacy_tensorflow",
device=device,
feat_model=feat_model,
)
)
print("FID", fids[-1])
return ckpts, fids
def compute_metrics(
config,
workdir,
eval_folder,
mode="legacy_tensorflow",
device=torch.device("cuda:0"),
):
"""Compute the FID metrics from given samples.
Args:
config (dict): The config dict.
workdir (str): The working directory.
eval_folder (str): The folder to store the evaluation results.
"""
eval_dir = os.path.join(workdir, eval_folder)
blobfile.makedirs(eval_dir)
@flax.struct.dataclass
class MetricsMeta:
ckpt_id: int
metrics_meta = MetricsMeta(
ckpt_id=config.eval.begin_ckpt,
)
metrics_meta = checkpoints.restore_checkpoint(
eval_dir, metrics_meta, step=None, prefix="metrics_meta_"
)
feat_model = fid.build_feature_extractor(mode, device)
begin_ckpt = max(metrics_meta.ckpt_id, config.eval.begin_ckpt)
for ckpt in range(begin_ckpt, config.eval.end_ckpt + 1):
print(f"Start metric evaluation for ckpt {ckpt}")
all_samples = get_samples_from_ckpt(eval_dir, ckpt)
waiting_message_printed = False
while all_samples.shape[0] < config.eval.num_samples:
if not waiting_message_printed and jax.process_index() == 0:
logging.warning(f"Waiting for the arrival of samples for ckpt {ckpt}")
waiting_message_printed = True
time.sleep(100)
all_samples = get_samples_from_ckpt(eval_dir, ckpt)
fid_score = compute_fid(
all_samples[: config.eval.num_samples],
mode=mode,
device=device,
feat_model=feat_model,
)
with blobfile.BlobFile(
os.path.join(eval_dir, f"metrics_{ckpt}.npz"),
"wb",
) as fout:
io_buffer = io.BytesIO()
np.savez_compressed(io_buffer, fid=fid_score)
fout.write(io_buffer.getvalue())
metrics_meta = metrics_meta.replace(ckpt_id=ckpt + 1)
checkpoints.save_checkpoint(
eval_dir, metrics_meta, step=ckpt, keep=1, prefix="metrics_meta_"
)
meta_files = blobfile.glob(os.path.join(eval_dir, "metrics_meta_*.npz"))
for file in meta_files:
blobfile.remove(file)
def obtain_feature_extractor(mode="legacy_tensorflow", device=torch.device("cuda:0")):
return fid.build_feature_extractor(mode, device)
def compute_fid_jupyter(
all_samples,
feature_extractor,
mode="legacy_tensorflow",
device=torch.device("cuda:0"),
):
"""Compute the FID metrics from given samples.
Args:
config (dict): The config dict.
workdir (str): The working directory.
eval_folder (str): The folder to store the evaluation results.
"""
feat_model = feature_extractor
fid_score = compute_fid(
all_samples,
mode=mode,
device=device,
feat_model=feat_model,
)
return fid_score
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Return training and evaluation/test datasets from config files."""
import jax
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor, RandomHorizontalFlip, Compose, Normalize
from torch.utils.data import DataLoader
import torch
import numpy as np
import blobfile
def get_dataset(
config,
additional_dim=None,
uniform_dequantization=False,
evaluation=False,
drop_last=True,
):
"""Create data loaders for training and evaluation.
Args:
config: A ml_collection.ConfigDict parsed from config files.
additional_dim: An integer or `None`. If present, add one additional dimension to the output data,
which equals the number of steps jitted together.
uniform_dequantization: If `True`, add uniform dequantization to images.
evaluation: If `True`, fix number of epochs to 1. Default is `False`.
drop_last: If `True`, drop the last batch if it is smaller than the batch size. Default is `True`.
if `False`, the last batch will be padded with zeros and a mask will be returned.
Returns:
train_ds_loader, eval_ds_loader: PyTorch `DataLoader` objects for training and evaluation.
"""
# Compute batch size for this worker.
batch_size = (
config.training.batch_size if not evaluation else config.eval.batch_size
)
if batch_size % jax.device_count() != 0:
raise ValueError(
f"Batch sizes ({batch_size} must be divided by"
f"the number of devices ({jax.device_count()})"
)
per_device_batch_size = batch_size // jax.device_count()
# Create additional data dimension when jitting multiple steps together
if additional_dim is None:
batch_dims = [jax.local_device_count(), per_device_batch_size]
else:
batch_dims = [jax.local_device_count(), additional_dim, per_device_batch_size]
# Create dataset builders for each dataset.
# assert config.data.dataset == "CIFAR10", "Only CIFAR10 is supported for now."
if config.data.dataset.upper() == "CIFAR10":
def uniform_deq(image_th):
if uniform_dequantization:
return (image_th * 255.0 + torch.rand_like(image_th)) / 256.0
else:
return image_th
def data_augmentation(image):
if config.data.random_flip is True and not evaluation:
return RandomHorizontalFlip(p=0.5)(image)
else:
return image
transforms = Compose(
[
data_augmentation,
ToTensor(),
uniform_deq,
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
train_ds = CIFAR10("./data", train=True, download=True, transform=transforms)
eval_ds = CIFAR10("./data", train=False, download=True, transform=transforms)
@torch.no_grad()
def collate_fn(batch):
image_tensor = torch.stack([x[0] for x in batch], dim=0)
label_tensor = torch.tensor([x[1] for x in batch])
if image_tensor.shape[0] == int(np.prod(batch_dims)):
image_tensor = (
image_tensor.reshape(batch_dims + [3, 32, 32])
.transpose(-3, -2)
.transpose(-2, -1)
)
label_tensor = label_tensor.reshape(batch_dims)
return {
"image": image_tensor,
"label": label_tensor,
"mask": torch.ones_like(label_tensor),
}
# If this batch is smaller than the expected size (the last, incomplete batch), pad it with zeros and mask out the padding.
else:
pad_size = int(np.prod(batch_dims)) - image_tensor.shape[0]
padded_image = torch.concat(
[
image_tensor,
torch.zeros(pad_size, 3, 32, 32, dtype=image_tensor.dtype),
],
axis=0,
)
padded_label = torch.concat(
[
label_tensor,
torch.zeros(pad_size, dtype=label_tensor.dtype),
],
axis=0,
)
mask = torch.ones(int(np.prod(batch_dims)))
mask[image_tensor.shape[0] :] = 0.0
padded_image = (
padded_image.reshape(batch_dims + [3, 32, 32])
.transpose(-3, -2)
.transpose(-2, -1)
)
padded_label = padded_label.reshape(batch_dims)
mask = mask.reshape(batch_dims)
return {"image": padded_image, "label": padded_label, "mask": mask}
train_ds_loader = DataLoader(
train_ds,
batch_size=int(np.prod(batch_dims)),
shuffle=not evaluation,
num_workers=16,
drop_last=drop_last,
collate_fn=collate_fn,
)
eval_ds_loader = DataLoader(
eval_ds,
batch_size=int(np.prod(batch_dims)),
shuffle=not evaluation,
num_workers=16,
drop_last=drop_last,
collate_fn=collate_fn,
)
return train_ds_loader, eval_ds_loader
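# Illustrative sketch (not part of the original file) of consuming the loaders;
# the shapes below assume CIFAR-10 and `additional_dim=None`:
#
#     train_loader, eval_loader = get_dataset(config)
#     batch = next(iter(train_loader))
#     # batch["image"]: (local_device_count, per_device_batch_size, 32, 32, 3)
#     # batch["label"], batch["mask"]: (local_device_count, per_device_batch_size)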
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing helper functions.
Handles saving and restoring optimizer checkpoints based on step-number or
other numerical metric in filename. Cleans up older / worse-performing
checkpoint files.
Ported from Tensorflow GFile to blobfile by Yang Song.
"""
from concurrent.futures import thread
import os
import re
from absl import logging
from flax import serialization
import blobfile
# Single-group reg-exps for int or float numerical substrings.
# captures sign:
SIGNED_FLOAT_RE = re.compile(r"([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)")
# does not capture sign:
UNSIGNED_FLOAT_RE = re.compile(r"[-+]?((?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)")
def _checkpoint_path(ckpt_dir, step, prefix="checkpoint_"):
return os.path.join(ckpt_dir, f"{prefix}{step}")
def natural_sort(file_list, signed=True):
"""Natural sort for filenames with numerical substrings.
Args:
file_list: List[str]: list of paths to sort containing numerical
substrings.
signed: bool: if leading '-' (or '+') signs should be included in
numerical substrings as a sign or treated as a separator.
Returns:
List of filenames sorted 'naturally', not lexicographically: any
integer substrings are used to subsort numerically. e.g.
file_1, file_10, file_2 --> file_1, file_2, file_10
file_0.1, file_-0.2, file_2.0 --> file_-0.2, file_0.1, file_2.0
"""
float_re = SIGNED_FLOAT_RE if signed else UNSIGNED_FLOAT_RE
def maybe_num(s):
if float_re.match(s):
return float(s)
else:
return s
def split_keys(s):
return [maybe_num(c) for c in float_re.split(s)]
return sorted(file_list, key=split_keys)
def save_checkpoint(ckpt_dir, target, step, prefix="checkpoint_", keep=1):
"""Save a checkpoint of the model.
Attempts to be pre-emption safe by writing to temporary before
a final rename and cleanup of past files.
Args:
ckpt_dir: str: path to store checkpoint files in.
target: serializable flax object, usually a flax optimizer.
step: int or float: training step number or other metric number.
prefix: str: checkpoint file name prefix.
keep: number of past checkpoint files to keep.
Returns:
Filename of saved checkpoint.
"""
# Write temporary checkpoint file.
logging.info("Saving checkpoint at step: %s", step)
ckpt_tmp_path = _checkpoint_path(ckpt_dir, "tmp", prefix)
ckpt_path = _checkpoint_path(ckpt_dir, step, prefix)
blobfile.makedirs(os.path.dirname(ckpt_path))
with blobfile.BlobFile(ckpt_tmp_path, "wb") as fp:
fp.write(serialization.to_bytes(target))
# Rename once serialization and writing finished.
blobfile.copy(ckpt_tmp_path, ckpt_path, overwrite=True)
blobfile.remove(ckpt_tmp_path)
logging.info("Saved checkpoint at %s", ckpt_path)
# Remove old checkpoint files.
base_path = os.path.join(ckpt_dir, f"{prefix}")
checkpoint_files = natural_sort(blobfile.glob(base_path + "*"))
if len(checkpoint_files) > keep:
old_ckpts = checkpoint_files[:-keep]
for path in old_ckpts:
logging.info("Removing checkpoint at %s", path)
blobfile.remove(path)
return ckpt_path
def latest_checkpoint(ckpt_dir, prefix="checkpoint_"):
"""Retrieve the path of the latest checkpoint in a directory.
Args:
ckpt_dir: str: directory of checkpoints to restore from.
prefix: str: name prefix of checkpoint files.
Returns:
The latest checkpoint path or None if no checkpoints were found.
"""
glob_path = os.path.join(ckpt_dir, f"{prefix}*")
checkpoint_files = natural_sort(blobfile.glob(glob_path))
ckpt_tmp_path = _checkpoint_path(ckpt_dir, "tmp", prefix)
checkpoint_files = [f for f in checkpoint_files if f != ckpt_tmp_path]
if checkpoint_files:
return checkpoint_files[-1]
else:
return None
def restore_checkpoint(
ckpt_dir, target, step=None, prefix="checkpoint_", parallel=True
):
"""Restore last/best checkpoint from checkpoints in path.
Sorts the checkpoint files naturally, returning the highest-valued
file, e.g.:
ckpt_1, ckpt_2, ckpt_3 --> ckpt_3
ckpt_0.01, ckpt_0.1, ckpt_0.001 --> ckpt_0.1
ckpt_-1.0, ckpt_1.0, ckpt_1e5 --> ckpt_1e5
Args:
ckpt_dir: str: checkpoint file or directory of checkpoints to restore from.
target: matching object to rebuild via deserialized state-dict. If None,
the deserialized state-dict is returned as-is.
step: int: step number to load or None to load latest. If specified,
ckpt_dir must be a directory.
prefix: str: name prefix of checkpoint files.
parallel: bool: whether to load seekable checkpoints in parallel, for speed.
Returns:
Restored `target` updated from checkpoint file, or if no step specified and
no checkpoint files present, returns the passed-in `target` unchanged.
If a file path is specified and is not found, the passed-in `target` will be
returned. This is to match the behavior of the case where a directory path
is specified but the directory has not yet been created.
"""
if step:
ckpt_path = _checkpoint_path(ckpt_dir, step, prefix)
if not blobfile.exists(ckpt_path):
raise ValueError(f"Matching checkpoint not found: {ckpt_path}")
else:
if blobfile.isdir(ckpt_dir):
ckpt_path = latest_checkpoint(ckpt_dir, prefix)
if not ckpt_path:
logging.info(f"Found no checkpoint files in {ckpt_dir}")
return target
else:
ckpt_path = ckpt_dir
if not blobfile.exists(ckpt_path):
logging.info(f"Found no checkpoint file at {ckpt_path}")
return target
logging.info("Restoring checkpoint from %s", ckpt_path)
with blobfile.BlobFile(ckpt_path, "rb") as fp:
if parallel and fp.seekable():
buf_size = 128 << 20 # 128M buffer.
fp.seek(0, 2)
fp_size = fp.tell()
fp.seek(0)
num_bufs = fp_size / buf_size
logging.debug("num_bufs: %d", num_bufs)
checkpoint_contents = bytearray(fp_size)
def read_chunk(i):
# NOTE: We have to re-open the file to read each chunk, otherwise the
# parallelism has no effect. But we could reuse the file pointers
# within each thread.
# with gfile.GFile(ckpt_path, "rb") as f:
with blobfile.BlobFile(ckpt_path, "rb") as f:
f.seek(i * buf_size)
buf = f.read(buf_size)
if buf:
checkpoint_contents[
i * buf_size : i * buf_size + len(buf)
] = buf
return len(buf) / buf_size
pool_size = 32
pool = thread.ThreadPoolExecutor(pool_size)
results = pool.map(read_chunk, range(int(num_bufs) + 1))
results = list(results)
pool.shutdown(wait=False)
logging.debug("results: %s", results)
else:
checkpoint_contents = fp.read()
if target is None:
return serialization.msgpack_restore(checkpoint_contents)
else:
return serialization.from_bytes(target, checkpoint_contents)
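# Usage sketch (illustration only; the directory path and the `state` pytree are
# hypothetical placeholders):
#
#     ckpt_path = save_checkpoint("workdir/checkpoints", state, step=1000, keep=3)
#     state = restore_checkpoint("workdir/checkpoints", state)  # restores the latest checkpoint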
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract SDE classes, Reverse SDE, and VE/VP SDEs."""
import abc
import jax.numpy as jnp
import jax
import numpy as np
from .utils import batch_mul
def get_sde(config):
if config.training.sde.lower() == "vpsde":
sde = VPSDE(
beta_min=config.model.beta_min,
beta_max=config.model.beta_max,
N=config.model.num_scales,
)
elif config.training.sde.lower() == "subvpsde":
sde = subVPSDE(
beta_min=config.model.beta_min,
beta_max=config.model.beta_max,
N=config.model.num_scales,
)
elif config.training.sde.lower() == "vesde":
sde = VESDE(
sigma_min=config.model.sigma_min,
sigma_max=config.model.sigma_max,
N=config.model.num_scales,
)
elif config.training.sde.lower() == "kvesde":
sde = KVESDE(
t_min=config.model.t_min,
t_max=config.model.t_max,
N=config.model.num_scales,
rho=config.model.rho,
data_std=config.model.data_std,
)
else:
raise NotImplementedError(f"SDE {config.training.sde} unknown.")
return sde
class SDE(abc.ABC):
"""SDE abstract class. Functions are designed for a mini-batch of inputs."""
def __init__(self, N):
"""Construct an SDE.
Args:
N: number of discretization time steps.
"""
super().__init__()
self.N = N
@property
@abc.abstractmethod
def T(self):
"""End time of the SDE."""
pass
@abc.abstractmethod
def sde(self, x, t):
pass
@abc.abstractmethod
def marginal_prob(self, x, t):
"""Parameters to determine the marginal distribution of the SDE, $p_t(x)$."""
pass
@abc.abstractmethod
def prior_sampling(self, rng, shape):
"""Generate one sample from the prior distribution, $p_T(x)$."""
pass
@abc.abstractmethod
def prior_logp(self, z):
"""Compute log-density of the prior distribution.
Useful for computing the log-likelihood via probability flow ODE.
Args:
z: latent code
Returns:
log probability density
"""
pass
def discretize(self, x, t):
"""Discretize the SDE in the form: x_{i+1} = x_i + f_i(x_i) + G_i z_i.
Useful for reverse diffusion sampling and probability flow sampling.
Defaults to Euler-Maruyama discretization.
Args:
x: a JAX tensor.
t: a JAX float representing the time step (from 0 to `self.T`)
Returns:
f, G
"""
dt = 1 / self.N
drift, diffusion = self.sde(x, t)
f = drift * dt
G = diffusion * jnp.sqrt(dt)
return f, G
def reverse(self, score_fn, probability_flow=False):
"""Create the reverse-time SDE/ODE.
Args:
score_fn: A time-dependent score-based model that takes x and t and returns the score.
probability_flow: If `True`, create the reverse-time ODE used for probability flow sampling.
"""
N = self.N
T = self.T
sde_fn = self.sde
discretize_fn = self.discretize
# Build the class for reverse-time SDE.
class RSDE(self.__class__):
def __init__(self):
self.N = N
self.probability_flow = probability_flow
@property
def T(self):
return T
def sde(self, x, t):
"""Create the drift and diffusion functions for the reverse SDE/ODE."""
drift, diffusion = sde_fn(x, t)
score = score_fn(x, t)
drift = drift - batch_mul(
diffusion**2, score * (0.5 if self.probability_flow else 1.0)
)
# Set the diffusion function to zero for ODEs.
diffusion = jnp.zeros_like(t) if self.probability_flow else diffusion
return drift, diffusion
def discretize(self, x, t):
"""Create discretized iteration rules for the reverse diffusion sampler."""
f, G = discretize_fn(x, t)
rev_f = f - batch_mul(
G**2, score_fn(x, t) * (0.5 if self.probability_flow else 1.0)
)
rev_G = jnp.zeros_like(t) if self.probability_flow else G
return rev_f, rev_G
return RSDE()
class VPSDE(SDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
"""Construct a Variance Preserving SDE.
Args:
beta_min: value of beta(0)
beta_max: value of beta(1)
N: number of discretization steps
"""
super().__init__(N)
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = jnp.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1.0 - self.discrete_betas
self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
self.sqrt_alphas_cumprod = jnp.sqrt(self.alphas_cumprod)
self.sqrt_1m_alphas_cumprod = jnp.sqrt(1.0 - self.alphas_cumprod)
@property
def T(self):
return 1
def sde(self, x, t):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * batch_mul(beta_t, x)
diffusion = jnp.sqrt(beta_t)
return drift, diffusion
def marginal_prob(self, x, t, high_precision=True):
log_mean_coeff = (
-0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)
if high_precision:
mean = batch_mul(
jnp.where(
jnp.abs(log_mean_coeff) <= 1e-3,
1 + log_mean_coeff,
jnp.exp(log_mean_coeff),
),
x,
)
std = jnp.where(
jnp.abs(log_mean_coeff) <= 1e-3,
jnp.sqrt(-2.0 * log_mean_coeff),
jnp.sqrt(1 - jnp.exp(2.0 * log_mean_coeff)),
)
else:
mean = batch_mul(jnp.exp(log_mean_coeff), x)
std = jnp.sqrt(1 - jnp.exp(2 * log_mean_coeff))
return mean, std
def prior_sampling(self, rng, shape):
return jax.random.normal(rng, shape)
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logp_fn = lambda z: -N / 2.0 * jnp.log(2 * np.pi) - jnp.sum(z**2) / 2.0
return jax.vmap(logp_fn)(z)
def prior_entropy(self, z):
shape = z.shape
entropy = jnp.ones(shape) * (0.5 * jnp.log(2 * np.pi) + 0.5)
entropy = entropy.reshape((z.shape[0], -1))
return jnp.sum(entropy, axis=-1)
def discretize(self, x, t):
"""DDPM discretization."""
timestep = (t * (self.N - 1) / self.T).astype(jnp.int32)
beta = self.discrete_betas[timestep]
alpha = self.alphas[timestep]
sqrt_beta = jnp.sqrt(beta)
f = batch_mul(jnp.sqrt(alpha), x) - x
G = sqrt_beta
return f, G
def likelihood_importance_cum_weight(self, t, eps=1e-5):
exponent1 = 0.5 * eps * (eps - 2) * self.beta_0 - 0.5 * eps**2 * self.beta_1
exponent2 = 0.5 * t * (t - 2) * self.beta_0 - 0.5 * t**2 * self.beta_1
term1 = jnp.where(
jnp.abs(exponent1) <= 1e-3, -exponent1, 1.0 - jnp.exp(exponent1)
)
term2 = jnp.where(
jnp.abs(exponent2) <= 1e-3, -exponent2, 1.0 - jnp.exp(exponent2)
)
return 0.5 * (
-2 * jnp.log(term1)
+ 2 * jnp.log(term2)
+ self.beta_0 * (-2 * eps + eps**2 - (t - 2) * t)
+ self.beta_1 * (-(eps**2) + t**2)
)
def sample_importance_weighted_time_for_likelihood(
self, rng, shape, quantile=None, eps=1e-5, steps=100
):
Z = self.likelihood_importance_cum_weight(self.T, eps=eps)
if quantile is None:
quantile = jax.random.uniform(rng, shape, minval=0, maxval=Z)
lb = jnp.ones_like(quantile) * eps
ub = jnp.ones_like(quantile) * self.T
def bisection_func(carry, idx):
lb, ub = carry
mid = (lb + ub) / 2.0
value = self.likelihood_importance_cum_weight(mid, eps=eps)
lb = jnp.where(value <= quantile, mid, lb)
ub = jnp.where(value <= quantile, ub, mid)
return (lb, ub), idx
(lb, ub), _ = jax.lax.scan(bisection_func, (lb, ub), jnp.arange(0, steps))
return (lb + ub) / 2.0
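# Minimal sketch (illustration only) of perturbing data with the VPSDE forward
# marginal p_t(x_t | x_0) = N(mean, std^2 I); `x` and `rng` are assumed inputs:
#
#     sde = VPSDE(beta_min=0.1, beta_max=20.0, N=1000)
#     t = jax.random.uniform(rng, (x.shape[0],), minval=1e-5, maxval=sde.T)
#     mean, std = sde.marginal_prob(x, t)
#     x_t = mean + batch_mul(std, jax.random.normal(rng, x.shape))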
class subVPSDE(SDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
"""Construct the sub-VP SDE that excels at likelihoods.
Args:
beta_min: value of beta(0)
beta_max: value of beta(1)
N: number of discretization steps
"""
super().__init__(N)
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
@property
def T(self):
return 1
def sde(self, x, t, high_precision=True):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * batch_mul(beta_t, x)
exponent = -2 * self.beta_0 * t - (self.beta_1 - self.beta_0) * t**2
discount = 1.0 - jnp.exp(exponent)
if high_precision:
discount = jnp.where(jnp.abs(exponent) <= 1e-3, -exponent, discount)
diffusion = jnp.sqrt(beta_t * discount)
return drift, diffusion
def marginal_prob(self, x, t, high_precision=True):
log_mean_coeff = (
-0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)
if high_precision:
mean = batch_mul(
jnp.where(
jnp.abs(log_mean_coeff) <= 1e-3,
1.0 + log_mean_coeff,
jnp.exp(log_mean_coeff),
),
x,
)
std = jnp.where(
jnp.abs(log_mean_coeff) <= 1e-3,
-2.0 * log_mean_coeff,
1 - jnp.exp(2.0 * log_mean_coeff),
)
else:
mean = batch_mul(jnp.exp(log_mean_coeff), x)
std = 1 - jnp.exp(2.0 * log_mean_coeff)
return mean, std
def prior_sampling(self, rng, shape):
return jax.random.normal(rng, shape)
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logp_fn = lambda z: -N / 2.0 * jnp.log(2 * np.pi) - jnp.sum(z**2) / 2.0
return jax.vmap(logp_fn)(z)
def prior_entropy(self, z):
shape = z.shape
entropy = jnp.ones(shape) * (0.5 * jnp.log(2 * np.pi) + 0.5)
entropy = entropy.reshape((z.shape[0], -1))
return jnp.sum(entropy, axis=-1)
def likelihood_importance_cum_weight(self, t, eps=1e-5):
exponent1 = 0.5 * eps * (eps * self.beta_1 - (eps - 2) * self.beta_0)
exponent2 = 0.5 * t * (self.beta_1 * t - (t - 2) * self.beta_0)
term1 = jnp.where(
exponent1 <= 1e-3, jnp.log(exponent1), jnp.log(jnp.exp(exponent1) - 1.0)
)
term2 = jnp.where(
exponent2 <= 1e-3, jnp.log(exponent2), jnp.log(jnp.exp(exponent2) - 1.0)
)
return 0.5 * (
-4 * term1
+ 4 * term2
+ (2 * eps - eps**2 + t * (t - 2)) * self.beta_0
+ (eps**2 - t**2) * self.beta_1
)
def sample_importance_weighted_time_for_likelihood(
self, rng, shape, quantile=None, eps=1e-5, steps=100
):
Z = self.likelihood_importance_cum_weight(self.T, eps=eps)
if quantile is None:
quantile = jax.random.uniform(rng, shape, minval=0, maxval=Z)
lb = jnp.ones_like(quantile) * eps
ub = jnp.ones_like(quantile) * self.T
def bisection_func(carry, idx):
lb, ub = carry
mid = (lb + ub) / 2.0
value = self.likelihood_importance_cum_weight(mid, eps=eps)
lb = jnp.where(value <= quantile, mid, lb)
ub = jnp.where(value <= quantile, ub, mid)
return (lb, ub), idx
(lb, ub), _ = jax.lax.scan(bisection_func, (lb, ub), jnp.arange(0, steps))
return (lb + ub) / 2.0
class VESDE(SDE):
def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, linear=False):
"""Construct a Variance Exploding SDE.
Args:
sigma_min: smallest sigma.
sigma_max: largest sigma.
N: number of discretization steps
linear: if `True`, use linearly spaced sigmas instead of a geometric schedule.
"""
super().__init__(N)
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.linear = linear
if not linear:
self.discrete_sigmas = jnp.exp(
np.linspace(np.log(self.sigma_min), np.log(self.sigma_max), N)
)
else:
self.discrete_sigmas = jnp.linspace(self.sigma_min, self.sigma_max, N)
self.N = N
@property
def T(self):
return 1
def sde(self, x, t):
drift = jnp.zeros_like(x)
if not self.linear:
sigma = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
diffusion = sigma * jnp.sqrt(
2 * (jnp.log(self.sigma_max) - jnp.log(self.sigma_min))
)
else:
diffusion = self.sigma_max * jnp.sqrt(2 * t)
return drift, diffusion
def marginal_prob(self, x, t):
mean = x
if not self.linear:
std = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
else:
std = t * self.sigma_max
return mean, std
def prior_sampling(self, rng, shape):
return jax.random.normal(rng, shape) * self.sigma_max
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logp_fn = lambda z: -N / 2.0 * jnp.log(
2 * np.pi * self.sigma_max**2
) - jnp.sum(z**2) / (2 * self.sigma_max**2)
return jax.vmap(logp_fn)(z)
def prior_entropy(self, z):
shape = z.shape
entropy = jnp.ones(shape) * (
0.5 * jnp.log(2 * np.pi * self.sigma_max**2) + 0.5
)
entropy = entropy.reshape((z.shape[0], -1))
return jnp.sum(entropy, axis=-1)
def discretize(self, x, t):
"""SMLD(NCSN) discretization."""
if not self.linear:
timestep = (t * (self.N - 1) / self.T).astype(jnp.int32)
sigma = self.discrete_sigmas[timestep]
adjacent_sigma = jnp.where(
timestep == 0,
jnp.zeros_like(timestep),
self.discrete_sigmas[timestep - 1],
)
f = jnp.zeros_like(x)
G = jnp.sqrt(sigma**2 - adjacent_sigma**2)
return f, G
else:
return super().discretize(x, t)
class KVESDE(SDE):
def __init__(self, t_min=0.002, t_max=80.0, N=1000, rho=7.0, data_std=0.5):
"""Construct a Variance Exploding SDE as in Kerras et al.
Args:
t_min: smallest time
t_max: largest time.
N: number of discretization steps
rho: exponent controlling the spacing of time steps.
data_std: standard deviation of the data distribution (0.5 for images scaled to [-1, 1]).
"""
super().__init__(N)
self.t_min = t_min
self.t_max = t_max
self.rho = rho
self.N = N
self.data_std = data_std
@property
def T(self):
return self.t_max
def sde(self, x, t):
drift = jnp.zeros_like(x)
diffusion = jnp.sqrt(2 * t)
return drift, diffusion
def marginal_prob(self, x, t):
mean = x
std = t
return mean, std
def prior_sampling(self, rng, shape):
return jax.random.normal(rng, shape) * self.t_max
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logp_fn = lambda z: -N / 2.0 * jnp.log(2 * np.pi * self.t_max**2) - jnp.sum(
z**2
) / (2 * self.t_max**2)
return jax.vmap(logp_fn)(z)
def prior_entropy(self, z):
shape = z.shape
entropy = jnp.ones(shape) * (0.5 * jnp.log(2 * np.pi * self.t_max**2) + 0.5)
entropy = entropy.reshape((z.shape[0], -1))
return jnp.sum(entropy, axis=-1)
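# Sketch (illustration only) of the discrete noise levels implied by KVESDE and
# used by the Heun/Euler samplers in sampling.py; the interpolation happens in
# t**(1/rho) space, following Karras et al.:
#
#     kve = KVESDE(t_min=0.002, t_max=80.0, N=18, rho=7.0)
#     i = jnp.arange(kve.N)
#     sigmas = (kve.t_max ** (1 / kve.rho)
#               + i / (kve.N - 1) * (kve.t_min ** (1 / kve.rho) - kve.t_max ** (1 / kve.rho))) ** kve.rho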
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Utility code for generating and saving image grids and checkpointing.
The `save_image` code is copied from
https://github.com/google/flax/blob/master/examples/vae/utils.py,
which is a JAX equivalent to the same function in TorchVision
(https://github.com/pytorch/vision/blob/master/torchvision/utils.py)
"""
import math
from typing import Any, Dict, Optional, TypeVar
import flax
import jax
import jax.numpy as jnp
import numpy as np
from PIL import Image
import blobfile
T = TypeVar("T")
def batch_add(a, b):
return jax.vmap(lambda a, b: a + b)(a, b)
def batch_mul(a, b):
return jax.vmap(lambda a, b: a * b)(a, b)
def load_training_state(filepath, state):
with blobfile.open(filepath, "rb") as f:
state = flax.serialization.from_bytes(state, f.read())
return state
def save_image(ndarray, fp, nrow=8, padding=2, pad_value=0.0, format=None):
"""Make a grid of images and save it into an image file.
Pixel values are assumed to be within [0, 1].
Args:
ndarray (array_like): 4D mini-batch images of shape (B x H x W x C).
fp: A filename(string) or file object.
nrow (int, optional): Number of images displayed in each row of the grid.
The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
padding (int, optional): amount of padding. Default: ``2``.
pad_value (float, optional): Value for the padded pixels. Default: ``0``.
format(Optional): If omitted, the format to use is determined from the
filename extension. If a file object was used instead of a filename, this
parameter should always be used.
"""
if not (
isinstance(ndarray, jnp.ndarray)
or (
isinstance(ndarray, list)
and all(isinstance(t, jnp.ndarray) for t in ndarray)
)
):
raise TypeError("array_like of tensors expected, got {}".format(type(ndarray)))
ndarray = jnp.asarray(ndarray)
if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images
ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)
# make the mini-batch of images into a grid
nmaps = ndarray.shape[0]
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)
num_channels = ndarray.shape[3]
grid = jnp.full(
(height * ymaps + padding, width * xmaps + padding, num_channels), pad_value
).astype(jnp.float32)
k = 0
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
grid = grid.at[
y * height + padding : (y + 1) * height,
x * width + padding : (x + 1) * width,
].set(ndarray[k])
k = k + 1
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = np.asarray(jnp.clip(grid * 255.0 + 0.5, 0, 255).astype(jnp.uint8))
im = Image.fromarray(ndarr.copy())
im.save(fp, format=format)
def flatten_dict(config):
"""Flatten a hierarchical dict to a simple dict."""
new_dict = {}
for key, value in config.items():
if isinstance(value, dict):
sub_dict = flatten_dict(value)
for subkey, subvalue in sub_dict.items():
new_dict[key + "/" + subkey] = subvalue
elif isinstance(value, tuple):
new_dict[key] = str(value)
else:
new_dict[key] = value
return new_dict
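if __name__ == "__main__":
    # Minimal self-contained demo (added for illustration; not part of the
    # original file): build an image grid from random data and flatten a
    # nested config dict.
    demo_rng = jax.random.PRNGKey(0)
    fake_images = jax.random.uniform(demo_rng, (16, 32, 32, 3))  # values in [0, 1]
    save_image(fake_images, "grid_demo.png", nrow=4)  # writes a 4x4 grid as PNG
    print(flatten_dict({"optim": {"lr": 2e-4, "beta1": 0.9}, "seed": 0}))
    # -> {'optim/lr': 0.0002, 'optim/beta1': 0.9, 'seed': 0}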
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various sampling methods."""
import functools
import jax
import jax.numpy as jnp
import jax.random as random
import abc
import flax
import haiku as hk
import numpy as np
from .models.utils import (
from_flattened_numpy,
to_flattened_numpy,
get_score_fn,
get_model_fn,
)
from scipy import integrate
from . import sde_lib
from .utils import batch_mul, batch_add
from .models import utils as mutils
from .losses import get_ema_scales_fn
_CORRECTORS = {}
_PREDICTORS = {}
def register_predictor(cls=None, *, name=None):
"""A decorator for registering predictor classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _PREDICTORS:
raise ValueError(f"Already registered model with name: {local_name}")
_PREDICTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
def register_corrector(cls=None, *, name=None):
"""A decorator for registering corrector classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _CORRECTORS:
raise ValueError(f"Already registered model with name: {local_name}")
_CORRECTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
def get_predictor(name):
return _PREDICTORS[name]
def get_corrector(name):
return _CORRECTORS[name]
def get_sampling_fn(config, sde, model, shape, eps=1e-3):
"""Create a sampling function.
Args:
config: A `ml_collections.ConfigDict` object that contains all configuration information.
sde: A `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of a time-dependent score-based model.
shape: A sequence of integers representing the expected shape of a single sample.
eps: A `float` number. The reverse-time SDE is only integrated to `eps` for numerical stability.
Returns:
A function that takes random states and a replicated training state and outputs samples with the
trailing dimensions matching `shape`.
"""
sampler_name = config.sampling.method
# Probability flow ODE sampling with black-box ODE solvers
if sampler_name.lower() == "ode":
sampling_fn = get_ode_sampler(
sde=sde,
model=model,
shape=shape,
denoise=config.sampling.noise_removal,
eps=eps,
)
# Predictor-Corrector sampling. Predictor-only and Corrector-only samplers are special cases.
elif sampler_name.lower() == "pc":
predictor = get_predictor(config.sampling.predictor.lower())
corrector = get_corrector(config.sampling.corrector.lower())
sampling_fn = get_pc_sampler(
sde=sde,
model=model,
shape=shape,
predictor=predictor,
corrector=corrector,
snr=config.sampling.snr,
n_steps=config.sampling.n_steps_each,
probability_flow=config.sampling.probability_flow,
denoise=config.sampling.noise_removal,
eps=eps,
)
elif sampler_name.lower() == "heun":
sampling_fn = get_heun_sampler(
sde=sde, model=model, shape=shape, denoise=config.sampling.denoise
)
elif sampler_name.lower() == "euler":
sampling_fn = get_euler_sampler(
sde=sde, model=model, shape=shape, denoise=config.sampling.denoise
)
elif sampler_name.lower() == "onestep":
sampling_fn = get_onestep_sampler(
config=config,
sde=sde,
model=model,
shape=shape,
)
elif sampler_name.lower() == "seeded_sampler":
sampling_fn = get_seeded_sampler(
config=config,
sde=sde,
model=model,
shape=shape,
)
elif sampler_name.lower() == "progressive_distillation":
sampling_fn = get_progressive_distillation_sampler(
config=config,
sde=sde,
model=model,
shape=shape,
denoise=config.sampling.denoise,
)
else:
raise ValueError(f"Sampler name {sampler_name} unknown.")
return sampling_fn
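# Illustrative sketch (not part of the original file); `pstate` denotes the
# replicated training state and the per-device `shape` is an assumption:
#
#     shape = (config.eval.batch_size // jax.device_count(), 32, 32, 3)
#     sampling_fn = get_sampling_fn(config, sde, score_model, shape)
#     rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
#     samples, nfe = sampling_fn(rngs, pstate)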
class Predictor(abc.ABC):
"""The abstract class for a predictor algorithm."""
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__()
self.sde = sde
# Compute the reverse SDE/ODE
self.rsde = sde.reverse(score_fn, probability_flow)
self.score_fn = score_fn
@abc.abstractmethod
def update_fn(self, rng, x, t):
"""One update of the predictor.
Args:
rng: A JAX random state.
x: A JAX array representing the current state
t: A JAX array representing the current time step.
Returns:
x: A JAX array of the next state.
x_mean: A JAX array. The next state without random noise. Useful for denoising.
"""
pass
class Corrector(abc.ABC):
"""The abstract class for a corrector algorithm."""
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__()
self.sde = sde
self.score_fn = score_fn
self.snr = snr
self.n_steps = n_steps
@abc.abstractmethod
def update_fn(self, rng, x, t):
"""One update of the corrector.
Args:
rng: A JAX random state.
x: A JAX array representing the current state
t: A JAX array representing the current time step.
Returns:
x: A JAX array of the next state.
x_mean: A JAX array. The next state without random noise. Useful for denoising.
"""
pass
@register_predictor(name="euler_maruyama")
class EulerMaruyamaPredictor(Predictor):
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
def update_fn(self, rng, x, t):
dt = -1.0 / self.rsde.N
z = random.normal(rng, x.shape)
drift, diffusion = self.rsde.sde(x, t)
x_mean = x + drift * dt
x = x_mean + batch_mul(diffusion, jnp.sqrt(-dt) * z)
return x, x_mean
@register_predictor(name="reverse_diffusion")
class ReverseDiffusionPredictor(Predictor):
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
def update_fn(self, rng, x, t):
f, G = self.rsde.discretize(x, t)
z = random.normal(rng, x.shape)
x_mean = x - f
x = x_mean + batch_mul(G, z)
return x, x_mean
@register_predictor(name="ancestral_sampling")
class AncestralSamplingPredictor(Predictor):
"""The ancestral sampling predictor. Currently only supports VE/VP SDEs."""
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
if not isinstance(sde, sde_lib.VPSDE) and not isinstance(sde, sde_lib.VESDE):
raise NotImplementedError(
f"SDE class {sde.__class__.__name__} not yet supported."
)
assert (
not probability_flow
), "Probability flow not supported by ancestral sampling"
def vesde_update_fn(self, rng, x, t):
sde = self.sde
timestep = (t * (sde.N - 1) / sde.T).astype(jnp.int32)
sigma = sde.discrete_sigmas[timestep]
adjacent_sigma = jnp.where(
timestep == 0, jnp.zeros(t.shape), sde.discrete_sigmas[timestep - 1]
)
score = self.score_fn(x, t)
x_mean = x + batch_mul(score, sigma**2 - adjacent_sigma**2)
std = jnp.sqrt(
(adjacent_sigma**2 * (sigma**2 - adjacent_sigma**2)) / (sigma**2)
)
noise = random.normal(rng, x.shape)
x = x_mean + batch_mul(std, noise)
return x, x_mean
def vpsde_update_fn(self, rng, x, t):
sde = self.sde
timestep = (t * (sde.N - 1) / sde.T).astype(jnp.int32)
beta = sde.discrete_betas[timestep]
score = self.score_fn(x, t)
x_mean = batch_mul((x + batch_mul(beta, score)), 1.0 / jnp.sqrt(1.0 - beta))
noise = random.normal(rng, x.shape)
x = x_mean + batch_mul(jnp.sqrt(beta), noise)
return x, x_mean
def update_fn(self, rng, x, t):
if isinstance(self.sde, sde_lib.VESDE):
return self.vesde_update_fn(rng, x, t)
elif isinstance(self.sde, sde_lib.VPSDE):
return self.vpsde_update_fn(rng, x, t)
@register_predictor(name="none")
class NonePredictor(Predictor):
"""An empty predictor that does nothing."""
def __init__(self, sde, score_fn, probability_flow=False):
pass
def update_fn(self, rng, x, t):
return x, x
@register_corrector(name="langevin")
class LangevinCorrector(Corrector):
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__(sde, score_fn, snr, n_steps)
if (
not isinstance(sde, sde_lib.VPSDE)
and not isinstance(sde, sde_lib.VESDE)
and not isinstance(sde, sde_lib.subVPSDE)
):
raise NotImplementedError(
f"SDE class {sde.__class__.__name__} not yet supported."
)
def update_fn(self, rng, x, t):
sde = self.sde
score_fn = self.score_fn
n_steps = self.n_steps
target_snr = self.snr
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
timestep = (t * (sde.N - 1) / sde.T).astype(jnp.int32)
alpha = sde.alphas[timestep]
else:
alpha = jnp.ones_like(t)
def loop_body(step, val):
rng, x, x_mean = val
grad = score_fn(x, t)
rng, step_rng = jax.random.split(rng)
noise = jax.random.normal(step_rng, x.shape)
grad_norm = jnp.linalg.norm(
grad.reshape((grad.shape[0], -1)), axis=-1
).mean()
grad_norm = jax.lax.pmean(grad_norm, axis_name="batch")
noise_norm = jnp.linalg.norm(
noise.reshape((noise.shape[0], -1)), axis=-1
).mean()
noise_norm = jax.lax.pmean(noise_norm, axis_name="batch")
step_size = (target_snr * noise_norm / grad_norm) ** 2 * 2 * alpha
x_mean = x + batch_mul(step_size, grad)
x = x_mean + batch_mul(noise, jnp.sqrt(step_size * 2))
return rng, x, x_mean
_, x, x_mean = jax.lax.fori_loop(0, n_steps, loop_body, (rng, x, x))
return x, x_mean
@register_corrector(name="ald")
class AnnealedLangevinDynamics(Corrector):
"""The original annealed Langevin dynamics predictor in NCSN/NCSNv2.
We include this corrector only for completeness. It was not directly used in our paper.
"""
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__(sde, score_fn, snr, n_steps)
if (
not isinstance(sde, sde_lib.VPSDE)
and not isinstance(sde, sde_lib.VESDE)
and not isinstance(sde, sde_lib.subVPSDE)
):
raise NotImplementedError(
f"SDE class {sde.__class__.__name__} not yet supported."
)
def update_fn(self, rng, x, t):
sde = self.sde
score_fn = self.score_fn
n_steps = self.n_steps
target_snr = self.snr
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
timestep = (t * (sde.N - 1) / sde.T).astype(jnp.int32)
alpha = sde.alphas[timestep]
else:
alpha = jnp.ones_like(t)
std = self.sde.marginal_prob(x, t)[1]
def loop_body(step, val):
rng, x, x_mean = val
grad = score_fn(x, t)
rng, step_rng = jax.random.split(rng)
noise = jax.random.normal(step_rng, x.shape)
step_size = (target_snr * std) ** 2 * 2 * alpha
x_mean = x + batch_mul(step_size, grad)
x = x_mean + batch_mul(noise, jnp.sqrt(step_size * 2))
return rng, x, x_mean
_, x, x_mean = jax.lax.fori_loop(0, n_steps, loop_body, (rng, x, x))
return x, x_mean
@register_corrector(name="none")
class NoneCorrector(Corrector):
"""An empty corrector that does nothing."""
def __init__(self, sde, score_fn, snr, n_steps):
pass
def update_fn(self, rng, x, t):
return x, x
def shared_predictor_update_fn(
rng, state, x, t, sde, model, predictor, probability_flow
):
"""A wrapper that configures and returns the update function of predictors."""
score_fn = mutils.get_score_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
)
if predictor is None:
# Corrector-only sampler
predictor_obj = NonePredictor(sde, score_fn, probability_flow)
else:
predictor_obj = predictor(sde, score_fn, probability_flow)
return predictor_obj.update_fn(rng, x, t)
def shared_corrector_update_fn(rng, state, x, t, sde, model, corrector, snr, n_steps):
"""A wrapper tha configures and returns the update function of correctors."""
score_fn = mutils.get_score_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
)
if corrector is None:
# Predictor-only sampler
corrector_obj = NoneCorrector(sde, score_fn, snr, n_steps)
else:
corrector_obj = corrector(sde, score_fn, snr, n_steps)
return corrector_obj.update_fn(rng, x, t)
def get_pc_sampler(
sde,
model,
shape,
predictor,
corrector,
snr,
n_steps=1,
probability_flow=False,
denoise=True,
eps=1e-3,
):
"""Create a Predictor-Corrector (PC) sampler.
Args:
sde: An `sde_lib.SDE` object representing the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of a time-dependent score-based model.
shape: A sequence of integers. The expected shape of a single sample.
predictor: A subclass of `sampling.Predictor` representing the predictor algorithm.
corrector: A subclass of `sampling.Corrector` representing the corrector algorithm.
snr: A `float` number. The signal-to-noise ratio for configuring correctors.
n_steps: An integer. The number of corrector steps per predictor update.
probability_flow: If `True`, solve the reverse-time probability flow ODE when running the predictor.
denoise: If `True`, add one-step denoising to the final samples.
eps: A `float` number. The reverse-time SDE and ODE are integrated to `eps` to avoid numerical issues.
Returns:
A sampling function that takes random states and a replicated training state, and returns samples as well as
the number of function evaluations during sampling.
"""
# Create predictor & corrector update functions
predictor_update_fn = functools.partial(
shared_predictor_update_fn,
sde=sde,
model=model,
predictor=predictor,
probability_flow=probability_flow,
)
corrector_update_fn = functools.partial(
shared_corrector_update_fn,
sde=sde,
model=model,
corrector=corrector,
snr=snr,
n_steps=n_steps,
)
def pc_sampler(rng, state):
"""The PC sampler funciton.
Args:
rng: A JAX random state
state: A `flax.struct.dataclass` object that represents the training state of a score-based model.
Returns:
Samples, number of function evaluations
"""
# Initial sample
rng, step_rng = random.split(rng)
x = sde.prior_sampling(step_rng, shape)
timesteps = jnp.linspace(sde.T, eps, sde.N)
def loop_body(i, val):
rng, x, x_mean = val
t = timesteps[i]
vec_t = jnp.ones(shape[0]) * t
rng, step_rng = random.split(rng)
x, x_mean = corrector_update_fn(step_rng, state, x, vec_t)
rng, step_rng = random.split(rng)
x, x_mean = predictor_update_fn(step_rng, state, x, vec_t)
return rng, x, x_mean
_, x, x_mean = jax.lax.fori_loop(0, sde.N, loop_body, (rng, x, x))
# Denoising is equivalent to running one predictor step without adding noise.
return x_mean if denoise else x, sde.N * (n_steps + 1)
return jax.pmap(pc_sampler, axis_name="batch")
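# Sketch of invoking a PC sampler directly (illustration only; `pstate` is the
# replicated training state):
#
#     sampler = get_pc_sampler(sde, score_model, shape,
#                              predictor=ReverseDiffusionPredictor,
#                              corrector=LangevinCorrector,
#                              snr=0.16, n_steps=1)
#     rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
#     samples, nfe = sampler(rngs, pstate)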
def get_onestep_sampler(config, sde, model, shape):
def sampler(rng, state):
rng, step_rng = random.split(rng)
x = jax.random.normal(step_rng, shape) * config.sampling.std
model_fn = mutils.get_distiller_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
return_state=False,
)
samples = model_fn(x, jnp.ones((x.shape[0],)) * config.sampling.std)
return samples, 1
return jax.pmap(sampler, axis_name="batch")
def get_seeded_sampler(config, sde, model, shape):
def sampler(rng, state, init, t):
rng, step_rng = random.split(rng)
x = init
model_fn = mutils.get_distiller_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
return_state=False,
)
samples = model_fn(x, jnp.ones((x.shape[0],)) * t)
return samples, 1
return jax.pmap(sampler, axis_name="batch")
def get_heun_sampler(sde, model, shape, denoise=True):
def heun_sampler(rng, state):
denoiser_fn = mutils.get_denoiser_fn(
sde, model, state.params_ema, state.model_state, train=False
)
rng = hk.PRNGSequence(rng)
x = sde.prior_sampling(next(rng), shape)
timesteps = (
sde.t_max ** (1 / sde.rho)
+ jnp.arange(sde.N)
/ (sde.N - 1)
* (sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho))
) ** sde.rho
timesteps = jnp.concatenate([timesteps, jnp.array([0.0])])
def loop_body(i, val):
x = val
t = timesteps[i]
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = timesteps[i + 1]
samples = x + (next_t - t) * d
vec_next_t = jnp.ones((shape[0],)) * next_t
denoiser = denoiser_fn(samples, vec_next_t)
next_d = 1 / next_t * samples - 1 / next_t * denoiser
samples = x + (next_t - t) / 2 * (d + next_d)
return samples
x = jax.lax.fori_loop(0, sde.N - 1, loop_body, x)
if denoise:
t = timesteps[sde.N - 1]
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = timesteps[sde.N]
samples = x + (next_t - t) * d
else:
samples = x
return samples, sde.N
return jax.pmap(heun_sampler, axis_name="batch")
def get_euler_sampler(sde, model, shape, denoise=True):
def euler_sampler(rng, state):
denoiser_fn = mutils.get_denoiser_fn(
sde, model, state.params_ema, state.model_state, train=False
)
rng = hk.PRNGSequence(rng)
x = sde.prior_sampling(next(rng), shape)
timesteps = (
sde.t_max ** (1 / sde.rho)
+ jnp.arange(sde.N)
/ (sde.N - 1)
* (sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho))
) ** sde.rho
timesteps = jnp.concatenate([timesteps, jnp.array([0.0])])
def loop_body(i, val):
x = val
t = timesteps[i]
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = timesteps[i + 1]
samples = x + (next_t - t) * d
return samples
x = jax.lax.fori_loop(0, sde.N - 1, loop_body, x)
if denoise:
t = timesteps[sde.N - 1]
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = timesteps[sde.N]
samples = x + (next_t - t) * d
else:
samples = x
return samples, sde.N
return jax.pmap(euler_sampler, axis_name="batch")
def get_progressive_distillation_sampler(config, sde, model, shape, denoise=True):
ema_scales_fn = get_ema_scales_fn(config)
def progressive_distillation_sampler(rng, state):
denoiser_fn = mutils.get_denoiser_fn(
sde, model, state.params_ema, state.model_state, train=False
)
_, num_scales = ema_scales_fn(state.step)
rng = hk.PRNGSequence(rng)
x = sde.prior_sampling(next(rng), shape)
t_start = sde.t_max ** (1 / sde.rho)
t_end = sde.t_min ** (1 / sde.rho)
def loop_body(i, val):
x = val
t = (t_start + i / num_scales * (t_end - t_start)) ** sde.rho
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = (t_start + (i + 1) / num_scales * (t_end - t_start)) ** sde.rho
samples = x + (next_t - t) * d
return samples
x = jax.lax.fori_loop(0, num_scales, loop_body, x)
if denoise:
t = sde.t_min
vec_t = jnp.ones((shape[0],)) * t
denoiser = denoiser_fn(x, vec_t)
d = 1 / t * x - 1 / t * denoiser
next_t = 0.0
samples = x + (next_t - t) * d
else:
samples = x
return samples, num_scales
return jax.pmap(progressive_distillation_sampler, axis_name="batch")
def get_ode_sampler(
sde,
model,
shape,
denoise=False,
rtol=1e-5,
atol=1e-5,
method="RK45",
eps=1e-3,
):
"""Probability flow ODE sampler with the black-box ODE solver.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
shape: A sequence of integers. The expected shape of a single sample.
denoise: If `True`, add one-step denoising to final samples.
rtol: A `float` number. The relative tolerance level of the ODE solver.
atol: A `float` number. The absolute tolerance level of the ODE solver.
method: A `str`. The algorithm used for the black-box ODE solver.
See the documentation of `scipy.integrate.solve_ivp`.
eps: A `float` number. The reverse-time SDE/ODE will be integrated to `eps` for numerical stability.
Returns:
A sampling function that takes random states, and a replicated training state and returns samples
as well as the number of function evaluations during sampling.
"""
@jax.pmap
def denoise_update_fn(rng, state, x):
score_fn = get_score_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
)
# Reverse diffusion predictor for denoising
predictor_obj = ReverseDiffusionPredictor(sde, score_fn, probability_flow=False)
vec_eps = jnp.ones((x.shape[0],)) * eps
_, x = predictor_obj.update_fn(rng, x, vec_eps)
return x
@jax.pmap
def drift_fn(state, x, t):
"""Get the drift function of the reverse-time SDE."""
score_fn = get_score_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
)
rsde = sde.reverse(score_fn, probability_flow=True)
return rsde.sde(x, t)[0]
def ode_sampler(prng, pstate, z=None):
"""The probability flow ODE sampler with black-box ODE solver.
Args:
prng: An array of random state. The leading dimension equals the number of devices.
pstate: Replicated training state for running on multiple devices.
z: If present, generate samples from latent code `z`.
Returns:
Samples, and the number of function evaluations.
"""
# Initial sample
rng = flax.jax_utils.unreplicate(prng)
rng, step_rng = random.split(rng)
if z is None:
# If not given, sample the latent code from the prior distribution of the SDE.
x = sde.prior_sampling(step_rng, (jax.local_device_count(),) + shape)
else:
x = z
def ode_func(t, x):
x = from_flattened_numpy(x, (jax.local_device_count(),) + shape)
vec_t = jnp.ones((x.shape[0], x.shape[1])) * t
drift = drift_fn(pstate, x, vec_t)
return to_flattened_numpy(drift)
# Black-box ODE solver for the probability flow ODE
solution = integrate.solve_ivp(
ode_func,
(sde.T, eps),
to_flattened_numpy(x),
rtol=rtol,
atol=atol,
method=method,
)
nfe = solution.nfev
x = jnp.asarray(solution.y[:, -1]).reshape((jax.local_device_count(),) + shape)
# Denoising is equivalent to running one predictor step without adding noise
if denoise:
rng, *step_rng = random.split(rng, jax.local_device_count() + 1)
step_rng = jnp.asarray(step_rng)
x = denoise_update_fn(step_rng, pstate, x)
return x, nfe
return ode_sampler
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation for score-based generative models. """
import os
from typing import Any
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import numpy as np
import logging
import functools
import haiku as hk
from . import checkpoints
import wandb
# Keep the import below for registering all model definitions
from .models import ddpm, ncsnv2, ncsnpp
from .models import utils as mutils
from . import losses
from . import sampling
from . import utils
from . import datasets
from . import sde_lib
import blobfile
def train(config, workdir):
"""Runs the training pipeline.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and samples. If this
contains checkpoints, training will be resumed from the latest one.
"""
# Create directories for experimental logs
sample_dir = os.path.join(workdir, "samples")
blobfile.makedirs(sample_dir)
rng = hk.PRNGSequence(config.seed)
# Initialize model.
score_model, init_model_state, initial_params = mutils.init_model(next(rng), config)
optimizer, optimize_fn = losses.get_optimizer(config)
if config.training.loss.lower().endswith(
("ema", "adaptive", "progressive_distillation")
):
state = mutils.StateWithTarget(
step=0,
lr=config.optim.lr,
ema_rate=config.model.ema_rate,
params=initial_params,
target_params=initial_params,
params_ema=initial_params,
model_state=init_model_state,
opt_state=optimizer.init(initial_params),
rng_state=rng.internal_state,
)
else:
state = mutils.State(
step=0,
lr=config.optim.lr,
ema_rate=config.model.ema_rate,
params=initial_params,
params_ema=initial_params,
model_state=init_model_state,
opt_state=optimizer.init(initial_params),
rng_state=rng.internal_state,
)
# Setup SDEs
sde = sde_lib.get_sde(config)
# Build one-step training and evaluation functions
train_loss_fn, eval_loss_fn, state = losses.get_loss_fn(
config, sde, score_model, state, next(rng)
)
ema_scale_fn = losses.get_ema_scales_fn(config)
train_step_fn = losses.get_step_fn(
train_loss_fn,
train=True,
optimize_fn=optimize_fn,
ema_scales_fn=ema_scale_fn,
)
# Pmap (and jit-compile) multiple training steps together for faster running
p_train_step = jax.pmap(
functools.partial(jax.lax.scan, train_step_fn),
axis_name="batch",
)
eval_step_fn = losses.get_step_fn(
eval_loss_fn,
train=False,
optimize_fn=optimize_fn,
ema_scales_fn=ema_scale_fn,
)
# Pmap (and jit-compile) multiple evaluation steps together for faster running
p_eval_step = jax.pmap(
functools.partial(jax.lax.scan, eval_step_fn),
axis_name="batch",
)
# Create checkpoints directory
checkpoint_dir = os.path.join(workdir, "checkpoints")
# Intermediate checkpoints to resume training after pre-emption in cloud environments
checkpoint_meta_dir = os.path.join(workdir, "checkpoints-meta")
blobfile.makedirs(checkpoint_dir)
blobfile.makedirs(checkpoint_meta_dir)
# Resume training when intermediate checkpoints are detected
state = checkpoints.restore_checkpoint(checkpoint_meta_dir, state)
# `state.step` is a JAX integer on the GPU/TPU devices
initial_step = int(state.step)
rng.replace_internal_state(state.rng_state)
# Finished model initialization
# Build data iterators
train_ds, eval_ds = datasets.get_dataset(
config,
additional_dim=config.training.n_jitted_steps,
uniform_dequantization=config.data.uniform_dequantization,
)
train_iter = iter(train_ds)
eval_iter = iter(eval_ds)
# Building sampling functions
if config.training.snapshot_sampling:
sampling_shape = (
config.training.batch_size // jax.local_device_count(),
config.data.image_size,
config.data.image_size,
config.data.num_channels,
)
sampling_fn = sampling.get_sampling_fn(config, sde, score_model, sampling_shape)
# Replicate the training state to run on multiple devices
pstate = flax_utils.replicate(state)
num_train_steps = config.training.n_iters
# In case there are multiple hosts (e.g., TPU pods), only log to host 0
if jax.process_index() == 0:
logging.info("Starting training loop at step %d." % (initial_step,))
rng = hk.PRNGSequence(jax.random.fold_in(next(rng), jax.process_index()))
# JIT multiple training steps together for faster training
n_jitted_steps = config.training.n_jitted_steps
# Must be divisible by the number of steps jitted together
assert (
config.training.log_freq % n_jitted_steps == 0
and config.training.snapshot_freq_for_preemption % n_jitted_steps == 0
and config.training.eval_freq % n_jitted_steps == 0
and config.training.snapshot_freq % n_jitted_steps == 0
), "The number of steps jitted together must be divisible by the logging frequency."
for step in range(
initial_step, num_train_steps + 1, config.training.n_jitted_steps
):
# Convert the batch to NumPy arrays (via .detach().cpu().numpy()) so JAX can consume it.
try:
data = next(train_iter)
except StopIteration:
# Restart the iterator when the dataset is exhausted.
train_iter = iter(train_ds)
data = next(train_iter)
batch = jax.tree_util.tree_map(lambda x: x.detach().cpu().numpy(), data)
next_rng = rng.take(jax.local_device_count())
next_rng = jnp.asarray(next_rng)
# Execute one training step
(_, pstate), (ploss, p_log_stats) = p_train_step((next_rng, pstate), batch)
loss = flax.jax_utils.unreplicate(ploss).mean()
log_stats = jax.tree_map(
lambda x: x.mean(), flax.jax_utils.unreplicate(p_log_stats)
)
# Log to console, file and tensorboard on host 0
if jax.process_index() == 0 and step % config.training.log_freq == 0:
logging.info("step: %d, training_loss: %.5e" % (step, loss))
if "dsm_loss" in log_stats and "distill_loss" in log_stats:
logging.info(
"step: %d, dsm_loss: %.5e, distill_loss: %.5e"
% (step, log_stats["dsm_loss"], log_stats["distill_loss"])
)
wandb.log({"training_loss": float(loss)}, step=step)
for key, value in log_stats.items():
wandb.log({f"training_{key}": float(value)}, step=step)
# Report the loss on an evaluation dataset periodically
if step % config.training.eval_freq == 0:
try:
eval_data = next(eval_iter)
except StopIteration:
eval_iter = iter(eval_ds)
eval_data = next(eval_iter)
eval_batch = jax.tree_util.tree_map(
lambda x: x.detach().cpu().numpy(), eval_data
)
next_rng = jnp.asarray(rng.take(jax.local_device_count()))
(_, _), (peval_loss, peval_log_stats) = p_eval_step(
(next_rng, pstate), eval_batch
)
eval_loss = flax.jax_utils.unreplicate(peval_loss).mean()
eval_log_stats = jax.tree_map(
lambda x: x.mean(), flax.jax_utils.unreplicate(peval_log_stats)
)
if jax.process_index() == 0:
logging.info("step: %d, eval_loss: %.5e" % (step, eval_loss))
if "dsm_loss" in eval_log_stats and "distill_loss" in eval_log_stats:
logging.info(
"step: %d, dsm_loss: %.5e, distill_loss: %.5e"
% (
step,
eval_log_stats["dsm_loss"],
eval_log_stats["distill_loss"],
)
)
wandb.log({"eval_loss": float(eval_loss)}, step=step)
for key, value in eval_log_stats.items():
wandb.log({f"eval_{key}": float(value)}, step=step)
if config.training.loss.lower() == "progressive_distillation":
ema_scale_fn = losses.get_ema_scales_fn(config)
if step > 0:
scales = int(ema_scale_fn(step)[1])
last_scales = int(ema_scale_fn(step - 1)[1])
if scales != last_scales:
# Move to the next distillation iteration
if scales == 2:
config.optim.linear_decay_steps = (
config.training.distill_steps_per_iter * 2
)
elif scales == 1:
config.optim.linear_decay_steps = config.training.n_iters - step
optimizer, optimize_fn = losses.get_optimizer(config)
state = flax.jax_utils.unreplicate(pstate)
state = state.replace(
target_params=state.params_ema,
params=state.params_ema,
opt_state=optimizer.init(state.params_ema),
)
pstate = flax.jax_utils.replicate(state)
train_step_fn = losses.get_step_fn(
train_loss_fn,
train=True,
optimize_fn=optimize_fn,
ema_scales_fn=ema_scale_fn,
)
# Pmap (and jit-compile) multiple training steps together for faster running
p_train_step = jax.pmap(
functools.partial(jax.lax.scan, train_step_fn),
axis_name="batch",
)
eval_step_fn = losses.get_step_fn(
eval_loss_fn,
train=False,
optimize_fn=optimize_fn,
ema_scales_fn=ema_scale_fn,
)
# Pmap (and jit-compile) multiple evaluation steps together for faster running
p_eval_step = jax.pmap(
functools.partial(jax.lax.scan, eval_step_fn),
axis_name="batch",
)
# Save a checkpoint periodically and generate samples if needed
if (
step != 0
and step % config.training.snapshot_freq == 0
or step == num_train_steps
):
# Save the checkpoint.
if jax.process_index() == 0:
saved_state = flax_utils.unreplicate(pstate)
saved_state = saved_state.replace(rng_state=rng.internal_state)
checkpoints.save_checkpoint(
checkpoint_dir,
saved_state,
step=step // config.training.snapshot_freq,
keep=np.inf,
)
# Generate and save samples
if config.training.snapshot_sampling:
# Use the same random seed for sampling to track progress
sample_rng_seed = hk.PRNGSequence(42)
sample_rng = jnp.asarray(sample_rng_seed.take(jax.local_device_count()))
sample, n = sampling_fn(sample_rng, pstate)
sample = (sample + 1.0) / 2.0
this_sample_dir = os.path.join(
sample_dir, "iter_{}_host_{}".format(step, jax.process_index())
)
blobfile.makedirs(this_sample_dir)
image_grid = sample.reshape((-1, *sample.shape[2:]))
nrow = int(np.sqrt(image_grid.shape[0]))
sample = np.clip(sample * 255, 0, 255).astype(np.uint8)
with blobfile.BlobFile(
os.path.join(this_sample_dir, "sample.np"),
"wb",
) as fout:
np.save(fout, sample)
with blobfile.BlobFile(
os.path.join(this_sample_dir, "sample.png"),
"wb",
) as fout:
utils.save_image(image_grid, fout, nrow=nrow, padding=2)
# Save a temporary checkpoint to resume training after pre-emption periodically
# Must execute last to avoid corner cases where the main checkpoint was not successfully saved
if (
step != 0
and step % config.training.snapshot_freq_for_preemption == 0
and jax.process_index() == 0
):
saved_state = flax_utils.unreplicate(pstate)
saved_state = saved_state.replace(rng_state=rng.internal_state)
checkpoints.save_checkpoint(
checkpoint_meta_dir,
saved_state,
step=step // config.training.snapshot_freq_for_preemption,
keep=1,
)
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import time
from typing import Any
import flax
import jax
import jax.numpy as jnp
import numpy as np
import logging
import functools
import haiku as hk
import math
from collections import defaultdict
from . import checkpoints
# Keep the import below for registering all model definitions
from .models import ddpm, ncsnv2, ncsnpp
from .models import utils as mutils
from . import losses
from . import sampling
from . import datasets
from . import metrics
from . import likelihood
from . import sde_lib
from .metrics import get_samples_from_ckpt
import blobfile
def evaluate(config, workdir, eval_folder="eval"):
"""Evaluate trained models.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints.
eval_folder: The subfolder for storing evaluation results. Defaults to
"eval".
"""
# Create directory to eval_folder
eval_dir = os.path.join(workdir, eval_folder)
blobfile.makedirs(eval_dir)
rng = hk.PRNGSequence(config.seed + 1)
# Initialize model
score_model, init_model_state, initial_params = mutils.init_model(next(rng), config)
optimizer, optimize_fn = losses.get_optimizer(config)
if config.training.loss.lower().endswith(
("ema", "adaptive", "progressive_distillation")
):
state = mutils.StateWithTarget(
step=0,
lr=config.optim.lr,
ema_rate=config.model.ema_rate,
params=initial_params,
target_params=initial_params,
params_ema=initial_params,
model_state=init_model_state,
opt_state=optimizer.init(initial_params),
rng_state=rng.internal_state,
)
else:
state = mutils.State(
step=0,
lr=config.optim.lr,
ema_rate=config.model.ema_rate,
params=initial_params,
params_ema=initial_params,
model_state=init_model_state,
opt_state=optimizer.init(initial_params),
rng_state=rng.internal_state,
)
checkpoint_dir = os.path.join(workdir, "checkpoints")
# Setup SDEs
sde = sde_lib.get_sde(config)
# Round up so that at least `config.eval.num_samples` samples are generated.
# num_sampling_rounds and num_bpd_rounds must be computed in all cases.
num_sampling_rounds = int(
math.ceil(config.eval.num_samples / config.eval.batch_size)
)
# Create data loaders for likelihood evaluation. Only evaluate on uniformly dequantized data
train_ds_bpd, eval_ds_bpd = datasets.get_dataset(
config,
additional_dim=None,
uniform_dequantization=True,
evaluation=True,
drop_last=False,
)
if config.eval.bpd_dataset.lower() == "train":
ds_bpd = train_ds_bpd
elif config.eval.bpd_dataset.lower() == "test":
# Go over the dataset 5 times when computing likelihood on the test dataset
ds_bpd = eval_ds_bpd
else:
raise ValueError(f"No bpd dataset {config.eval.bpd_dataset} recognized.")
num_bpd_rounds = len(ds_bpd)
if config.eval.enable_loss:
# Build datasets
train_ds, eval_ds = datasets.get_dataset(
config,
additional_dim=1,
uniform_dequantization=config.data.uniform_dequantization,
evaluation=True,
drop_last=False,
)
# Create the one-step evaluation function when loss computation is enabled
train_loss_fn, eval_loss_fn, state = losses.get_loss_fn(
config, sde, score_model, state, next(rng)
)
ema_scales_fn = losses.get_ema_scales_fn(config)
eval_step = losses.get_step_fn(
eval_loss_fn,
train=False,
optimize_fn=optimize_fn,
ema_scales_fn=ema_scales_fn,
)
# Pmap (and jit-compile) multiple evaluation steps together for faster execution
p_eval_step = jax.pmap(
functools.partial(jax.lax.scan, eval_step),
axis_name="batch",
)
if config.eval.enable_bpd:
# Build the likelihood computation function when likelihood is enabled
likelihood_fn = likelihood.get_likelihood_fn(
sde,
score_model,
num_repeats=5 if config.eval.bpd_dataset.lower() == "test" else 1,
)
# Build the sampling function when sampling is enabled
if config.eval.enable_sampling:
sampling_shape = (
config.eval.batch_size // jax.local_device_count(),
config.data.image_size,
config.data.image_size,
config.data.num_channels,
)
sampling_fn = sampling.get_sampling_fn(config, sde, score_model, sampling_shape)
# Create different random states for different hosts in a multi-host environment (e.g., TPU pods)
rng = hk.PRNGSequence(jax.random.fold_in(next(rng), jax.process_index()))
# A data class for storing intermediate results to resume evaluation after pre-emption
@flax.struct.dataclass
class EvalMeta:
ckpt_id: int
sampling_round_id: int
bpd_round_id: int
rng_state: Any
# Restore evaluation after pre-emption
eval_meta = EvalMeta(
ckpt_id=config.eval.begin_ckpt,
sampling_round_id=-1,
bpd_round_id=-1,
rng_state=rng.internal_state,
)
eval_meta = checkpoints.restore_checkpoint(
eval_dir, eval_meta, step=None, prefix=f"meta_{jax.process_index()}_"
)
# Make sure evaluation does not start earlier than config.eval.begin_ckpt.
if eval_meta.ckpt_id < config.eval.begin_ckpt:
eval_meta = eval_meta.replace(
ckpt_id=config.eval.begin_ckpt,
sampling_round_id=-1,
bpd_round_id=-1,
rng_state=rng.internal_state,
)
# Evaluation order: first loss, then likelihood, then sampling
if eval_meta.bpd_round_id < num_bpd_rounds - 1:
begin_ckpt = eval_meta.ckpt_id
begin_bpd_round = eval_meta.bpd_round_id + 1
begin_sampling_round = 0
elif eval_meta.sampling_round_id < num_sampling_rounds - 1:
begin_ckpt = eval_meta.ckpt_id
begin_bpd_round = num_bpd_rounds
begin_sampling_round = eval_meta.sampling_round_id + 1
else:
begin_ckpt = eval_meta.ckpt_id + 1
begin_bpd_round = 0
begin_sampling_round = 0
rng.replace_internal_state(eval_meta.rng_state)
logging.info("begin checkpoint: %d" % (begin_ckpt,))
for ckpt in range(begin_ckpt, config.eval.end_ckpt + 1):
## Part 1: Load checkpoint
# Wait if the target checkpoint doesn't exist yet
waiting_message_printed = False
ckpt_filename = os.path.join(checkpoint_dir, "checkpoint_{}".format(ckpt))
while not blobfile.exists(ckpt_filename):
if not waiting_message_printed and jax.process_index() == 0:
logging.warning("Waiting for the arrival of checkpoint_%d" % (ckpt,))
waiting_message_printed = True
time.sleep(60)
# Retry after a short wait in case the file exists but is not yet ready for reading
try:
state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=ckpt)
except Exception:
time.sleep(60)
try:
state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=ckpt)
except Exception:
raise OSError("checkpoint file is not ready for reading")
# Replicate the training state to prepare for pmap
pstate = flax.jax_utils.replicate(state)
## Part 2: Compute loss
if config.eval.enable_loss:
all_losses = []
all_log_stats = defaultdict(list)
eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types
for i, batch in enumerate(eval_iter):
eval_batch = jax.tree_util.tree_map(
lambda x: x.detach().cpu().numpy(), batch
)
next_rng = jnp.asarray(rng.take(jax.local_device_count()))
(_, _), (
p_eval_loss,
p_eval_log_stats,
) = p_eval_step((next_rng, pstate), eval_batch)
eval_loss = flax.jax_utils.unreplicate(p_eval_loss)
eval_log_stats = flax.jax_utils.unreplicate(p_eval_log_stats)
all_losses.extend(eval_loss)
for key, value in eval_log_stats.items():
all_log_stats[key].extend(value)
if (i + 1) % 1000 == 0 and jax.process_index() == 0:
logging.info("Finished %dth step loss evaluation" % (i + 1))
# Save loss values to disk or Google Cloud Storage
all_losses = jnp.asarray(all_losses)
all_log_stats = jax.tree_map(lambda x: jnp.asarray(x), all_log_stats)
with blobfile.BlobFile(
os.path.join(eval_dir, f"ckpt_{ckpt}_loss.npz"), "wb"
) as fout:
io_buffer = io.BytesIO()
np.savez_compressed(
io_buffer,
all_losses=all_losses,
mean_loss=all_losses.mean(),
**all_log_stats,
)
fout.write(io_buffer.getvalue())
## Part 3: Compute likelihood (bits/dim)
if config.eval.enable_bpd:
bpds = []
bpd_iter = iter(ds_bpd)
for _ in range(begin_bpd_round):
next(bpd_iter)
for i, eval_batch in enumerate(bpd_iter):
eval_batch = jax.tree_util.tree_map(
lambda x: x.detach().cpu().numpy(), eval_batch
)
step_rng = jnp.asarray(rng.take(jax.local_device_count()))
bpd = likelihood_fn(step_rng, pstate, eval_batch["image"])[0]
bpd = bpd.reshape(-1)
bpds.extend(bpd)
bpd_round_id = begin_bpd_round + i
logging.info(
"ckpt: %d, round: %d, mean bpd: %6f"
% (ckpt, bpd_round_id, jnp.mean(jnp.asarray(bpds)))
)
# Save bits/dim to disk or Google Cloud Storage
with blobfile.BlobFile(
os.path.join(
eval_dir,
f"{config.eval.bpd_dataset}_ckpt_{ckpt}_bpd_{bpd_round_id}.npz",
),
"wb",
) as fout:
io_buffer = io.BytesIO()
np.savez_compressed(io_buffer, bpd)
fout.write(io_buffer.getvalue())
eval_meta = eval_meta.replace(
ckpt_id=ckpt,
bpd_round_id=bpd_round_id,
rng_state=rng.internal_state,
)
# Save intermediate states to resume evaluation after pre-emption
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * (num_bpd_rounds + num_sampling_rounds) + bpd_round_id,
keep=1,
prefix=f"meta_{jax.process_index()}_",
)
else:
# Skip likelihood computation and save intermediate states for pre-emption
eval_meta = eval_meta.replace(ckpt_id=ckpt, bpd_round_id=num_bpd_rounds - 1)
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * (num_bpd_rounds + num_sampling_rounds) + num_bpd_rounds - 1,
keep=1,
prefix=f"meta_{jax.process_index()}_",
)
# Generate samples and compute IS/FID/KID when enabled
if config.eval.enable_sampling:
logging.info(f"Start sampling evaluation for ckpt {ckpt}")
# Run sample generation for multiple rounds to create enough samples
# Designed to be pre-emption safe. Automatically resumes when interrupted
for r in range(begin_sampling_round, num_sampling_rounds):
if jax.process_index() == 0:
logging.info("sampling -- ckpt: %d, round: %d" % (ckpt, r))
# Directory to save samples. Different for each host to avoid writing conflicts
this_sample_dir = os.path.join(
eval_dir, f"ckpt_{ckpt}_host_{jax.process_index()}"
)
blobfile.makedirs(this_sample_dir)
sample_rng = jnp.asarray(rng.take(jax.local_device_count()))
samples, n = sampling_fn(sample_rng, pstate)
samples = (samples + 1.0) / 2.0
samples = np.clip(samples * 255.0, 0, 255).astype(np.uint8)
samples = samples.reshape(
(
-1,
config.data.image_size,
config.data.image_size,
config.data.num_channels,
)
)
# Write samples to disk or Google Cloud Storage
with blobfile.BlobFile(
os.path.join(this_sample_dir, f"samples_{r}.npz"),
"wb",
) as fout:
io_buffer = io.BytesIO()
np.savez_compressed(io_buffer, samples=samples)
fout.write(io_buffer.getvalue())
# Save image samples and submit to the FID evaluation website
if r == num_sampling_rounds - 1:
# Collect samples from all hosts and sampling rounds
if jax.process_index() == 0:
all_samples = get_samples_from_ckpt(eval_dir, ckpt)
all_samples = all_samples[: config.eval.num_samples]
sample_path = os.path.join(eval_dir, f"ckpt_{ckpt}_samples.npz")
with blobfile.BlobFile(sample_path, "wb") as fout:
io_buffer = io.BytesIO()
np.savez_compressed(io_buffer, all_samples)
fout.write(io_buffer.getvalue())
# Update the intermediate evaluation state
eval_meta = eval_meta.replace(
ckpt_id=ckpt, sampling_round_id=r, rng_state=rng.internal_state
)
# Save intermediate states to resume evaluation after pre-emption
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * (num_sampling_rounds + num_bpd_rounds)
+ r
+ num_bpd_rounds,
keep=1,
prefix=f"meta_{jax.process_index()}_",
)
else:
# Skip sampling and save intermediate evaluation states for pre-emption
eval_meta = eval_meta.replace(
ckpt_id=ckpt,
sampling_round_id=num_sampling_rounds - 1,
rng_state=rng.internal_state,
)
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * (num_sampling_rounds + num_bpd_rounds)
+ num_sampling_rounds
- 1
+ num_bpd_rounds,
keep=1,
prefix=f"meta_{jax.process_index()}_",
)
begin_bpd_round = 0
begin_sampling_round = 0
# Remove all meta files after finishing evaluation
meta_files = blobfile.glob(os.path.join(eval_dir, f"meta_{jax.process_index()}_*"))
for file in meta_files:
blobfile.remove(file)
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All functions related to loss computation and optimization.
"""
import optax
import jax
import jax.numpy as jnp
import haiku as hk
import jax.random as random
from . import checkpoints
from .models import utils as mutils
from .utils import batch_mul
from jcm import sde_lib
import numpy as np
def get_optimizer(config):
"""Returns a flax optimizer object based on `config`."""
if config.optim.optimizer.lower() == "adam":
if hasattr(config.optim, "linear_decay_steps"): # for progressive distillation
stable_training_schedule = optax.linear_schedule(
init_value=config.optim.lr,
end_value=0.0,
transition_steps=config.optim.linear_decay_steps,
)
else:
stable_training_schedule = optax.constant_schedule(config.optim.lr)
schedule = optax.join_schedules(
[
optax.linear_schedule(
init_value=0,
end_value=config.optim.lr,
transition_steps=config.optim.warmup,
),
stable_training_schedule,
],
[config.optim.warmup],
)
if not np.isinf(config.optim.grad_clip):
optimizer = optax.chain(
optax.clip_by_global_norm(max_norm=config.optim.grad_clip),
optax.adamw(
learning_rate=schedule,
b1=config.optim.beta1,
eps=config.optim.eps,
weight_decay=config.optim.weight_decay,
),
)
else:
optimizer = optax.adamw(
learning_rate=schedule,
b1=config.optim.beta1,
eps=config.optim.eps,
weight_decay=config.optim.weight_decay,
)
elif config.optim.optimizer.lower() == "radam":
beta1 = config.optim.beta1
beta2 = config.optim.beta2
eps = config.optim.eps
weight_decay = config.optim.weight_decay
lr = config.optim.lr
optimizer = optax.chain(
optax.scale_by_radam(b1=beta1, b2=beta2, eps=eps),
optax.add_decayed_weights(weight_decay, None),
optax.scale(-lr),
)
else:
raise NotImplementedError(
f"Optimizer {config.optim.optimizer} not supported yet!"
)
def optimize_fn(grads, opt_state, params):
updates, opt_state = optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
return params, opt_state
return optimizer, optimize_fn
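# Minimal sketch of the warmup-then-stable learning-rate schedule assembled above
# (the numbers here are illustrative, not taken from any config in this repository):
#
#   warmup, lr = 1000, 2e-4
#   schedule = optax.join_schedules(
#       [
#           optax.linear_schedule(init_value=0.0, end_value=lr, transition_steps=warmup),
#           optax.constant_schedule(lr),
#       ],
#       boundaries=[warmup],
#   )
#   # schedule(0) == 0.0, schedule(warmup) == lr, and schedule stays at lr afterwards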
def get_loss_fn(config, sde, score_model, state, rng):
likelihood_weighting = config.training.likelihood_weighting
if config.training.loss.lower() in ["dsm", "ssm"]:
ssm = config.training.loss.lower() == "ssm"
train_loss_fn = get_score_matching_loss_fn(
sde,
score_model,
train=True,
likelihood_weighting=likelihood_weighting,
ssm=ssm,
)
eval_loss_fn = get_score_matching_loss_fn(
sde,
score_model,
train=False,
likelihood_weighting=likelihood_weighting,
ssm=ssm,
)
elif config.training.loss.lower().startswith(
("continuous", "consistency", "progressive_distillation")
):
optimizer, optimize_fn = get_optimizer(config.training.ref_config)
rng = hk.PRNGSequence(rng)
ref_config = config.training.ref_config
ref_model, init_ref_model_state, init_ref_params = mutils.init_model(
next(rng), ref_config
)
ref_state = mutils.State(
step=0,
lr=ref_config.optim.lr,
ema_rate=ref_config.model.ema_rate,
params=init_ref_params,
params_ema=init_ref_params,
model_state=init_ref_model_state,
opt_state=optimizer.init(init_ref_params),
rng_state=rng.internal_state,
)
ref_state = checkpoints.restore_checkpoint(
config.training.ref_model_path, ref_state
)
# Initialize the flow model from the denoiser model
if config.training.finetune:
state = state.replace(
params=ref_state.params,
params_ema=ref_state.params_ema,
model_state=ref_state.model_state,
)
if config.training.loss_norm.lower() == "lpips":
lpips_model, lpips_params = mutils.init_lpips(next(rng), config)
else:
lpips_model, lpips_params = None, None
if config.training.loss.lower().startswith("continuous"):
train_loss_fn = get_continuous_consistency_loss_fn(
sde,
ref_model,
ref_state.params_ema,
ref_state.model_state,
score_model,
train=True,
loss_norm=config.training.loss_norm,
stopgrad=config.training.stopgrad,
lpips_model=lpips_model,
lpips_params=lpips_params,
dsm_target=config.training.dsm_target,
)
eval_loss_fn = get_continuous_consistency_loss_fn(
sde,
ref_model,
ref_state.params_ema,
ref_state.model_state,
score_model,
train=False,
loss_norm=config.training.loss_norm,
stopgrad=config.training.stopgrad,
lpips_model=lpips_model,
lpips_params=lpips_params,
dsm_target=config.training.dsm_target,
)
elif config.training.loss.lower().startswith("consistency"):
train_loss_fn = get_consistency_loss_fn(
sde,
ref_model,
ref_state.params_ema,
ref_state.model_state,
score_model,
train=True,
loss_norm=config.training.loss_norm,
weighting=config.training.weighting,
stopgrad=config.training.stopgrad,
dsm_target=config.training.dsm_target,
solver=config.training.solver,
lpips_model=lpips_model,
lpips_params=lpips_params,
)
eval_loss_fn = get_consistency_loss_fn(
sde,
ref_model,
ref_state.params_ema,
ref_state.model_state,
score_model,
train=False,
loss_norm=config.training.loss_norm,
weighting=config.training.weighting,
stopgrad=config.training.stopgrad,
dsm_target=config.training.dsm_target,
solver=config.training.solver,
lpips_model=lpips_model,
lpips_params=lpips_params,
)
elif config.training.loss.lower() == "progressive_distillation":
train_loss_fn = get_progressive_distillation_loss_fn(
sde,
score_model,
train=True,
loss_norm=config.training.loss_norm,
weighting=config.training.weighting,
lpips_model=lpips_model,
lpips_params=lpips_params,
)
eval_loss_fn = get_progressive_distillation_loss_fn(
sde,
score_model,
train=False,
loss_norm=config.training.loss_norm,
weighting=config.training.weighting,
lpips_model=lpips_model,
lpips_params=lpips_params,
)
assert (
config.training.finetune
), "Finetuning is required for progressive distillation."
state = state.replace(
target_params=ref_state.params_ema,
)
else:
raise ValueError(f"Unknown loss {config.training.loss}")
return train_loss_fn, eval_loss_fn, state
def get_quarter_masks(t, ranges):
return [(ranges[i] <= t) & (t < ranges[i + 1]) for i in range(len(ranges) - 1)]
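# For example (hypothetical values), get_quarter_masks(t, np.array([0., 1., 2., 3., 4.]))
# returns four boolean masks selecting the entries of t that fall in
# [0, 1), [1, 2), [2, 3), and [3, 4) respectively.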
def get_consistency_loss_fn(
sde,
ref_model,
ref_params,
ref_states,
model,
train,
loss_norm="l1",
weighting="uniform",
stopgrad=True,
dsm_target=False,
solver="heun",
lpips_model=None,
lpips_params=None,
):
assert isinstance(sde, sde_lib.KVESDE), "Only KVE SDEs are supported for now."
denoiser_fn = mutils.get_denoiser_fn(
sde,
ref_model,
ref_params,
ref_states,
train=False,
return_state=False,
)
def heun_solver(samples, t, next_t, x0):
x = samples
if dsm_target:
denoiser = x0
else:
denoiser = denoiser_fn(x, t)
d = batch_mul(1 / t, x - denoiser)
samples = x + batch_mul(next_t - t, d)
if dsm_target:
denoiser = x0
else:
denoiser = denoiser_fn(samples, next_t)
next_d = batch_mul(1 / next_t, samples - denoiser)
samples = x + batch_mul((next_t - t) / 2, d + next_d)
return samples
def euler_solver(samples, t, next_t, x0):
x = samples
if dsm_target:
denoiser = x0
else:
denoiser = denoiser_fn(x, t)
score = batch_mul(1 / t**2, denoiser - x)
samples = x + batch_mul(next_t - t, -batch_mul(score, t))
return samples
if solver.lower() == "heun":
ode_solver = heun_solver
elif solver.lower() == "euler":
ode_solver = euler_solver
def loss_fn(rng, params, states, batch, target_params=None, num_scales=None):
rng = hk.PRNGSequence(rng)
x = batch["image"]
if target_params is None:
target_params = params
if num_scales is None:
num_scales = sde.N
indices = jax.random.randint(next(rng), (x.shape[0],), 0, num_scales - 1)
t = sde.t_max ** (1 / sde.rho) + indices / (num_scales - 1) * (
sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho)
)
t = t**sde.rho
t2 = sde.t_max ** (1 / sde.rho) + (indices + 1) / (num_scales - 1) * (
sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho)
)
t2 = t2**sde.rho
z = jax.random.normal(next(rng), x.shape)
x_t = x + batch_mul(t, z)
dropout_rng = next(rng)
Ft, new_states = mutils.get_distiller_fn(
sde, model, params, states, train=train, return_state=True
)(x_t, t, rng=dropout_rng if train else None)
x_t2 = ode_solver(x_t, t, t2, x)
Ft2, new_states = mutils.get_distiller_fn(
sde, model, target_params, new_states, train=train, return_state=True
)(x_t2, t2, rng=dropout_rng if train else None)
if stopgrad:
Ft2 = jax.lax.stop_gradient(Ft2)
diffs = Ft - Ft2
if weighting.lower() == "uniform":
weight = jnp.ones_like(t)
elif weighting.lower() == "snrp1":
weight = 1 / t**2 + 1.0
elif weighting.lower() == "truncated_snr":
weight = jnp.maximum(1 / t**2, jnp.ones_like(t))
elif weighting.lower() == "snr":
weight = 1 / t**2
else:
raise NotImplementedError(f"Weighting {weighting} not implemented")
if loss_norm.lower() == "l1":
losses = jnp.abs(diffs)
losses = jnp.mean(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "l2":
losses = diffs**2
losses = jnp.mean(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "linf":
losses = jnp.abs(diffs)
losses = jnp.max(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "lpips":
scaled_Ft = jax.image.resize(
Ft, (Ft.shape[0], 224, 224, 3), method="bilinear"
)
scaled_Ft2 = jax.image.resize(
Ft2, (Ft2.shape[0], 224, 224, 3), method="bilinear"
)
losses = jnp.squeeze(lpips_model.apply(lpips_params, scaled_Ft, scaled_Ft2))
else:
raise ValueError("Unknown loss norm: {}".format(loss_norm))
loss = jnp.nansum(losses * batch["mask"] * weight / jnp.sum(batch["mask"]))
log_stats = {}
## Uncomment to log loss per time step
# for t_index in range(sde.N - 1):
# mask = (indices == t_index).astype(jnp.float32)
# log_stats["loss_t{}".format(t_index)] = jnp.nansum(
# losses * batch["mask"] * mask / jnp.sum(batch["mask"] * mask)
# )
return loss, (new_states, log_stats)
return loss_fn
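# Note on the Heun solver above (a restatement of the code, not an addition to it):
# it takes one step of the probability-flow ODE dx/dt = (x - D(x, t)) / t in the EDM
# parameterization, where D is the teacher denoiser:
#   d    = (x_t - D(x_t, t)) / t
#   x'   = x_t + (t2 - t) * d
#   d'   = (x' - D(x', t2)) / t2
#   x_t2 = x_t + (t2 - t) * (d + d') / 2
# With dsm_target=True the clean image x replaces D(., .), so the pair (x_t, x_t2) is
# built from ground-truth data rather than from the teacher model.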
def get_progressive_distillation_loss_fn(
sde,
model,
train,
loss_norm="l2",
weighting="truncated_snr",
lpips_model=None,
lpips_params=None,
):
assert isinstance(sde, sde_lib.KVESDE), "Only KVE SDEs are supported for now."
def loss_fn(rng, params, states, batch, target_params, num_scales):
rng = hk.PRNGSequence(rng)
x = batch["image"]
indices = jax.random.randint(next(rng), (x.shape[0],), 0, num_scales)
t = sde.t_max ** (1 / sde.rho) + indices / num_scales * (
sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho)
)
t = t**sde.rho
t2 = sde.t_max ** (1 / sde.rho) + (indices + 0.5) / num_scales * (
sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho)
)
t2 = t2**sde.rho
t3 = sde.t_max ** (1 / sde.rho) + (indices + 1) / num_scales * (
sde.t_min ** (1 / sde.rho) - sde.t_max ** (1 / sde.rho)
)
t3 = t3**sde.rho
z = jax.random.normal(next(rng), x.shape)
x_t = x + batch_mul(t, z)
dropout_rng = next(rng)
denoised_x, new_states = mutils.get_denoiser_fn(
sde, model, params, states, train=train, return_state=True
)(x_t, t, rng=dropout_rng if train else None)
target_denoiser_fn = mutils.get_denoiser_fn(
sde,
model,
target_params,
states,
train=False,
return_state=False,
)
def euler_solver(samples, t, next_t):
x = samples
denoiser = target_denoiser_fn(x, t, rng=None)
score = batch_mul(1 / t**2, denoiser - x)
samples = x + batch_mul(next_t - t, -batch_mul(score, t))
return samples
def euler_to_denoiser(x_t, t, x_next_t, next_t):
denoiser = x_t - batch_mul(t, batch_mul(x_next_t - x_t, 1 / (next_t - t)))
return denoiser
x_t2 = euler_solver(x_t, t, t2)
x_t3 = euler_solver(x_t2, t2, t3)
target_x = jax.lax.stop_gradient(euler_to_denoiser(x_t, t, x_t3, t3))
diffs = denoised_x - target_x
if loss_norm.lower() == "l1":
losses = jnp.abs(diffs)
losses = jnp.mean(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "l2":
losses = diffs**2
losses = jnp.mean(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "linf":
losses = jnp.abs(diffs)
losses = jnp.max(losses.reshape(losses.shape[0], -1), axis=-1)
elif loss_norm.lower() == "lpips":
scaled_denoised_x = jax.image.resize(
denoised_x, (denoised_x.shape[0], 224, 224, 3), method="bilinear"
)
scaled_target_x = jax.image.resize(
target_x, (target_x.shape[0], 224, 224, 3), method="bilinear"
)
losses = jnp.squeeze(
lpips_model.apply(lpips_params, scaled_denoised_x, scaled_target_x)
)
else:
raise ValueError("Unknown loss norm: {}".format(loss_norm))
if weighting.lower() == "snrp1":
weight = 1 / t**2 + 1
elif weighting.lower() == "truncated_snr":
weight = jnp.maximum(1 / t**2, jnp.ones_like(t))
elif weighting.lower() == "snr":
weight = 1 / t**2
loss = jnp.nansum(losses * batch["mask"] * weight / jnp.sum(batch["mask"]))
log_stats = {}
return loss, (new_states, log_stats)
return loss_fn
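# Note on the target above: two Euler half-steps of the frozen target network,
# t -> t2 -> t3, are collapsed by `euler_to_denoiser` into the denoiser output that a
# single Euler step from t directly to t3 would have required; the student denoiser is
# then regressed onto this target, which is the usual progressive distillation
# construction (halving the number of sampling steps at each stage).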
def get_continuous_consistency_loss_fn(
sde,
ref_model,
ref_params,
ref_states,
model,
train,
loss_norm="l1",
stopgrad=False,
lpips_model=None,
lpips_params=None,
dsm_target=False,
):
assert isinstance(sde, sde_lib.KVESDE), "Only KVE SDEs are supported for now."
score_fn = mutils.get_score_fn(
sde,
ref_model,
ref_params,
ref_states,
train=False,
return_state=False,
)
def loss_fn(rng, params, states, batch):
rng = hk.PRNGSequence(rng)
x = batch["image"]
# Sample t continuously, using the same rho-warped spacing as the Heun sampler grid
t = jax.random.uniform(
next(rng),
(x.shape[0],),
minval=sde.t_min ** (1 / sde.rho),
maxval=sde.t_max ** (1 / sde.rho),
) ** (sde.rho)
weightings = jnp.ones_like(t)
z = jax.random.normal(next(rng), x.shape)
x_t = x + batch_mul(t, z)
if dsm_target:
score_t = batch_mul(x - x_t, 1 / t**2)
else:
score_t = score_fn(x_t, t)
if train:
step_rng = next(rng)
else:
step_rng = None
def model_fn(data, time):
return mutils.get_distiller_fn(
sde, model, params, states, train=train, return_state=True
)(data, time, rng=step_rng)
Ft, diffs, new_states = jax.jvp(
model_fn, (x_t, t), (batch_mul(t, score_t), -jnp.ones_like(t)), has_aux=True
)
if loss_norm.lower() == "l1":
losses = jnp.abs(diffs)
losses = jnp.mean(losses.reshape(losses.shape[0], -1), axis=1)
elif loss_norm.lower() == "l2":
losses = diffs**2
losses = jnp.sqrt(jnp.sum(losses.reshape(losses.shape[0], -1), axis=1))
elif loss_norm.lower() == "linf":
losses = jnp.abs(diffs)
losses = jnp.max(losses.reshape(losses.shape[0], -1), axis=1)
elif loss_norm.lower() == "lpips":
def metric(x):
scaled_Ft = jax.image.resize(
Ft, (Ft.shape[0], 224, 224, 3), method="bilinear"
)
x = jax.image.resize(x, (x.shape[0], 224, 224, 3), method="bilinear")
return jnp.sum(
jnp.squeeze(lpips_model.apply(lpips_params, scaled_Ft, x))
)
losses = (
jax.grad(lambda x: jnp.sum(jax.grad(metric)(x) * diffs))(Ft) * diffs
)
losses = jnp.sum(losses.reshape(losses.shape[0], -1), axis=1)
else:
raise ValueError("Unknown loss norm: {}".format(loss_norm))
if stopgrad:
if loss_norm.lower() == "l2":
pseudo_losses = -jax.lax.stop_gradient(diffs) * Ft
pseudo_losses = jnp.sum(
pseudo_losses.reshape((pseudo_losses.shape[0], -1)), axis=-1
)
loss = jnp.nansum(
pseudo_losses * batch["mask"] * weightings / jnp.sum(batch["mask"])
)
elif loss_norm.lower() == "lpips":
def metric_fn(x):
x = jax.image.resize(
x, (x.shape[0], 224, 224, 3), method="bilinear"
)
y = jax.image.resize(
jax.lax.stop_gradient(Ft),
(x.shape[0], 224, 224, 3),
method="bilinear",
)
return jnp.sum(jnp.squeeze(lpips_model.apply(lpips_params, x, y)))
# forward-over-reverse
def hvp(f, primals, tangents):
return jax.jvp(jax.grad(f), primals, tangents)[1]
pseudo_losses = Ft * hvp(
metric_fn,
(jax.lax.stop_gradient(Ft),),
(-jax.lax.stop_gradient(diffs),),
)
pseudo_losses = jnp.sum(
pseudo_losses.reshape((pseudo_losses.shape[0], -1)), axis=-1
)
loss = jnp.nansum(
pseudo_losses * batch["mask"] * weightings / jnp.sum(batch["mask"])
)
else:
raise NotImplementedError
else:
loss = jnp.nansum(
losses * batch["mask"] * weightings / jnp.sum(batch["mask"])
)
quarter_masks = get_quarter_masks(
t,
np.linspace(sde.t_min ** (1 / sde.rho), sde.t_max ** (1 / sde.rho), 5)
** sde.rho,
)
loss_q1 = jnp.nansum(
losses
* quarter_masks[0]
* batch["mask"]
/ jnp.sum(quarter_masks[0] * batch["mask"])
)
loss_q2 = jnp.nansum(
losses
* quarter_masks[1]
* batch["mask"]
/ jnp.sum(quarter_masks[1] * batch["mask"])
)
loss_q3 = jnp.nansum(
losses
* quarter_masks[2]
* batch["mask"]
/ jnp.sum(quarter_masks[2] * batch["mask"])
)
loss_q4 = jnp.nansum(
losses
* quarter_masks[3]
* batch["mask"]
/ jnp.sum(quarter_masks[3] * batch["mask"])
)
log_stats = {
"loss": loss,
"loss_q1": loss_q1,
"loss_q2": loss_q2,
"loss_q3": loss_q3,
"loss_q4": loss_q4,
}
return loss, (new_states, log_stats)
return loss_fn
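# Note on the jax.jvp call inside the loss above: with tangents (t * score_t, -1),
# `diffs` equals (up to sign) the total derivative of the distiller F along the
# probability-flow ODE trajectory dx/dt = -t * score(x, t):
#   dF/dt |_trajectory = dF/dx * dx/dt + dF/dt.
# Driving this derivative to zero makes F constant along ODE trajectories, which is
# the continuous-time consistency condition this loss enforces.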
def get_score_matching_loss_fn(
sde,
model,
train,
likelihood_weighting=False,
ssm=False,
eps=1e-5,
):
"""Create a loss function for training with arbirary SDEs.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
train: `True` for training loss and `False` for evaluation loss.
likelihood_weighting: If `True`, weight the mixture of score matching losses
according to https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended in our paper.
ssm: If `True`, use sliced score matching; otherwise use denoising score matching.
eps: A `float` number. The smallest time step to sample from.
Returns:
A loss function.
"""
def dsm_loss_fn(rng, params, states, batch):
"""Compute the loss function based on denoising score matching.
Args:
rng: A JAX random state.
params: A dictionary that contains trainable parameters of the score-based model.
states: A dictionary that contains mutable states of the score-based model.
batch: A mini-batch of training data.
Returns:
loss: A scalar that represents the average loss value across the mini-batch.
new_model_state: A dictionary that contains the mutated states of the score-based model.
"""
data = batch["image"]
rng = hk.PRNGSequence(rng)
if isinstance(sde, sde_lib.KVESDE):
t = random.normal(next(rng), (data.shape[0],)) * 1.2 - 1.2
t = jnp.exp(t)
else:
t = random.uniform(next(rng), (data.shape[0],), minval=eps, maxval=sde.T)
z = random.normal(next(rng), data.shape)
mean, std = sde.marginal_prob(data, t)
perturbed_data = mean + batch_mul(std, z)
if isinstance(sde, sde_lib.KVESDE):
score_fn = mutils.get_score_fn(
sde,
model,
params,
states,
train=train,
return_state=True,
)
score, new_model_state = score_fn(perturbed_data, t, rng=next(rng))
losses = jnp.square(batch_mul(score, std) + z)
losses = batch_mul(
losses, (std**2 + sde.data_std**2) / sde.data_std**2
)
losses = jnp.sum(losses.reshape((losses.shape[0], -1)), axis=-1)
else:
score_fn = mutils.get_score_fn(
sde,
model,
params,
states,
train=train,
return_state=True,
)
score, new_model_state = score_fn(perturbed_data, t, rng=next(rng))
if not likelihood_weighting:
losses = jnp.square(batch_mul(score, std) + z)
losses = jnp.mean(losses.reshape((losses.shape[0], -1)), axis=-1)
else:
g2 = sde.sde(jnp.zeros_like(data), t)[1] ** 2
losses = jnp.square(score + batch_mul(z, 1.0 / std))
losses = jnp.mean(losses.reshape((losses.shape[0], -1)), axis=-1) * g2
loss = jnp.nansum(losses * batch["mask"] / jnp.sum(batch["mask"]))
quarter_masks = get_quarter_masks(
t,
np.linspace(sde.t_min ** (1 / sde.rho), sde.t_max ** (1 / sde.rho), 5)
** sde.rho,
)
loss_q1 = jnp.nansum(
losses
* quarter_masks[0]
* batch["mask"]
/ jnp.sum(quarter_masks[0] * batch["mask"])
)
loss_q2 = jnp.nansum(
losses
* quarter_masks[1]
* batch["mask"]
/ jnp.sum(quarter_masks[1] * batch["mask"])
)
loss_q3 = jnp.nansum(
losses
* quarter_masks[2]
* batch["mask"]
/ jnp.sum(quarter_masks[2] * batch["mask"])
)
loss_q4 = jnp.nansum(
losses
* quarter_masks[3]
* batch["mask"]
/ jnp.sum(quarter_masks[3] * batch["mask"])
)
log_stats = {
"loss_q1": loss_q1,
"loss_q2": loss_q2,
"loss_q3": loss_q3,
"loss_q4": loss_q4,
}
return loss, (new_model_state, log_stats)
def ssm_loss_fn(rng, params, states, batch):
"""Compute the loss function based on sliced score matching.
Args:
rng: A JAX random state.
params: A dictionary that contains trainable parameters of the score-based model.
states: A dictionary that contains mutable states of the score-based model.
batch: A mini-batch of training data.
Returns:
loss: A scalar that represents the average loss value across the mini-batch.
new_model_state: A dictionary that contains the mutated states of the score-based model.
"""
score_fn = mutils.get_score_fn(
sde,
model,
params,
states,
train=train,
return_state=True,
)
data = batch["image"]
rng = hk.PRNGSequence(rng)
# DEBUG: beware of eps!
if isinstance(sde, sde_lib.KVESDE):
t = random.normal(next(rng), (data.shape[0],)) * 1.2 - 1.2
t = jnp.exp(t)
else:
t = random.uniform(next(rng), (data.shape[0],), minval=eps, maxval=sde.T)
z = random.normal(next(rng), data.shape)
mean, std = sde.marginal_prob(data, t)
perturbed_data = mean + batch_mul(std, z)
def score_fn_for_jvp(x):
return score_fn(x, t, rng=next(rng))
epsilon = random.rademacher(next(rng), data.shape, dtype=data.dtype)
score, score_trace, new_model_state = jax.jvp(
score_fn_for_jvp, (perturbed_data,), (epsilon,), has_aux=True
)
score_norm = jnp.mean(jnp.square(score).reshape((score.shape[0], -1)), axis=-1)
score_trace = jnp.mean(
(2 * score_trace * epsilon).reshape((score.shape[0], -1)), axis=-1
)
if not likelihood_weighting:
losses = (score_norm + score_trace) * std**2
elif isinstance(sde, sde_lib.KVESDE):
losses = score_norm + score_trace
losses = (
losses * std**2 * (std**2 + sde.data_std**2) / sde.data_std**2
)
else:
g2 = sde.sde(jnp.zeros_like(data), t)[1] ** 2
losses = (score_norm + score_trace) * g2
loss = jnp.nansum(losses * batch["mask"] / jnp.sum(batch["mask"]))
quarter_masks = get_quarter_masks(
t,
np.linspace(sde.t_min ** (1 / sde.rho), sde.t_max ** (1 / sde.rho), 5)
** sde.rho,
)
loss_q1 = jnp.nansum(
losses
* quarter_masks[0]
* batch["mask"]
/ jnp.sum(quarter_masks[0] * batch["mask"])
)
loss_q2 = jnp.nansum(
losses
* quarter_masks[1]
* batch["mask"]
/ jnp.sum(quarter_masks[1] * batch["mask"])
)
loss_q3 = jnp.nansum(
losses
* quarter_masks[2]
* batch["mask"]
/ jnp.sum(quarter_masks[2] * batch["mask"])
)
loss_q4 = jnp.nansum(
losses
* quarter_masks[3]
* batch["mask"]
/ jnp.sum(quarter_masks[3] * batch["mask"])
)
log_stats = {
"loss_q1": loss_q1,
"loss_q2": loss_q2,
"loss_q3": loss_q3,
"loss_q4": loss_q4,
"loss": loss,
}
return loss, (new_model_state, log_stats)
return dsm_loss_fn if not ssm else ssm_loss_fn
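# Note on the DSM objective above: with perturbed_data = mean + std * z, the score of
# the perturbation kernel at perturbed_data is -z / std, so
# ||batch_mul(score, std) + z||^2 is the standard denoising score matching loss
# rescaled by std**2; the likelihood_weighting branch instead multiplies the unscaled
# loss by the squared diffusion coefficient g(t)**2, following the likelihood
# weighting referenced in the docstring.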
def get_ema_scales_fn(config):
if config.training.loss.lower() in ("dsm", "ssm", "continuous", "consistency"):
def ema_and_scales_fn(step):
return None, None
else:
def ema_and_scales_fn(step):
if (
config.training.target_ema_mode == "fixed"
and config.training.scale_mode == "fixed"
):
target_ema = float(config.training.target_ema)
scales = int(config.model.num_scales)
elif (
config.training.target_ema_mode == "adaptive"
and config.training.scale_mode == "progressive"
):
start_ema = float(config.training.start_ema)
start_scales = int(config.training.start_scales)
end_scales = int(config.training.end_scales)
total_steps = int(config.training.n_iters)
scales = jnp.ceil(
jnp.sqrt(
(step / total_steps)
* ((end_scales + 1) ** 2 - start_scales**2)
+ start_scales**2
)
- 1
).astype(jnp.int32)
scales = jnp.maximum(scales, 1)
c = -jnp.log(start_ema) * start_scales
target_ema = jnp.exp(-c / scales)
scales = scales + 1
elif (
config.training.target_ema_mode == "fixed"
and config.training.scale_mode == "progdist"
):
start_scales = int(config.training.start_scales)
distill_steps_per_iter = int(config.training.distill_steps_per_iter)
distill_stage = step // distill_steps_per_iter
scales = start_scales // (2**distill_stage)
scales = jnp.maximum(scales, 2)
sub_stage = jnp.maximum(
step - distill_steps_per_iter * (jnp.log2(start_scales) - 1),
0,
)
sub_stage = sub_stage // (distill_steps_per_iter * 2)
sub_scales = 2 // (2**sub_stage)
sub_scales = jnp.maximum(sub_scales, 1)
scales = jnp.where(scales == 2, sub_scales, scales)
target_ema = 1.0
else:
raise NotImplementedError
return target_ema, scales
return ema_and_scales_fn
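# Sketch of the adaptive branch above (illustrative numbers only): with start_scales=2
# and end_scales=150, the number of discretization steps grows from start_scales at
# step 0 to end_scales + 1 at step n_iters, following the square-root curriculum under
# the ceil; the target EMA rate is tied to it via
#   target_ema = exp(log(start_ema) * start_scales / scales),
# so it rises toward 1 as the number of scales grows.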
def get_step_fn(
loss_fn,
train,
optimize_fn=None,
ema_scales_fn=None,
):
"""Create a one-step training/evaluation function.
Args:
loss_fn: The loss function for training or evaluation. It should have the
signature `loss_fn(rng, params, states, batch)`.
train: `True` for training and `False` for evaluation.
optimize_fn: An optimization function.
ema_scales_fn: A function that returns the current EMA and number of scales. Useful for progressive training.
Returns:
A one-step function for training or evaluation.
"""
def step_fn(carry_state, batch):
"""Running one step of training or evaluation.
This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together
for faster execution.
Args:
carry_state: A tuple (JAX random state, `flax.struct.dataclass` containing the training state).
batch: A mini-batch of training/evaluation data.
Returns:
new_carry_state: The updated tuple of `carry_state`.
loss: The average loss value of this state.
"""
(rng, state) = carry_state
rng, step_rng = jax.random.split(rng)
grad_fn = jax.value_and_grad(loss_fn, argnums=1, has_aux=True)
if train:
step = state.step
params = state.params
states = state.model_state
opt_state = state.opt_state
target_ema, num_scales = ema_scales_fn(step)
if target_ema is None and num_scales is None:
(
loss,
(new_model_state, log_stats),
), grad = grad_fn(step_rng, params, states, batch)
grad = jax.lax.pmean(grad, axis_name="batch")
new_params, new_opt_state = optimize_fn(grad, opt_state, params)
new_params_ema = jax.tree_util.tree_map(
lambda p_ema, p: p_ema * state.ema_rate
+ p * (1.0 - state.ema_rate),
state.params_ema,
new_params,
)
step = state.step + 1
new_state = state.replace(
step=step,
params=new_params,
params_ema=new_params_ema,
model_state=new_model_state,
opt_state=new_opt_state,
)
else:
target_params = state.target_params
(loss, (new_model_state, log_stats)), grad = grad_fn(
step_rng, params, states, batch, target_params, num_scales
)
grad = jax.lax.pmean(grad, axis_name="batch")
new_params, new_opt_state = optimize_fn(grad, opt_state, params)
new_params_ema = jax.tree_util.tree_map(
lambda p_ema, p: p_ema * state.ema_rate
+ p * (1.0 - state.ema_rate),
state.params_ema,
new_params,
)
new_target_params = jax.tree_util.tree_map(
lambda p_target, p: p_target * target_ema + p * (1.0 - target_ema),
target_params,
new_params,
)
step = state.step + 1
new_state = state.replace(
step=step,
params=new_params,
params_ema=new_params_ema,
target_params=new_target_params,
model_state=new_model_state,
opt_state=new_opt_state,
)
else:
target_ema, num_scales = ema_scales_fn(state.step)
if target_ema is None and num_scales is None:
loss, (_, log_stats) = loss_fn(
step_rng,
state.params_ema,
state.model_state,
batch,
)
else:
loss, (_, log_stats) = loss_fn(
step_rng,
state.params_ema,
state.model_state,
batch,
state.target_params,
num_scales,
)
new_state = state
loss = jax.lax.pmean(loss, axis_name="batch")
mean_log_stats = jax.tree_map(
lambda x: jax.lax.pmean(x, axis_name="batch"), log_stats
)
new_carry_state = (rng, new_state)
return new_carry_state, (loss, mean_log_stats)
return step_fn
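# Sketch of how step_fn is typically driven (mirroring the training loop earlier in
# this document): a stack of n_jitted_steps batches is scanned over, and the scan is
# pmapped across devices, e.g.
#
#   p_step = jax.pmap(functools.partial(jax.lax.scan, step_fn), axis_name="batch")
#   (rng, pstate), (losses, log_stats) = p_step((rngs, pstate), stacked_batches)
#
# where `rngs`, `pstate`, and `stacked_batches` carry a leading device dimension
# (these names are illustrative).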
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# pytype: skip-file
"""Various sampling methods."""
import jax
import flax
import jax.numpy as jnp
import numpy as np
from scipy import integrate
import haiku as hk
from jcm.utils import T
from .models import utils as mutils
import diffrax
def get_div_fn(fn):
"""Create the divergence function of `fn` using the Hutchinson-Skilling trace estimator."""
## Reverse-mode differentiation (slower)
# def div_fn(x, t, eps):
# grad_fn = lambda data: jnp.sum(fn(data, t) * eps)
# grad_fn_eps = jax.grad(grad_fn)(x)
# return jnp.sum(grad_fn_eps * eps, axis=tuple(range(1, len(x.shape))))
## Forward-mode differentiation (faster)
def div_fn(x, t, eps):
jvp = jax.jvp(lambda x: fn(x, t), (x,), (eps,))[1]
return jnp.sum(jvp * eps, axis=tuple(range(1, len(x.shape))))
return div_fn
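# The estimator above uses the Hutchinson-Skilling identity
#   div f(x) = E_eps[ eps^T (df/dx) eps ]   with E[eps eps^T] = I,
# evaluated with a single JVP instead of materializing the Jacobian. A toy check (the
# function `f` below is hypothetical, only for illustration):
#
#   f = lambda x, t: -t * x                      # Jacobian is -t * I
#   eps = jax.random.rademacher(jax.random.PRNGKey(0), (4, 8), dtype=jnp.float32)
#   est = get_div_fn(f)(jnp.ones((4, 8)), 2.0, eps)
#   # est == -16.0 for every sample: the Jacobian is diagonal and eps**2 == 1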
def get_likelihood_fn(
sde,
model,
hutchinson_type="Rademacher",
rtol=1e-5,
atol=1e-5,
eps=1e-5,
num_repeats=1,
):
"""Create a function to compute the unbiased log-likelihood estimate of a given data point.
Args:
sde: A `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
hutchinson_type: "Rademacher" or "Gaussian". The type of noise for Hutchinson-Skilling trace estimator.
rtol: A `float` number. The relative tolerance level of the black-box ODE solver.
atol: A `float` number. The absolute tolerance level of the black-box ODE solver.
eps: A `float` number. The probability flow ODE is integrated to `eps` for numerical stability.
num_repeats: The number of times to repeat the black-box ODE solver for reduced variance.
Returns:
A function that takes random states, replicated training states, and a batch of data points
and returns the log-likelihoods in bits/dim, the latent code, and the number of function
evaluations cost by computation.
"""
def drift_fn(state, x, t):
"""The drift function of the reverse-time SDE."""
score_fn = mutils.get_score_fn(
sde,
model,
state.params_ema,
state.model_state,
train=False,
)
# Probability flow ODE is a special case of Reverse SDE
rsde = sde.reverse(score_fn, probability_flow=True)
return rsde.sde(x, t)[0]
def likelihood_fn(rng, state, data):
"""Compute an unbiased estimate to the log-likelihood in bits/dim.
Args:
rng: An array of random states.
state: Replicated training state for running on multiple devices.
data: A JAX array of shape [batch size, ...].
Returns:
bpd: A JAX array of shape [batch size]. The log-likelihoods on `data` in bits/dim.
z: A JAX array of the same shape as `data`. The latent representation of `data` under the
probability flow ODE.
nfe: An integer. The number of function evaluations used for running the black-box ODE solver.
"""
div_fn = get_div_fn(lambda x, t: drift_fn(state, x, t))
rng = hk.PRNGSequence(rng)
shape = data.shape
if hutchinson_type == "Gaussian":
epsilon = jax.random.normal(next(rng), shape)
elif hutchinson_type == "Rademacher":
epsilon = jax.random.rademacher(next(rng), shape, dtype=data.dtype)
else:
raise NotImplementedError(f"Hutchinson type {hutchinson_type} unknown.")
## ODE function for diffrax ODE solver
def ode_func(t, x, args):
sample = x[..., :-1]
vec_t = jnp.ones((sample.shape[0],)) * t
drift = drift_fn(state, sample, vec_t)
logp_grad = div_fn(sample, vec_t, epsilon)
return jnp.stack([drift, logp_grad], axis=-1)
term = diffrax.ODETerm(ode_func)
solver = diffrax.Tsit5()
stepsize_controller = diffrax.PIDController(rtol=rtol, atol=atol)
solution = diffrax.diffeqsolve(
term,
solver,
t0=sde.T,
t1=eps,
dt0=eps - sde.T,
y0=jnp.stack([data, jnp.zeros_like((data.shape[0],))], axis=-1),
stepsize_controller=stepsize_controller,
)
nfe = solution.stats["num_steps"]
z = solution.ys[-1, ..., :-1]
delta_logp = solution.ys[-1, ..., -1]
prior_logp = sde.prior_logp(z)
bpd = -(prior_logp + delta_logp) / np.log(2)
N = np.prod(shape[1:])
bpd = bpd / N
offset = 7.0
bpd += offset
return bpd, z, nfe
def likelihood_fn_repeated(rng, state, data):
def loop_fn(i, carry):
bpd, nfe, rng = carry
rng, step_rng = jax.random.split(rng)
bpd_i, z_i, nfe_i = likelihood_fn(step_rng, state, data)
bpd = bpd + bpd_i
nfe = nfe + nfe_i
return bpd, nfe, rng
bpd, nfe, rng = jax.lax.fori_loop(
0, num_repeats, loop_fn, (jnp.zeros(data.shape[0]), 0, rng)
)
bpd = bpd / num_repeats
nfe = nfe / num_repeats
return bpd, nfe
return jax.pmap(likelihood_fn_repeated, axis_name="batch")
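# Hedged usage sketch (the names `prng`, `pstate`, and `eval_batch` are assumptions
# for illustration): the returned function is already pmapped, so every argument
# carries a leading device dimension:
#
#   likelihood_fn = get_likelihood_fn(sde, score_model, num_repeats=1)
#   bpd, nfe = likelihood_fn(prng, pstate, eval_batch["image"])
#   # bpd: per-example bits/dim averaged over num_repeats runs; nfe: mean solver steps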
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation"""
from jcm import train
from jcm import evaluate
from jcm import metrics
import logging
import os
import blobfile
import wandb
from absl import flags, app
from ml_collections.config_flags import config_flags
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True
)
flags.DEFINE_string("workdir", None, "Work directory.")
flags.DEFINE_enum(
"mode",
None,
["train", "eval", "metrics"],
"Running mode: train or eval or metrics",
)
flags.DEFINE_string(
"eval_folder", "eval", "The folder name for storing evaluation results"
)
flags.DEFINE_integer("num_gpus", 8, "Number of GPUs to use.")
flags.mark_flags_as_required(["workdir", "config", "mode"])
def main(argv):
if FLAGS.mode == "train":
wandb.login()
wandb.init(
project=os.path.basename(FLAGS.workdir),
name=os.path.basename(FLAGS.workdir),
config=FLAGS.config.to_dict(),
)
# Create the working directory
blobfile.makedirs(FLAGS.workdir)
formatter = logging.Formatter(
"%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
)
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel("INFO")
# Run the training pipeline
train.train(FLAGS.config, FLAGS.workdir)
elif FLAGS.mode == "eval":
# Run the evaluation pipeline
evaluate.evaluate(
FLAGS.config,
FLAGS.workdir,
FLAGS.eval_folder,
)
elif FLAGS.mode == "metrics":
# Compute the metrics
metrics.compute_metrics(
FLAGS.config,
FLAGS.workdir,
FLAGS.eval_folder,
)
else:
raise ValueError(f"Mode {FLAGS.mode} not recognized.")
if __name__ == "__main__":
app.run(main)
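# Hedged example invocation (the script name and config path are illustrative, not
# file names defined in this document):
#
#   python main.py --config configs/cifar10.py --workdir /tmp/run --mode train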
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""The NCSNv2 model."""
import flax.linen as nn
import functools
from .utils import register_model
from .layers import (
CondRefineBlock,
RefineBlock,
ResidualBlock,
ncsn_conv3x3,
ConditionalResidualBlock,
get_act,
)
from .normalization import get_normalization
import ml_collections
CondResidualBlock = ConditionalResidualBlock
conv3x3 = ncsn_conv3x3
def get_network(config):
if config.data.image_size < 96:
return functools.partial(NCSNv2, config=config)
elif 96 <= config.data.image_size <= 128:
return functools.partial(NCSNv2_128, config=config)
elif 128 < config.data.image_size <= 256:
return functools.partial(NCSNv2_256, config=config)
else:
raise NotImplementedError(
f"No network suitable for {config.data.image_size}px implemented yet."
)
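# Illustrative reading of get_network (added comment, not in the original file):
#   net_cls = get_network(config)  # e.g. functools.partial(NCSNv2, config=config)
#   model = net_cls()              # instantiate the Flax module
# image_size < 96 selects NCSNv2, 96-128 selects NCSNv2_128, and 129-256
# selects NCSNv2_256.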
@register_model(name="ncsnv2_64")
class NCSNv2(nn.Module):
"""NCSNv2 model architecture."""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, labels, train=True):
# config parsing
config = self.config
nf = config.model.nf
act = get_act(config)
normalizer = get_normalization(config)
interpolation = config.model.interpolation
if not config.data.centered:
h = 2 * x - 1.0
else:
h = x
h = conv3x3(h, nf, stride=1, bias=True)
# ResNet backbone
h = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
layer1 = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer1
)
layer2 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(
2 * nf, resample="down", act=act, normalization=normalizer, dilation=2
)(layer2)
layer3 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer, dilation=2
)(h)
h = ResidualBlock(
2 * nf, resample="down", act=act, normalization=normalizer, dilation=4
)(layer3)
layer4 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer, dilation=4
)(h)
# U-Net with RefineBlocks
ref1 = RefineBlock(
layer4.shape[1:3], 2 * nf, act=act, interpolation=interpolation, start=True
)([layer4])
ref2 = RefineBlock(
layer3.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer3, ref1])
ref3 = RefineBlock(
layer2.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer2, ref2])
ref4 = RefineBlock(
layer1.shape[1:3], nf, interpolation=interpolation, act=act, end=True
)([layer1, ref3])
h = normalizer()(ref4)
h = act(h)
h = conv3x3(h, x.shape[-1])
return h
@register_model(name="ncsn")
class NCSN(nn.Module):
"""NCSNv1 model architecture."""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, labels, train=True):
# config parsing
config = self.config
nf = config.model.nf
act = get_act(config)
normalizer = get_normalization(config, conditional=True)
interpolation = config.model.interpolation
if not config.data.centered:
h = 2 * x - 1.0
else:
h = x
h = conv3x3(h, nf, stride=1, bias=True)
# ResNet backbone
h = CondResidualBlock(nf, resample=None, act=act, normalization=normalizer)(
h, labels
)
layer1 = CondResidualBlock(
nf, resample=None, act=act, normalization=normalizer
)(h, labels)
h = CondResidualBlock(
2 * nf, resample="down", act=act, normalization=normalizer
)(layer1, labels)
layer2 = CondResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h, labels)
h = CondResidualBlock(
2 * nf, resample="down", act=act, normalization=normalizer, dilation=2
)(layer2, labels)
layer3 = CondResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer, dilation=2
)(h, labels)
h = CondResidualBlock(
2 * nf, resample="down", act=act, normalization=normalizer, dilation=4
)(layer3, labels)
layer4 = CondResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer, dilation=4
)(h, labels)
# U-Net with RefineBlocks
ref1 = CondRefineBlock(
layer4.shape[1:3],
2 * nf,
act=act,
normalizer=normalizer,
interpolation=interpolation,
start=True,
)([layer4], labels)
ref2 = CondRefineBlock(
layer3.shape[1:3],
2 * nf,
normalizer=normalizer,
interpolation=interpolation,
act=act,
)([layer3, ref1], labels)
ref3 = CondRefineBlock(
layer2.shape[1:3],
2 * nf,
normalizer=normalizer,
interpolation=interpolation,
act=act,
)([layer2, ref2], labels)
ref4 = CondRefineBlock(
layer1.shape[1:3],
nf,
normalizer=normalizer,
interpolation=interpolation,
act=act,
end=True,
)([layer1, ref3], labels)
h = normalizer()(ref4, labels)
h = act(h)
h = conv3x3(h, x.shape[-1])
return h
@register_model(name="ncsnv2_128")
class NCSNv2_128(nn.Module): # pylint: disable=invalid-name
"""NCSNv2 model architecture for 128px images."""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, labels, train=True):
# config parsing
config = self.config
nf = config.model.nf
act = get_act(config)
normalizer = get_normalization(config)
interpolation = config.model.interpolation
if not config.data.centered:
h = 2 * x - 1.0
else:
h = x
h = conv3x3(h, nf, stride=1, bias=True)
# ResNet backbone
h = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
layer1 = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer1
)
layer2 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer2
)
layer3 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(
4 * nf, resample="down", act=act, normalization=normalizer, dilation=2
)(layer3)
layer4 = ResidualBlock(
4 * nf, resample=None, act=act, normalization=normalizer, dilation=2
)(h)
h = ResidualBlock(
4 * nf, resample="down", act=act, normalization=normalizer, dilation=4
)(layer4)
layer5 = ResidualBlock(
4 * nf, resample=None, act=act, normalization=normalizer, dilation=4
)(h)
# U-Net with RefineBlocks
ref1 = RefineBlock(
layer5.shape[1:3], 4 * nf, interpolation=interpolation, act=act, start=True
)([layer5])
ref2 = RefineBlock(
layer4.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer4, ref1])
ref3 = RefineBlock(
layer3.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer3, ref2])
ref4 = RefineBlock(layer2.shape[1:3], nf, interpolation=interpolation, act=act)(
[layer2, ref3]
)
ref5 = RefineBlock(
layer1.shape[1:3], nf, interpolation=interpolation, act=act, end=True
)([layer1, ref4])
h = normalizer()(ref5)
h = act(h)
h = conv3x3(h, x.shape[-1])
return h
@register_model(name="ncsnv2_256")
class NCSNv2_256(nn.Module): # pylint: disable=invalid-name
"""NCSNv2 model architecture for 256px images."""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, labels, train=True):
# config parsing
config = self.config
nf = config.model.nf
act = get_act(config)
normalizer = get_normalization(config)
interpolation = config.model.interpolation
if not config.data.centered:
h = 2 * x - 1.0
else:
h = x
h = conv3x3(h, nf, stride=1, bias=True)
# ResNet backbone
h = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
layer1 = ResidualBlock(nf, resample=None, act=act, normalization=normalizer)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer1
)
layer2 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer2
)
layer3 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(2 * nf, resample="down", act=act, normalization=normalizer)(
layer3
)
layer31 = ResidualBlock(
2 * nf, resample=None, act=act, normalization=normalizer
)(h)
h = ResidualBlock(
4 * nf, resample="down", act=act, normalization=normalizer, dilation=2
)(layer31)
layer4 = ResidualBlock(
4 * nf, resample=None, act=act, normalization=normalizer, dilation=2
)(h)
h = ResidualBlock(
4 * nf, resample="down", act=act, normalization=normalizer, dilation=4
)(layer4)
layer5 = ResidualBlock(
4 * nf, resample=None, act=act, normalization=normalizer, dilation=4
)(h)
# U-Net with RefineBlocks
ref1 = RefineBlock(
layer5.shape[1:3], 4 * nf, interpolation=interpolation, act=act, start=True
)([layer5])
ref2 = RefineBlock(
layer4.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer4, ref1])
ref31 = RefineBlock(
layer31.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer31, ref2])
ref3 = RefineBlock(
layer3.shape[1:3], 2 * nf, interpolation=interpolation, act=act
)([layer3, ref31])
ref4 = RefineBlock(layer2.shape[1:3], nf, interpolation=interpolation, act=act)(
[layer2, ref3]
)
ref5 = RefineBlock(
layer1.shape[1:3], nf, interpolation=interpolation, act=act, end=True
)([layer1, ref4])
h = normalizer()(ref5)
h = act(h)
h = conv3x3(h, x.shape[-1])
return h
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Layers for defining NCSN++.
"""
from typing import Any, Optional, Tuple
from . import layers
from . import up_or_down_sampling
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
conv1x1 = layers.ddpm_conv1x1
conv3x3 = layers.ddpm_conv3x3
NIN = layers.NIN
default_init = layers.default_init
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
embedding_size: int = 256
scale: float = 1.0
@nn.compact
def __call__(self, x):
W = self.param(
"W", jax.nn.initializers.normal(stddev=self.scale), (self.embedding_size,)
)
W = jax.lax.stop_gradient(W)
x_proj = x[:, None] * W[None, :] * 2 * jnp.pi
return jnp.concatenate([jnp.sin(x_proj), jnp.cos(x_proj)], axis=-1)
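# Note (added comment): for an input `x` of shape (B,), the projection above
# returns features of shape (B, 2 * embedding_size) -- sin and cos components
# concatenated. `W` is sampled once and wrapped in stop_gradient, so the
# Fourier frequencies are fixed rather than learned.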
class Combine(nn.Module):
"""Combine information from skip connections."""
method: str = "cat"
@nn.compact
def __call__(self, x, y):
h = conv1x1(x, y.shape[-1])
if self.method == "cat":
return jnp.concatenate([h, y], axis=-1)
elif self.method == "sum":
return h + y
else:
raise ValueError(f"Method {self.method} not recognized.")
class AttnBlockpp(nn.Module):
"""Channel-wise self-attention block. Modified from DDPM."""
skip_rescale: bool = False
init_scale: float = 0.0
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
h = nn.GroupNorm(num_groups=min(x.shape[-1] // 4, 32))(x)
q = NIN(C)(h)
k = NIN(C)(h)
v = NIN(C)(h)
w = jnp.einsum("bhwc,bHWc->bhwHW", q, k) * (int(C) ** (-0.5))
w = jnp.reshape(w, (B, H, W, H * W))
w = jax.nn.softmax(w, axis=-1)
w = jnp.reshape(w, (B, H, W, H, W))
h = jnp.einsum("bhwHW,bHWc->bhwc", w, v)
h = NIN(C, init_scale=self.init_scale)(h)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.0)
class Upsample(nn.Module):
out_ch: Optional[int] = None
with_conv: bool = False
fir: bool = False
fir_kernel: Tuple[int] = (1, 3, 3, 1)
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
out_ch = self.out_ch if self.out_ch else C
if not self.fir:
h = jax.image.resize(x, (x.shape[0], H * 2, W * 2, C), "nearest")
if self.with_conv:
h = conv3x3(h, out_ch)
else:
if not self.with_conv:
h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
else:
h = up_or_down_sampling.Conv2d(
out_ch,
kernel=3,
up=True,
resample_kernel=self.fir_kernel,
use_bias=True,
kernel_init=default_init(),
)(x)
assert h.shape == (B, 2 * H, 2 * W, out_ch)
return h
class Downsample(nn.Module):
out_ch: Optional[int] = None
with_conv: bool = False
fir: bool = False
fir_kernel: Tuple[int] = (1, 3, 3, 1)
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
out_ch = self.out_ch if self.out_ch else C
if not self.fir:
if self.with_conv:
x = conv3x3(x, out_ch, stride=2)
else:
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2), padding="SAME")
else:
if not self.with_conv:
x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
else:
x = up_or_down_sampling.Conv2d(
out_ch,
kernel=3,
down=True,
resample_kernel=self.fir_kernel,
use_bias=True,
kernel_init=default_init(),
)(x)
assert x.shape == (B, H // 2, W // 2, out_ch)
return x
class ResnetBlockDDPMpp(nn.Module):
"""ResBlock adapted from DDPM."""
act: Any
out_ch: Optional[int] = None
conv_shortcut: bool = False
dropout: float = 0.1
skip_rescale: bool = False
init_scale: float = 0.0
@nn.compact
def __call__(self, x, temb=None, train=True):
B, H, W, C = x.shape
out_ch = self.out_ch if self.out_ch else C
h = self.act(nn.GroupNorm(num_groups=min(x.shape[-1] // 4, 32))(x))
h = conv3x3(h, out_ch)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += nn.Dense(out_ch, kernel_init=default_init())(self.act(temb))[
:, None, None, :
]
h = self.act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h))
h = nn.Dropout(self.dropout)(h, deterministic=not train)
h = conv3x3(h, out_ch, init_scale=self.init_scale)
if C != out_ch:
if self.conv_shortcut:
x = conv3x3(x, out_ch)
else:
x = NIN(out_ch)(x)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.0)
class ResnetBlockBigGANpp(nn.Module):
"""ResBlock adapted from BigGAN."""
act: Any
up: bool = False
down: bool = False
out_ch: Optional[int] = None
dropout: float = 0.1
fir: bool = False
fir_kernel: Tuple[int] = (1, 3, 3, 1)
skip_rescale: bool = True
init_scale: float = 0.0
@nn.compact
def __call__(self, x, temb=None, train=True):
B, H, W, C = x.shape
out_ch = self.out_ch if self.out_ch else C
h = self.act(nn.GroupNorm(num_groups=min(x.shape[-1] // 4, 32))(x))
if self.up:
if self.fir:
h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2)
x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_upsample_2d(h, factor=2)
x = up_or_down_sampling.naive_upsample_2d(x, factor=2)
elif self.down:
if self.fir:
h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2)
x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_downsample_2d(h, factor=2)
x = up_or_down_sampling.naive_downsample_2d(x, factor=2)
h = conv3x3(h, out_ch)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += nn.Dense(out_ch, kernel_init=default_init())(self.act(temb))[
:, None, None, :
]
h = self.act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h))
h = nn.Dropout(self.dropout)(h, deterministic=not train)
h = conv3x3(h, out_ch, init_scale=self.init_scale)
if C != out_ch or self.up or self.down:
x = conv1x1(x, out_ch)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.0)
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2023 (c) OpenAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All functions and modules related to model definition.
"""
from typing import Any
import flax
import haiku as hk
import functools
import jax.numpy as jnp
from .. import sde_lib
import jax
import numpy as np
from . import wideresnet_noise_conditional
from .. import checkpoints
from ..utils import T, batch_mul
# The dataclass that stores all training states
@flax.struct.dataclass
class State:
step: int
lr: float
ema_rate: float
params: Any
params_ema: Any
model_state: Any
opt_state: Any
rng_state: Any
@flax.struct.dataclass
class StateWithTarget:
step: int
lr: float
ema_rate: float
params: Any
target_params: Any
params_ema: Any
model_state: Any
opt_state: Any
rng_state: Any
_MODELS = {}
def register_model(cls=None, *, name=None):
"""A decorator for registering model classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _MODELS:
raise ValueError(f"Already registered model with name: {local_name}")
_MODELS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
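# Usage sketch for the registry (illustrative; it mirrors how the model files
# in this codebase use the decorator):
#
#   @register_model(name="my_model")   # register under an explicit name
#   class MyModel(flax.linen.Module):
#       ...
#
#   @register_model                    # or register under the class name
#   class OtherModel(flax.linen.Module):
#       ...
#
#   model_cls = get_model("my_model")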
def get_model(name):
return _MODELS[name]
def init_model(rng, config):
"""Initialize a `flax.linen.Module` model."""
rng = hk.PRNGSequence(rng)
model_name = config.model.name
model_def = functools.partial(get_model(model_name), config=config)
input_shape = (
jax.local_device_count(),
config.data.image_size,
config.data.image_size,
config.data.num_channels,
)
label_shape = input_shape[:1]
fake_input = jnp.zeros(input_shape)
fake_label = jnp.zeros(label_shape, dtype=jnp.int32)
model = model_def()
variables = model.init(
{"params": next(rng), "dropout": next(rng)}, fake_input, fake_label
)
# Variables is a `flax.FrozenDict`. It is immutable and respects functional programming
init_model_state, initial_params = variables.pop("params")
return model, init_model_state, initial_params
def init_lpips(rng, config):
assert config.training.loss_norm.lower() == "lpips", "LPIPS should only be initialized when the training loss norm is 'lpips'"
from .lpips import LPIPS
model = LPIPS()
params = model.init(rng, jnp.zeros((1, 256, 256, 3)), jnp.zeros((1, 256, 256, 3)))
return model, params
def get_model_fn(model, params, states, train=False):
"""Create a function to give the output of the score-based model.
Args:
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
params: A dictionary that contains all trainable parameters.
states: A dictionary that contains all mutable states.
train: `True` for training and `False` for evaluation.
Returns:
A model function.
"""
def model_fn(x, labels, rng=None):
"""Compute the output of the score-based model.
Args:
x: A mini-batch of input data.
labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently
for different models.
rng: If present, it is the random state for dropout
Returns:
A tuple of (model output, new mutable states)
"""
variables = {"params": params, **states}
if not train:
return model.apply(variables, x, labels, train=False, mutable=False), states
else:
rngs = {"dropout": rng}
return model.apply(
variables, x, labels, train=True, mutable=list(states.keys()), rngs=rngs
)
return model_fn
def get_denoiser_fn(sde, model, params, states, train=False, return_state=False):
model_fn = get_model_fn(model, params, states, train=train)
assert isinstance(
sde, sde_lib.KVESDE
), "Only KVE SDE is supported for building the denoiser"
def denoiser_fn(x, t, rng=None):
in_x = batch_mul(x, 1 / jnp.sqrt(t**2 + sde.data_std**2))
cond_t = 0.25 * jnp.log(t)
denoiser, state = model_fn(in_x, cond_t, rng)
denoiser = batch_mul(
denoiser, t * sde.data_std / jnp.sqrt(t**2 + sde.data_std**2)
)
skip_x = batch_mul(x, sde.data_std**2 / (t**2 + sde.data_std**2))
denoiser = skip_x + denoiser
if return_state:
return denoiser, state
else:
return denoiser
return denoiser_fn
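# Reading of the parameterization above (added comment): with sigma_data =
# sde.data_std, the denoiser is computed as
#   D(x, t) = c_skip(t) * x + c_out(t) * F(c_in(t) * x, c_noise(t)),
# where c_in(t)    = 1 / sqrt(t**2 + sigma_data**2),
#       c_out(t)   = t * sigma_data / sqrt(t**2 + sigma_data**2),
#       c_skip(t)  = sigma_data**2 / (t**2 + sigma_data**2),
#       c_noise(t) = 0.25 * log(t),
# and F is the raw network output produced by `model_fn`.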
def get_distiller_fn(
sde, model, params, states, train=False, return_state=False, pred_t=None
):
assert isinstance(
sde, sde_lib.KVESDE
), "Only KVE SDE is supported for building the denoiser"
model_fn = get_model_fn(model, params, states, train=train)
if pred_t is None:
pred_t = sde.t_min
def distiller_fn(x, t, rng=None):
in_x = batch_mul(x, 1 / jnp.sqrt(t**2 + sde.data_std**2))
cond_t = 0.25 * jnp.log(t)
denoiser, state = model_fn(in_x, cond_t, rng)
denoiser = batch_mul(
denoiser,
(t - pred_t) * sde.data_std / jnp.sqrt(t**2 + sde.data_std**2),
)
skip_x = batch_mul(
x, sde.data_std**2 / ((t - pred_t) ** 2 + sde.data_std**2)
)
denoiser = skip_x + denoiser
if return_state:
return denoiser, state
else:
return denoiser
return distiller_fn
def get_gaussianizer_fn(
sde, model, params, states, train=False, return_state=False, pred_t=None
):
assert isinstance(
sde, sde_lib.KVESDE
), "Only KVE SDE is supported for building the denoiser"
model_fn = get_model_fn(model, params, states, train=train)
if pred_t is None:
pred_t = sde.t_min
def gaussianizer_fn(x, t, rng=None):
in_x = x / sde.data_std
cond_t = 0.25 * jnp.log(t)
model_output, state = model_fn(in_x, cond_t, rng)
model_output = x + batch_mul(model_output, t - pred_t)
if return_state:
return model_output, state
else:
return model_output
return gaussianizer_fn
def get_score_fn(sde, model, params, states, train=False, return_state=False):
"""Wraps `score_fn` so that the model output corresponds to a real time-dependent score function.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
params: A dictionary that contains all trainable parameters.
states: A dictionary that contains all other mutable parameters.
train: `True` for training and `False` for evaluation.
return_state: If `True`, return the new mutable states alongside the model output.
Returns:
A score function.
"""
model_fn = get_model_fn(model, params, states, train=train)
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
def score_fn(x, t, rng=None):
# Scale neural network output by standard deviation and flip sign
# For VP-trained models, t=0 corresponds to the lowest noise level
# The maximum value of the time embedding is assumed to be 999 for
# continuously-trained models.
cond_t = t * 999
model, state = model_fn(x, cond_t, rng)
std = sde.marginal_prob(jnp.zeros_like(x), t)[1]
score = batch_mul(-model, 1.0 / std)
if return_state:
return score, state
else:
return score
elif isinstance(sde, sde_lib.VESDE):
def score_fn(x, t, rng=None):
x = 2 * x - 1.0 # assuming x is in [0, 1]
std = sde.marginal_prob(jnp.zeros_like(x), t)[1]
score, state = model_fn(x, jnp.log(std), rng)
score = batch_mul(score, 1.0 / std)
if return_state:
return score, state
else:
return score
elif isinstance(sde, sde_lib.KVESDE):
denoiser_fn = get_denoiser_fn(
sde, model, params, states, train=train, return_state=True
)
def score_fn(x, t, rng=None):
denoiser, state = denoiser_fn(x, t, rng)
score = batch_mul(denoiser - x, 1 / t**2)
if return_state:
return score, state
else:
return score
else:
raise NotImplementedError(
f"SDE class {sde.__class__.__name__} not yet supported."
)
return score_fn
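# Illustrative usage (added comment): for a KVE SDE the score is recovered from
# the denoiser exactly as implemented above,
#   score_fn = get_score_fn(sde, model, params, states, train=False)
#   score = score_fn(x, t)   # equals (D(x, t) - x) / t**2
# where D is the denoiser returned by get_denoiser_fn.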
def get_denoiser_and_distiller_fn(
sde, model, params, states, train=False, return_state=False, pred_t=None
):
"""Wraps `score_fn` so that the model output corresponds to a real time-dependent score function.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
model: A `flax.linen.Module` object that represents the architecture of the score-based model.
params: A dictionary that contains all trainable parameters.
states: A dictionary that contains all other mutable parameters.
train: `True` for training and `False` for evaluation.
return_state: If `True`, return the new mutable states alongside the model output.
pred_t: The time at which the distiller acts as the identity map.
Returns:
A function that returns the (denoiser, distiller) pair.
"""
assert isinstance(
sde, sde_lib.KVESDE
), "Only KVE SDE is supported for joint training."
model_fn = get_model_fn(model, params, states, train=train)
if pred_t is None:
pred_t = sde.t_min
from .ncsnpp import NCSNpp, JointNCSNpp
def denoiser_distiller_fn(x, t, rng=None):
in_x = batch_mul(x, 1 / jnp.sqrt(t**2 + sde.data_std**2))
cond_t = 0.25 * jnp.log(t)
if isinstance(model, NCSNpp):
model_output, state = model_fn(in_x, cond_t, rng)
denoiser = model_output[..., :3]
distiller = model_output[..., 3:]
elif isinstance(model, JointNCSNpp):
(denoiser, distiller), state = model_fn(in_x, cond_t, rng)
denoiser = batch_mul(
denoiser, t * sde.data_std / jnp.sqrt(t**2 + sde.data_std**2)
)
skip_x = batch_mul(x, sde.data_std**2 / (t**2 + sde.data_std**2))
denoiser = skip_x + denoiser
distiller = batch_mul(
distiller,
(t - pred_t) * sde.data_std / jnp.sqrt(t**2 + sde.data_std**2),
)
skip_x = batch_mul(
x, sde.data_std**2 / ((t - pred_t) ** 2 + sde.data_std**2)
)
distiller = skip_x + distiller
if return_state:
return (denoiser, distiller), state
else:
return denoiser, distiller
return denoiser_distiller_fn
def to_flattened_numpy(x):
"""Flatten a JAX array `x` and convert it to numpy."""
return np.asarray(x.reshape((-1,)))
def from_flattened_numpy(x, shape):
"""Form a JAX array with the given `shape` from a flattened numpy array `x`."""
return jnp.asarray(x).reshape(shape)
def create_classifier(prng_key, batch_size, ckpt_path):
"""Create a noise-conditional image classifier.
Args:
prng_key: A JAX random state.
batch_size: The batch size of input data.
ckpt_path: The path to stored checkpoints for this classifier.
Returns:
classifier: A `flax.linen.Module` object that represents the architecture of the classifier.
classifier_params: A dictionary that contains trainable parameters of the classifier.
"""
input_shape = (batch_size, 32, 32, 3)
classifier = wideresnet_noise_conditional.WideResnet(
blocks_per_group=4, channel_multiplier=10, num_outputs=10
)
initial_variables = classifier.init(
{"params": prng_key, "dropout": jax.random.PRNGKey(0)},
jnp.ones(input_shape, dtype=jnp.float32),
jnp.ones((batch_size,), dtype=jnp.float32),
train=False,
)
model_state, init_params = initial_variables.pop("params")
classifier_params = checkpoints.restore_checkpoint(ckpt_path, init_params)
return classifier, classifier_params
def get_logit_fn(classifier, classifier_params):
"""Create a logit function for the classifier."""
def preprocess(data):
image_mean = jnp.asarray([[[0.49139968, 0.48215841, 0.44653091]]])
image_std = jnp.asarray([[[0.24703223, 0.24348513, 0.26158784]]])
return (data - image_mean[None, ...]) / image_std[None, ...]
def logit_fn(data, ve_noise_scale):
"""Give the logits of the classifier.
Args:
data: A JAX array of the input.
ve_noise_scale: The noise scales used as time conditioning, following the VE SDE convention.
Returns:
logits: The logits given by the noise-conditional classifier.
"""
data = preprocess(data)
logits = classifier.apply(
{"params": classifier_params},
data,
ve_noise_scale,
train=False,
mutable=False,
)
return logits
return logit_fn
def get_classifier_grad_fn(logit_fn):
"""Create the gradient function for the classifier in use of class-conditional sampling."""
def grad_fn(data, ve_noise_scale, labels):
def prob_fn(data):
logits = logit_fn(data, ve_noise_scale)
prob = jax.nn.log_softmax(logits, axis=-1)[
jnp.arange(labels.shape[0]), labels
].sum()
return prob
return jax.grad(prob_fn)(data)
return grad_fn
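# Usage sketch for class-conditional guidance (illustrative only; the sampler
# that consumes this gradient lives elsewhere):
#   logit_fn = get_logit_fn(classifier, classifier_params)
#   grad_fn = get_classifier_grad_fn(logit_fn)
#   guidance = grad_fn(x, ve_noise_scale, labels)  # same shape as x
# `guidance` is the gradient of the summed log-probability assigned to `labels`
# with respect to the noisy input batch.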
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Layers used for up-sampling or down-sampling images.
Many functions are ported from https://github.com/NVlabs/stylegan2.
"""
import flax.linen as nn
from typing import Any, Tuple, Optional, Sequence
import jax
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
# Function ported from StyleGAN2
def get_weight(module, shape, weight_var="weight", kernel_init=None):
"""Get/create weight tensor for a convolution or fully-connected layer."""
return module.param(weight_var, kernel_init, shape)
class Conv2d(nn.Module):
"""Conv2d layer with optimal upsampling and downsampling (StyleGAN2)."""
fmaps: int
kernel: int
up: bool = False
down: bool = False
resample_kernel: Tuple[int] = (1, 3, 3, 1)
use_bias: bool = True
weight_var: str = "weight"
kernel_init: Optional[Any] = None
@nn.compact
def __call__(self, x):
assert not (self.up and self.down)
assert self.kernel >= 1 and self.kernel % 2 == 1
w = get_weight(
self,
(self.kernel, self.kernel, x.shape[-1], self.fmaps),
weight_var=self.weight_var,
kernel_init=self.kernel_init,
)
if self.up:
x = upsample_conv_2d(x, w, data_format="NHWC", k=self.resample_kernel)
elif self.down:
x = conv_downsample_2d(x, w, data_format="NHWC", k=self.resample_kernel)
else:
x = jax.lax.conv_general_dilated(
x,
w,
window_strides=(1, 1),
padding="SAME",
dimension_numbers=("NHWC", "HWIO", "NHWC"),
)
if self.use_bias:
b = self.param("bias", jnn.initializers.zeros, (x.shape[-1],))
x = x + b.reshape((1, 1, 1, -1))
return x
def naive_upsample_2d(x, factor=2):
_N, H, W, C = x.shape
x = jnp.reshape(x, [-1, H, 1, W, 1, C])
x = jnp.tile(x, [1, 1, factor, 1, factor, 1])
return jnp.reshape(x, [-1, H * factor, W * factor, C])
def naive_downsample_2d(x, factor=2):
_N, H, W, C = x.shape
x = jnp.reshape(x, [-1, H // factor, factor, W // factor, factor, C])
return jnp.mean(x, axis=[2, 4])
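# Shape examples (added comment), with factor=2:
#   naive_upsample_2d:   (N, H, W, C) -> (N, 2H, 2W, C)   nearest-neighbor repeat
#   naive_downsample_2d: (N, H, W, C) -> (N, H/2, W/2, C)  2x2 mean pooling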
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format="NHWC"):
"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the
operations.
The fused op is considerably more efficient than performing the same
calculation using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NHWC'`).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
assert len(w.shape) == 4
convH = w.shape[0]
convW = w.shape[1]
inC = w.shape[2]
outC = w.shape[3]
assert convW == convH
# Setup filter kernel.
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor**2))
p = (k.shape[0] - factor) - (convW - 1)
stride = [factor, factor]
# Determine data dimensions.
if data_format == "NCHW":
num_groups = _shape(x, 1) // inC
else:
num_groups = _shape(x, 3) // inC
# Transpose weights.
w = jnp.reshape(w, [convH, convW, inC, num_groups, -1])
w = jnp.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
w = jnp.reshape(w, [convH, convW, -1, num_groups * inC])
## Original TF code.
# x = tf.nn.conv2d_transpose(
# x,
# w,
# output_shape=output_shape,
# strides=stride,
# padding='VALID',
# data_format=data_format)
## JAX equivalent
x = jax.lax.conv_transpose(
x,
w,
strides=stride,
padding="VALID",
transpose_kernel=True,
dimension_numbers=(data_format, "HWIO", data_format),
)
return _simple_upfirdn_2d(
x, k, pad0=(p + 1) // 2 + factor - 1, pad1=p // 2 + 1, data_format=data_format
)
def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format="NHWC"):
"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
The fused op is considerably more efficient than performing the same
calculation using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NHWC'`).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
convH, convW, _inC, _outC = w.shape
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = (k.shape[0] - factor) + (convW - 1)
s = [factor, factor]
x = _simple_upfirdn_2d(
x, k, pad0=(p + 1) // 2, pad1=p // 2, data_format=data_format
)
return jax.lax.conv_general_dilated(
x,
w,
window_strides=s,
padding="VALID",
dimension_numbers=(data_format, "HWIO", data_format),
)
def upfirdn_2d(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
and performs the following operations for each image, batched across
`majorDim` and `minorDim`:
1. Pad the image with zeros by the specified number of pixels on each side
(`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
corresponds to cropping the image.
2. Upsample the image by inserting the zeros after each pixel (`upx`,
`upy`).
3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
image so that the footprint of all output pixels lies within the input
image.
4. Downsample the image by throwing away pixels (`downx`, `downy`).
This sequence of operations bears close resemblance to
scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same
calculation using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
k: 2D FIR filter of the shape `[firH, firW]`.
upx: Integer upsampling factor along the X-axis (default: 1).
upy: Integer upsampling factor along the Y-axis (default: 1).
downx: Integer downsampling factor along the X-axis (default: 1).
downy: Integer downsampling factor along the Y-axis (default: 1).
padx0: Number of pixels to pad on the left side (default: 0).
padx1: Number of pixels to pad on the right side (default: 0).
pady0: Number of pixels to pad on the top side (default: 0).
pady1: Number of pixels to pad on the bottom side (default: 0).
Returns:
Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same
datatype as `x`.
"""
k = jnp.asarray(k, dtype=np.float32)
assert len(x.shape) == 4
inH = x.shape[1]
inW = x.shape[2]
minorDim = x.shape[3]
kernelH, kernelW = k.shape
assert inW >= 1 and inH >= 1
assert kernelW >= 1 and kernelH >= 1
assert isinstance(upx, int) and isinstance(upy, int)
assert isinstance(downx, int) and isinstance(downy, int)
assert isinstance(padx0, int) and isinstance(padx1, int)
assert isinstance(pady0, int) and isinstance(pady1, int)
# Upsample (insert zeros).
x = jnp.reshape(x, (-1, inH, 1, inW, 1, minorDim))
x = jnp.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
x = jnp.reshape(x, [-1, inH * upy, inW * upx, minorDim])
# Pad (crop if negative).
x = jnp.pad(
x,
[
[0, 0],
[max(pady0, 0), max(pady1, 0)],
[max(padx0, 0), max(padx1, 0)],
[0, 0],
],
)
x = x[
:,
max(-pady0, 0) : x.shape[1] - max(-pady1, 0),
max(-padx0, 0) : x.shape[2] - max(-padx1, 0),
:,
]
# Convolve with filter.
x = jnp.transpose(x, [0, 3, 1, 2])
x = jnp.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
w = jnp.array(k[::-1, ::-1, None, None], dtype=x.dtype)
x = jax.lax.conv_general_dilated(
x,
w,
window_strides=(1, 1),
padding="VALID",
dimension_numbers=("NCHW", "HWIO", "NCHW"),
)
x = jnp.reshape(
x,
[
-1,
minorDim,
inH * upy + pady0 + pady1 - kernelH + 1,
inW * upx + padx0 + padx1 - kernelW + 1,
],
)
x = jnp.transpose(x, [0, 2, 3, 1])
# Downsample (throw away pixels).
return x[:, ::downy, ::downx, :]
def _simple_upfirdn_2d(x, k, up=1, down=1, pad0=0, pad1=0, data_format="NCHW"):
assert data_format in ["NCHW", "NHWC"]
assert len(x.shape) == 4
y = x
if data_format == "NCHW":
y = jnp.reshape(y, [-1, y.shape[2], y.shape[3], 1])
y = upfirdn_2d(
y,
k,
upx=up,
upy=up,
downx=down,
downy=down,
padx0=pad0,
padx1=pad1,
pady0=pad0,
pady1=pad1,
)
if data_format == "NCHW":
y = jnp.reshape(y, [-1, x.shape[1], y.shape[1], y.shape[2]])
return y
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
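# Example (added comment): _setup_kernel([1, 3, 3, 1]) forms the outer product
# of the 1-D filter with itself and normalizes it to sum to one, giving the
# 4x4 separable FIR kernel used as the default resample_kernel above.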
def _shape(x, dim):
return x.shape[dim]
def upsample_2d(x, k=None, factor=2, gain=1, data_format="NHWC"):
r"""Upsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and upsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the upsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NHWC'`).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor**2))
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x,
k,
up=factor,
pad0=(p + 1) // 2 + factor - 1,
pad1=p // 2,
data_format=data_format,
)
def downsample_2d(x, k=None, factor=2, gain=1, data_format="NHWC"):
r"""Downsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and downsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the downsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NHWC'`).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(
x, k, down=factor, pad0=(p + 1) // 2, pad1=p // 2, data_format=data_format
)
|
# Code adapted from https://github.com/google-research/google-research/tree/master/flax_models/cifar
# Original copyright statement:
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide Resnet Model.
Reference:
Wide Residual Networks, Sergey Zagoruyko, Nikos Komodakis
https://arxiv.org/abs/1605.07146
Initially forked from
github.com/google/flax/blob/master/examples/cifar10/models/wideresnet.py
This implementation mimics the one from
github.com/tensorflow/models/blob/master/research/autoaugment/wrn.py
that is widely used as a benchmark.
It uses identity + zero padding skip connections, with kaiming normal
initialization for convolutional kernels (mode = fan_out, gain=2.0).
The final dense layer uses a uniform distribution U[-scale, scale] where
scale = 1 / sqrt(num_classes) as per the autoaugment implementation.
Using the default initialization instead gives error rates approximately 0.5%
greater on cifar100, most likely because the parameters used in the literature
were finetuned for this particular initialization.
Finally, the autoaugment implementation adds more residual connections between
the groups (instead of just between the blocks as per the original paper and
most implementations). It is possible to safely remove those connections without
degrading the performance, which we do by default to match the original
wideresnet paper. Setting `use_additional_skip_connections` to True will add
them back and then reproduces exactly the model used in autoaugment.
"""
import numpy as np
import flax
from flax import linen as nn
import jax
import jax.numpy as jnp
from typing import Any, Tuple, Optional
_BATCHNORM_MOMENTUM = 0.9
_BATCHNORM_EPSILON = 1e-5
# Kaiming initialization with fan out mode. Should be used to initialize
# convolutional kernels.
conv_kernel_init_fn = jax.nn.initializers.variance_scaling(2.0, "fan_out", "normal")
def dense_layer_init_fn(key, shape, dtype=jnp.float32):
"""Initializer for the final dense layer.
Args:
key: PRNG key to use to sample the weights.
shape: Shape of the tensor to initialize.
dtype: Data type of the tensor to initialize.
Returns:
The initialized tensor.
"""
num_units_out = shape[1]
unif_init_range = 1.0 / (num_units_out) ** (0.5)
return jax.random.uniform(key, shape, dtype, -1) * unif_init_range
def shake_shake_train(xa, xb, rng=None):
"""Shake-shake regularization in training mode.
Shake-shake regularization interpolates between inputs A and B
with *different* random uniform (per-sample) interpolation factors
for the forward and backward/gradient passes.
Args:
xa: Input, branch A.
xb: Input, branch B.
rng: PRNG key.
Returns:
Mix of input branches.
"""
if rng is None:
rng = flax.nn.make_rng()
gate_forward_key, gate_backward_key = jax.random.split(rng, num=2)
gate_shape = (len(xa), 1, 1, 1)
# Draw different interpolation factors (gate) for forward and backward pass.
gate_forward = jax.random.uniform(
gate_forward_key, gate_shape, dtype=jnp.float32, minval=0.0, maxval=1.0
)
gate_backward = jax.random.uniform(
gate_backward_key, gate_shape, dtype=jnp.float32, minval=0.0, maxval=1.0
)
# Compute interpolated x for forward and backward.
x_forward = xa * gate_forward + xb * (1.0 - gate_forward)
x_backward = xa * gate_backward + xb * (1.0 - gate_backward)
# Combine using stop_gradient.
return x_backward + jax.lax.stop_gradient(x_forward - x_backward)
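# Note on the stop_gradient trick above (added comment): the returned value
# equals x_forward in the forward pass, while gradients flow as if the output
# were x_backward, so the forward and backward passes see independently
# sampled interpolation factors -- the shake-shake scheme.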
def shake_shake_eval(xa, xb):
"""Shake-shake regularization in testing mode.
Args:
xa: Input, branch A.
xb: Input, branch B.
Returns:
Mix of input branches.
"""
# Blend between inputs A and B 50%-50%.
return (xa + xb) * 0.5
def shake_drop_train(x, mask_prob, alpha_min, alpha_max, beta_min, beta_max, rng=None):
"""ShakeDrop training pass.
See https://arxiv.org/abs/1802.02375
Args:
x: Input to apply ShakeDrop to.
mask_prob: Mask probability.
alpha_min: Alpha range lower.
alpha_max: Alpha range upper.
beta_min: Beta range lower.
beta_max: Beta range upper.
rng: PRNG key (if `None`, uses `flax.nn.make_rng`).
Returns:
The regularized tensor.
"""
if rng is None:
rng = flax.nn.make_rng()
bern_key, alpha_key, beta_key = jax.random.split(rng, num=3)
rnd_shape = (len(x), 1, 1, 1)
# Bernoulli variable b_l in Eqn 6, https://arxiv.org/abs/1802.02375.
mask = jax.random.bernoulli(bern_key, mask_prob, rnd_shape)
mask = mask.astype(jnp.float32)
alpha_values = jax.random.uniform(
alpha_key, rnd_shape, dtype=jnp.float32, minval=alpha_min, maxval=alpha_max
)
beta_values = jax.random.uniform(
beta_key, rnd_shape, dtype=jnp.float32, minval=beta_min, maxval=beta_max
)
# See Eqn 6 in https://arxiv.org/abs/1802.02375.
rand_forward = mask + alpha_values - mask * alpha_values
rand_backward = mask + beta_values - mask * beta_values
return x * rand_backward + jax.lax.stop_gradient(
x * rand_forward - x * rand_backward
)
def shake_drop_eval(x, mask_prob, alpha_min, alpha_max):
"""ShakeDrop eval pass.
See https://arxiv.org/abs/1802.02375
Args:
x: Input to apply ShakeDrop to.
mask_prob: Mask probability.
alpha_min: Alpha range lower.
alpha_max: Alpha range upper.
Returns:
The regularized tensor.
"""
expected_alpha = (alpha_max + alpha_min) / 2
# See Eqn 6 in https://arxiv.org/abs/1802.02375.
return (mask_prob + expected_alpha - mask_prob * expected_alpha) * x
def activation(x, train, apply_relu=True, name=""):
x = nn.GroupNorm(name=name, epsilon=1e-5, num_groups=min(x.shape[-1] // 4, 32))(x)
if apply_relu:
x = jax.nn.relu(x)
return x
def _output_add(block_x, orig_x):
"""Add two tensors, padding them with zeros or pooling them if necessary.
Args:
block_x: Output of a resnet block.
orig_x: Residual branch to add to the output of the resnet block.
Returns:
The sum of block_x and orig_x. If necessary, orig_x will be average pooled
or zero padded so that its shape matches block_x.
"""
stride = orig_x.shape[-2] // block_x.shape[-2]
strides = (stride, stride)
if block_x.shape[-1] != orig_x.shape[-1]:
orig_x = nn.avg_pool(orig_x, strides, strides)
channels_to_add = block_x.shape[-1] - orig_x.shape[-1]
orig_x = jnp.pad(orig_x, [(0, 0), (0, 0), (0, 0), (0, channels_to_add)])
return block_x + orig_x
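# Example (added comment): if block_x has shape (B, 16, 16, 64) and orig_x has
# shape (B, 32, 32, 32), orig_x is average-pooled with stride 2 to
# (B, 16, 16, 32) and then zero-padded along the channel axis to
# (B, 16, 16, 64) before the residual addition.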
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
embedding_size: int = 256
scale: float = 1.0
@nn.compact
def __call__(self, x):
W = self.param(
"W", jax.nn.initializers.normal(stddev=self.scale), (self.embedding_size,)
)
W = jax.lax.stop_gradient(W)
x_proj = x[:, None] * W[None, :] * 2 * jnp.pi
return jnp.concatenate([jnp.sin(x_proj), jnp.cos(x_proj)], axis=-1)
class WideResnetBlock(nn.Module):
"""Defines a single WideResnetBlock."""
channels: int
strides: Tuple[int] = (1, 1)
activate_before_residual: bool = False
@nn.compact
def __call__(self, x, temb=None, train=True):
if self.activate_before_residual:
x = activation(x, train, name="init_bn")
orig_x = x
else:
orig_x = x
block_x = x
if not self.activate_before_residual:
block_x = activation(block_x, train, name="init_bn")
block_x = nn.Conv(
self.channels,
(3, 3),
self.strides,
padding="SAME",
use_bias=False,
kernel_init=conv_kernel_init_fn,
name="conv1",
)(block_x)
if temb is not None:
block_x += nn.Dense(self.channels)(nn.swish(temb))[:, None, None, :]
block_x = activation(block_x, train=train, name="bn_2")
block_x = nn.Conv(
self.channels,
(3, 3),
padding="SAME",
use_bias=False,
kernel_init=conv_kernel_init_fn,
name="conv2",
)(block_x)
return _output_add(block_x, orig_x)
class WideResnetGroup(nn.Module):
"""Defines a WideResnetGroup."""
blocks_per_group: int
channels: int
strides: Tuple[int] = (1, 1)
activate_before_residual: bool = False
@nn.compact
def __call__(self, x, temb=None, train=True):
for i in range(self.blocks_per_group):
x = WideResnetBlock(
self.channels,
self.strides if i == 0 else (1, 1),
activate_before_residual=self.activate_before_residual and not i,
)(x, temb, train)
return x
class WideResnet(nn.Module):
"""Defines the WideResnet Model."""
blocks_per_group: int
channel_multiplier: int
num_outputs: int
@nn.compact
def __call__(self, x, sigmas, train=True):
# per image standardization
N = np.prod(x.shape[1:])
x = (x - jnp.mean(x, axis=(1, 2, 3), keepdims=True)) / jnp.maximum(
jnp.std(x, axis=(1, 2, 3), keepdims=True), 1.0 / np.sqrt(N)
)
temb = GaussianFourierProjection(embedding_size=128, scale=16)(jnp.log(sigmas))
temb = nn.Dense(128 * 4)(temb)
temb = nn.Dense(128 * 4)(nn.swish(temb))
x = nn.Conv(
16,
(3, 3),
padding="SAME",
name="init_conv",
kernel_init=conv_kernel_init_fn,
use_bias=False,
)(x)
x = WideResnetGroup(
self.blocks_per_group,
16 * self.channel_multiplier,
activate_before_residual=True,
)(x, temb, train)
x = WideResnetGroup(
self.blocks_per_group, 32 * self.channel_multiplier, (2, 2)
)(x, temb, train)
x = WideResnetGroup(
self.blocks_per_group, 64 * self.channel_multiplier, (2, 2)
)(x, temb, train)
x = activation(x, train=train, name="pre-pool-bn")
x = nn.avg_pool(x, x.shape[1:3])
x = x.reshape((x.shape[0], -1))
x = nn.Dense(self.num_outputs, kernel_init=dense_layer_init_fn)(x)
return x
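# Usage sketch (illustrative; it mirrors create_classifier earlier in this
# codebase, which builds the noise-conditional image classifier):
#   classifier = WideResnet(blocks_per_group=4, channel_multiplier=10, num_outputs=10)
#   variables = classifier.init(
#       {"params": rng, "dropout": dropout_rng},
#       jnp.ones((batch, 32, 32, 3)),  # images
#       jnp.ones((batch,)),            # per-example noise scales (sigmas)
#       train=False,
#   )
#   logits = classifier.apply(variables, images, sigmas, train=False)  # (batch, 10)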
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Common layers for defining score networks.
"""
import functools
import math
import string
from typing import Any, Sequence, Optional
import flax.linen as nn
import jax
import jax.nn as jnn
import jax.numpy as jnp
def get_act(config):
"""Get activation functions from the config file."""
if config.model.nonlinearity.lower() == "elu":
return nn.elu
elif config.model.nonlinearity.lower() == "relu":
return nn.relu
elif config.model.nonlinearity.lower() == "lrelu":
return functools.partial(nn.leaky_relu, negative_slope=0.2)
elif config.model.nonlinearity.lower() == "swish":
return nn.swish
else:
raise NotImplementedError("activation function does not exist!")
def ncsn_conv1x1(x, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0):
"""1x1 convolution with PyTorch initialization. Same as NCSNv1/v2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
kernel_init = jnn.initializers.variance_scaling(
1 / 3 * init_scale, "fan_in", "uniform"
)
kernel_shape = (1, 1) + (x.shape[-1], out_planes)
bias_init = lambda key, shape: kernel_init(key, kernel_shape)[0, 0, 0, :]
output = nn.Conv(
out_planes,
kernel_size=(1, 1),
strides=(stride, stride),
padding="SAME",
use_bias=bias,
kernel_dilation=(dilation, dilation),
kernel_init=kernel_init,
bias_init=bias_init,
)(x)
return output
def default_init(scale=1.0):
"""The same initialization used in DDPM."""
scale = 1e-10 if scale == 0 else scale
return jnn.initializers.variance_scaling(scale, "fan_avg", "uniform")
def ddpm_conv1x1(x, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0):
"""1x1 convolution with DDPM initialization."""
bias_init = jnn.initializers.zeros
output = nn.Conv(
out_planes,
kernel_size=(1, 1),
strides=(stride, stride),
padding="SAME",
use_bias=bias,
kernel_dilation=(dilation, dilation),
kernel_init=default_init(init_scale),
bias_init=bias_init,
)(x)
return output
def ncsn_conv3x3(x, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0):
"""3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
kernel_init = jnn.initializers.variance_scaling(
1 / 3 * init_scale, "fan_in", "uniform"
)
kernel_shape = (3, 3) + (x.shape[-1], out_planes)
bias_init = lambda key, shape: kernel_init(key, kernel_shape)[0, 0, 0, :]
output = nn.Conv(
out_planes,
kernel_size=(3, 3),
strides=(stride, stride),
padding="SAME",
use_bias=bias,
kernel_dilation=(dilation, dilation),
kernel_init=kernel_init,
bias_init=bias_init,
)(x)
return output
def ddpm_conv3x3(x, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0):
"""3x3 convolution with DDPM initialization."""
bias_init = jnn.initializers.zeros
output = nn.Conv(
out_planes,
kernel_size=(3, 3),
strides=(stride, stride),
padding="SAME",
use_bias=bias,
kernel_dilation=(dilation, dilation),
kernel_init=default_init(init_scale),
bias_init=bias_init,
)(x)
return output
###########################################################################
# Functions below are ported over from the NCSNv1/NCSNv2 codebase:
# https://github.com/ermongroup/ncsn
# https://github.com/ermongroup/ncsnv2
###########################################################################
class CRPBlock(nn.Module):
"""CRPBlock for RefineNet. Used in NCSNv2."""
features: int
n_stages: int
act: Any = nn.relu
@nn.compact
def __call__(self, x):
x = self.act(x)
path = x
for _ in range(self.n_stages):
path = nn.max_pool(
path, window_shape=(5, 5), strides=(1, 1), padding="SAME"
)
path = ncsn_conv3x3(path, self.features, stride=1, bias=False)
x = path + x
return x
class CondCRPBlock(nn.Module):
"""Noise-conditional CRPBlock for RefineNet. Used in NCSNv1."""
features: int
n_stages: int
normalizer: Any
act: Any = nn.relu
@nn.compact
def __call__(self, x, y):
x = self.act(x)
path = x
for _ in range(self.n_stages):
path = self.normalizer()(path, y)
path = nn.avg_pool(
path, window_shape=(5, 5), strides=(1, 1), padding="SAME"
)
path = ncsn_conv3x3(path, self.features, stride=1, bias=False)
x = path + x
return x
class RCUBlock(nn.Module):
"""RCUBlock for RefineNet. Used in NCSNv2."""
features: int
n_blocks: int
n_stages: int
act: Any = nn.relu
@nn.compact
def __call__(self, x):
for _ in range(self.n_blocks):
residual = x
for _ in range(self.n_stages):
x = self.act(x)
x = ncsn_conv3x3(x, self.features, stride=1, bias=False)
x = x + residual
return x
class CondRCUBlock(nn.Module):
"""Noise-conditional RCUBlock for RefineNet. Used in NCSNv1."""
features: int
n_blocks: int
n_stages: int
normalizer: Any
act: Any = nn.relu
@nn.compact
def __call__(self, x, y):
for _ in range(self.n_blocks):
residual = x
for _ in range(self.n_stages):
x = self.normalizer()(x, y)
x = self.act(x)
x = ncsn_conv3x3(x, self.features, stride=1, bias=False)
x += residual
return x
class MSFBlock(nn.Module):
"""MSFBlock for RefineNet. Used in NCSNv2."""
shape: Sequence[int]
features: int
interpolation: str = "bilinear"
@nn.compact
def __call__(self, xs):
sums = jnp.zeros((xs[0].shape[0], *self.shape, self.features))
for i in range(len(xs)):
h = ncsn_conv3x3(xs[i], self.features, stride=1, bias=True)
if self.interpolation == "bilinear":
h = jax.image.resize(
h, (h.shape[0], *self.shape, h.shape[-1]), "bilinear"
)
elif self.interpolation == "nearest_neighbor":
h = jax.image.resize(
h, (h.shape[0], *self.shape, h.shape[-1]), "nearest"
)
else:
raise ValueError(f"Interpolation {self.interpolation} does not exist!")
sums = sums + h
return sums
class CondMSFBlock(nn.Module):
"""Noise-conditional MSFBlock for RefineNet. Used in NCSNv1."""
shape: Sequence[int]
features: int
normalizer: Any
interpolation: str = "bilinear"
@nn.compact
def __call__(self, xs, y):
sums = jnp.zeros((xs[0].shape[0], *self.shape, self.features))
for i in range(len(xs)):
h = self.normalizer()(xs[i], y)
h = ncsn_conv3x3(h, self.features, stride=1, bias=True)
if self.interpolation == "bilinear":
h = jax.image.resize(
h, (h.shape[0], *self.shape, h.shape[-1]), "bilinear"
)
elif self.interpolation == "nearest_neighbor":
h = jax.image.resize(
h, (h.shape[0], *self.shape, h.shape[-1]), "nearest"
)
else:
raise ValueError(f"Interpolation {self.interpolation} does not exist")
sums = sums + h
return sums
class RefineBlock(nn.Module):
"""RefineBlock for building NCSNv2 RefineNet."""
output_shape: Sequence[int]
features: int
act: Any = nn.relu
interpolation: str = "bilinear"
start: bool = False
end: bool = False
@nn.compact
def __call__(self, xs):
rcu_block = functools.partial(RCUBlock, n_blocks=2, n_stages=2, act=self.act)
rcu_block_output = functools.partial(
RCUBlock,
features=self.features,
n_blocks=3 if self.end else 1,
n_stages=2,
act=self.act,
)
hs = []
for i in range(len(xs)):
h = rcu_block(features=xs[i].shape[-1])(xs[i])
hs.append(h)
if not self.start:
msf = functools.partial(
MSFBlock, features=self.features, interpolation=self.interpolation
)
h = msf(shape=self.output_shape)(hs)
else:
h = hs[0]
crp = functools.partial(
CRPBlock, features=self.features, n_stages=2, act=self.act
)
h = crp()(h)
h = rcu_block_output()(h)
return h
class CondRefineBlock(nn.Module):
"""Noise-conditional RefineBlock for building NCSNv1 RefineNet."""
output_shape: Sequence[int]
features: int
normalizer: Any
act: Any = nn.relu
interpolation: str = "bilinear"
start: bool = False
end: bool = False
@nn.compact
def __call__(self, xs, y):
rcu_block = functools.partial(
CondRCUBlock,
n_blocks=2,
n_stages=2,
act=self.act,
normalizer=self.normalizer,
)
rcu_block_output = functools.partial(
CondRCUBlock,
features=self.features,
n_blocks=3 if self.end else 1,
n_stages=2,
act=self.act,
normalizer=self.normalizer,
)
hs = []
for i in range(len(xs)):
h = rcu_block(features=xs[i].shape[-1])(xs[i], y)
hs.append(h)
if not self.start:
msf = functools.partial(
CondMSFBlock,
features=self.features,
interpolation=self.interpolation,
normalizer=self.normalizer,
)
h = msf(shape=self.output_shape)(hs, y)
else:
h = hs[0]
crp = functools.partial(
CondCRPBlock,
features=self.features,
n_stages=2,
act=self.act,
normalizer=self.normalizer,
)
h = crp()(h, y)
h = rcu_block_output()(h, y)
return h
class ConvMeanPool(nn.Module):
"""ConvMeanPool for building the ResNet backbone."""
output_dim: int
kernel_size: int = 3
biases: bool = True
@nn.compact
def __call__(self, inputs):
output = nn.Conv(
features=self.output_dim,
kernel_size=(self.kernel_size, self.kernel_size),
strides=(1, 1),
padding="SAME",
use_bias=self.biases,
)(inputs)
output = (
sum(
[
output[:, ::2, ::2, :],
output[:, 1::2, ::2, :],
output[:, ::2, 1::2, :],
output[:, 1::2, 1::2, :],
]
)
/ 4.0
)
return output
class MeanPoolConv(nn.Module):
"""MeanPoolConv for building the ResNet backbone."""
output_dim: int
kernel_size: int = 3
biases: bool = True
@nn.compact
def __call__(self, inputs):
output = inputs
output = (
sum(
[
output[:, ::2, ::2, :],
output[:, 1::2, ::2, :],
output[:, ::2, 1::2, :],
output[:, 1::2, 1::2, :],
]
)
/ 4.0
)
output = nn.Conv(
features=self.output_dim,
kernel_size=(self.kernel_size, self.kernel_size),
strides=(1, 1),
padding="SAME",
use_bias=self.biases,
)(output)
return output
class ResidualBlock(nn.Module):
"""The residual block for defining the ResNet backbone. Used in NCSNv2."""
output_dim: int
normalization: Any
resample: Optional[str] = None
act: Any = nn.elu
dilation: int = 1
@nn.compact
def __call__(self, x):
h = self.normalization()(x)
h = self.act(h)
if self.resample == "down":
h = ncsn_conv3x3(h, h.shape[-1], dilation=self.dilation)
h = self.normalization()(h)
h = self.act(h)
if self.dilation > 1:
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
shortcut = ncsn_conv3x3(x, self.output_dim, dilation=self.dilation)
else:
h = ConvMeanPool(output_dim=self.output_dim)(h)
shortcut = ConvMeanPool(output_dim=self.output_dim, kernel_size=1)(x)
elif self.resample is None:
if self.dilation > 1:
if self.output_dim == x.shape[-1]:
shortcut = x
else:
shortcut = ncsn_conv3x3(x, self.output_dim, dilation=self.dilation)
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
h = self.normalization()(h)
h = self.act(h)
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
else:
if self.output_dim == x.shape[-1]:
shortcut = x
else:
shortcut = ncsn_conv1x1(x, self.output_dim)
h = ncsn_conv3x3(h, self.output_dim)
h = self.normalization()(h)
h = self.act(h)
h = ncsn_conv3x3(h, self.output_dim)
return h + shortcut
class ConditionalResidualBlock(nn.Module):
"""The noise-conditional residual block for building NCSNv1."""
output_dim: int
normalization: Any
resample: Optional[str] = None
act: Any = nn.elu
dilation: int = 1
@nn.compact
def __call__(self, x, y):
h = self.normalization()(x, y)
h = self.act(h)
if self.resample == "down":
h = ncsn_conv3x3(h, h.shape[-1], dilation=self.dilation)
            h = self.normalization()(h, y)
h = self.act(h)
if self.dilation > 1:
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
shortcut = ncsn_conv3x3(x, self.output_dim, dilation=self.dilation)
else:
h = ConvMeanPool(output_dim=self.output_dim)(h)
shortcut = ConvMeanPool(output_dim=self.output_dim, kernel_size=1)(x)
elif self.resample is None:
if self.dilation > 1:
if self.output_dim == x.shape[-1]:
shortcut = x
else:
shortcut = ncsn_conv3x3(x, self.output_dim, dilation=self.dilation)
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
h = self.normalization()(h, y)
h = self.act(h)
h = ncsn_conv3x3(h, self.output_dim, dilation=self.dilation)
else:
if self.output_dim == x.shape[-1]:
shortcut = x
else:
shortcut = ncsn_conv1x1(x, self.output_dim)
h = ncsn_conv3x3(h, self.output_dim)
h = self.normalization()(h, y)
h = self.act(h)
h = ncsn_conv3x3(h, self.output_dim)
return h + shortcut
###########################################################################
# Functions below are ported over from the DDPM codebase:
# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py
###########################################################################
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32
half_dim = embedding_dim // 2
# magic number 10000 is from transformers
emb = math.log(max_positions) / (half_dim - 1)
# emb = math.log(2.) / (half_dim - 1)
emb = jnp.exp(jnp.arange(half_dim, dtype=jnp.float32) * -emb)
# emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :]
# emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :]
emb = timesteps[:, None] * emb[None, :]
emb = jnp.concatenate([jnp.sin(emb), jnp.cos(emb)], axis=1)
if embedding_dim % 2 == 1: # zero pad
emb = jnp.pad(emb, [[0, 0], [0, 1]])
assert emb.shape == (timesteps.shape[0], embedding_dim)
return emb
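# Hedged usage sketch added for illustration (not in the original source). It checks the
# output shape of get_timestep_embedding; `jnp` is assumed to be jax.numpy as imported at
# the top of this module, and the helper name below is hypothetical.
def _demo_timestep_embedding():
    ts = jnp.arange(4, dtype=jnp.float32)                # a toy batch of 4 timesteps
    emb = get_timestep_embedding(ts, embedding_dim=16)   # -> (batch, embedding_dim)
    assert emb.shape == (4, 16)                          # first half sin, second half cos
    return emb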
class NIN(nn.Module):
num_units: int
init_scale: float = 0.1
@nn.compact
def __call__(self, x):
in_dim = int(x.shape[-1])
W = self.param(
"W", default_init(scale=self.init_scale), (in_dim, self.num_units)
)
b = self.param("b", jnn.initializers.zeros, (self.num_units,))
y = contract_inner(x, W) + b
assert y.shape == x.shape[:-1] + (self.num_units,)
return y
def _einsum(a, b, c, x, y):
einsum_str = "{},{}->{}".format("".join(a), "".join(b), "".join(c))
return jnp.einsum(einsum_str, x, y)
def contract_inner(x, y):
"""tensordot(x, y, 1)."""
x_chars = list(string.ascii_lowercase[: len(x.shape)])
y_chars = list(string.ascii_uppercase[: len(y.shape)])
assert len(x_chars) == len(x.shape) and len(y_chars) == len(y.shape)
y_chars[0] = x_chars[-1] # first axis of y and last of x get summed
out_chars = x_chars[:-1] + y_chars[1:]
return _einsum(x_chars, y_chars, out_chars, x, y)
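# Hedged sketch added for illustration (not in the original source): contract_inner
# contracts the last axis of x with the first axis of y, i.e. jnp.tensordot(x, y, axes=1).
# The helper name is hypothetical; `jnp` comes from the module's imports above.
def _demo_contract_inner():
    x = jnp.ones((2, 3, 4))
    y = jnp.ones((4, 5))
    out = contract_inner(x, y)
    assert out.shape == (2, 3, 5)
    assert jnp.allclose(out, jnp.tensordot(x, y, axes=1))
    return out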
class AttnBlock(nn.Module):
"""Channel-wise self-attention block."""
normalize: Any
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
h = self.normalize()(x)
q = NIN(C)(h)
k = NIN(C)(h)
v = NIN(C)(h)
w = jnp.einsum("bhwc,bHWc->bhwHW", q, k) * (int(C) ** (-0.5))
w = jnp.reshape(w, (B, H, W, H * W))
w = jax.nn.softmax(w, axis=-1)
w = jnp.reshape(w, (B, H, W, H, W))
h = jnp.einsum("bhwHW,bHWc->bhwc", w, v)
h = NIN(C, init_scale=0.0)(h)
return x + h
class Upsample(nn.Module):
with_conv: bool = False
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
h = jax.image.resize(x, (x.shape[0], H * 2, W * 2, C), "nearest")
if self.with_conv:
h = ddpm_conv3x3(h, C)
return h
class Downsample(nn.Module):
with_conv: bool = False
@nn.compact
def __call__(self, x):
B, H, W, C = x.shape
if self.with_conv:
x = ddpm_conv3x3(x, C, stride=2)
else:
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2), padding="SAME")
assert x.shape == (B, H // 2, W // 2, C)
return x
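# Hedged sketch added for illustration (not in the original source): Upsample doubles and
# Downsample halves the spatial resolution of an NHWC batch. `jax` is imported locally in
# case only jax.numpy is imported at module level; the helper name is hypothetical.
def _demo_resize_blocks():
    import jax
    x = jnp.ones((1, 8, 8, 3))
    up = Upsample()       # with_conv=False: pure nearest-neighbor resize, no parameters
    y = up.apply(up.init(jax.random.PRNGKey(0), x), x)
    down = Downsample()   # with_conv=False: 2x2 average pooling, no parameters
    z = down.apply(down.init(jax.random.PRNGKey(0), x), x)
    assert y.shape == (1, 16, 16, 3) and z.shape == (1, 4, 4, 3)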
class ResnetBlockDDPM(nn.Module):
"""The ResNet Blocks used in DDPM."""
act: Any
normalize: Any
out_ch: Optional[int] = None
conv_shortcut: bool = False
dropout: float = 0.5
@nn.compact
def __call__(self, x, temb=None, train=True):
B, H, W, C = x.shape
out_ch = self.out_ch if self.out_ch else C
h = self.act(self.normalize()(x))
h = ddpm_conv3x3(h, out_ch)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += nn.Dense(out_ch, kernel_init=default_init())(self.act(temb))[
:, None, None, :
]
h = self.act(self.normalize()(h))
h = nn.Dropout(self.dropout)(h, deterministic=not train)
h = ddpm_conv3x3(h, out_ch, init_scale=0.0)
if C != out_ch:
if self.conv_shortcut:
x = ddpm_conv3x3(x, out_ch)
else:
x = NIN(out_ch)(x)
return x + h
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization layers."""
import flax.linen as nn
import functools
import jax.nn.initializers as init
import jax.numpy as jnp
def get_normalization(config, conditional=False):
"""Obtain normalization modules from the config file."""
norm = config.model.normalization
if conditional:
if norm == "InstanceNorm++":
return functools.partial(
ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes
)
else:
raise NotImplementedError(f"{norm} not implemented yet.")
else:
if norm == "InstanceNorm":
return InstanceNorm2d
elif norm == "InstanceNorm++":
return InstanceNorm2dPlus
elif norm == "VarianceNorm":
return VarianceNorm2d
elif norm == "GroupNorm":
return nn.GroupNorm
else:
raise ValueError("Unknown normalization: %s" % norm)
class VarianceNorm2d(nn.Module):
"""Variance normalization for images."""
bias: bool = False
@staticmethod
def scale_init(key, shape, dtype=jnp.float32):
normal_init = init.normal(0.02)
return normal_init(key, shape, dtype=dtype) + 1.0
@nn.compact
def __call__(self, x):
variance = jnp.var(x, axis=(1, 2), keepdims=True)
h = x / jnp.sqrt(variance + 1e-5)
h = h * self.param("scale", VarianceNorm2d.scale_init, (1, 1, 1, x.shape[-1]))
if self.bias:
h = h + self.param("bias", init.zeros, (1, 1, 1, x.shape[-1]))
return h
class InstanceNorm2d(nn.Module):
"""Instance normalization for images."""
bias: bool = True
@nn.compact
def __call__(self, x):
mean = jnp.mean(x, axis=(1, 2), keepdims=True)
variance = jnp.var(x, axis=(1, 2), keepdims=True)
h = (x - mean) / jnp.sqrt(variance + 1e-5)
h = h * self.param("scale", init.ones, (1, 1, 1, x.shape[-1]))
if self.bias:
h = h + self.param("bias", init.zeros, (1, 1, 1, x.shape[-1]))
return h
class InstanceNorm2dPlus(nn.Module):
"""InstanceNorm++ as proposed in the original NCSN paper."""
bias: bool = True
@staticmethod
def scale_init(key, shape, dtype=jnp.float32):
normal_init = init.normal(0.02)
return normal_init(key, shape, dtype=dtype) + 1.0
@nn.compact
def __call__(self, x):
means = jnp.mean(x, axis=(1, 2))
m = jnp.mean(means, axis=-1, keepdims=True)
v = jnp.var(means, axis=-1, keepdims=True)
means_plus = (means - m) / jnp.sqrt(v + 1e-5)
h = (x - means[:, None, None, :]) / jnp.sqrt(
jnp.var(x, axis=(1, 2), keepdims=True) + 1e-5
)
h = h + means_plus[:, None, None, :] * self.param(
"alpha", InstanceNorm2dPlus.scale_init, (1, 1, 1, x.shape[-1])
)
h = h * self.param(
"gamma", InstanceNorm2dPlus.scale_init, (1, 1, 1, x.shape[-1])
)
if self.bias:
h = h + self.param("beta", init.zeros, (1, 1, 1, x.shape[-1]))
return h
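# Hedged usage sketch added for illustration (not in the original source): initialize and
# apply InstanceNorm2dPlus on a dummy NHWC batch. `jax` is imported locally because this
# module imports only jax.numpy and jax.nn.initializers; the helper name is hypothetical.
def _demo_instance_norm_plus():
    import jax
    x = jnp.ones((2, 8, 8, 4))
    layer = InstanceNorm2dPlus()
    params = layer.init(jax.random.PRNGKey(0), x)
    y = layer.apply(params, x)
    assert y.shape == x.shape
    return y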
class ConditionalInstanceNorm2dPlus(nn.Module):
"""Conditional InstanceNorm++ as in the original NCSN paper."""
num_classes: int = 10
bias: bool = True
@nn.compact
def __call__(self, x, y):
means = jnp.mean(x, axis=(1, 2))
m = jnp.mean(means, axis=-1, keepdims=True)
v = jnp.var(means, axis=-1, keepdims=True)
means_plus = (means - m) / jnp.sqrt(v + 1e-5)
h = (x - means[:, None, None, :]) / jnp.sqrt(
jnp.var(x, axis=(1, 2), keepdims=True) + 1e-5
)
normal_init = init.normal(0.02)
zero_init = init.zeros
if self.bias:
def init_embed(key, shape, dtype=jnp.float32):
feature_size = shape[1] // 3
normal = (
normal_init(key, (shape[0], 2 * feature_size), dtype=dtype) + 1.0
)
zero = zero_init(key, (shape[0], feature_size), dtype=dtype)
return jnp.concatenate([normal, zero], axis=-1)
embed = nn.Embed(
num_embeddings=self.num_classes,
features=x.shape[-1] * 3,
embedding_init=init_embed,
)
else:
def init_embed(key, shape, dtype=jnp.float32):
return normal_init(key, shape, dtype=dtype) + 1.0
embed = nn.Embed(
num_embeddings=self.num_classes,
features=x.shape[-1] * 2,
embedding_init=init_embed,
)
if self.bias:
gamma, alpha, beta = jnp.split(embed(y), 3, axis=-1)
h = h + means_plus[:, None, None, :] * alpha[:, None, None, :]
out = gamma[:, None, None, :] * h + beta[:, None, None, :]
else:
gamma, alpha = jnp.split(embed(y), 2, axis=-1)
h = h + means_plus[:, None, None, :] * alpha[:, None, None, :]
out = gamma[:, None, None, :] * h
return out
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
from . import utils, layers, layerspp, normalization
import flax.linen as nn
import functools
import jax.numpy as jnp
import numpy as np
import ml_collections
ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp
ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp
Combine = layerspp.Combine
conv3x3 = layerspp.conv3x3
conv1x1 = layerspp.conv1x1
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name="ncsnpp")
class NCSNpp(nn.Module):
"""NCSN++ model"""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, time_cond, train=True):
# config parsing
config = self.config
act = get_act(config)
nf = config.model.nf
ch_mult = config.model.ch_mult
num_res_blocks = config.model.num_res_blocks
attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
num_resolutions = len(ch_mult)
conditional = config.model.conditional # noise-conditional
fir = config.model.fir
fir_kernel = config.model.fir_kernel
skip_rescale = config.model.skip_rescale
resblock_type = config.model.resblock_type.lower()
progressive = config.model.progressive.lower()
progressive_input = config.model.progressive_input.lower()
embedding_type = config.model.embedding_type.lower()
init_scale = config.model.init_scale
assert progressive in ["none", "output_skip", "residual"]
assert progressive_input in ["none", "input_skip", "residual"]
assert embedding_type in ["fourier", "positional"]
combine_method = config.model.progressive_combine.lower()
combiner = functools.partial(Combine, method=combine_method)
# timestep/noise_level embedding; only for continuous training
if embedding_type == "fourier":
# Gaussian Fourier features embeddings.
temb = layerspp.GaussianFourierProjection(
embedding_size=nf, scale=config.model.fourier_scale
)(time_cond)
elif embedding_type == "positional":
# Sinusoidal positional embeddings.
temb = layers.get_timestep_embedding(time_cond, nf)
else:
raise ValueError(f"embedding type {embedding_type} unknown.")
if conditional:
temb = nn.Dense(nf * 4, kernel_init=default_initializer())(temb)
temb = nn.Dense(nf * 4, kernel_init=default_initializer())(act(temb))
else:
temb = None
AttnBlock = functools.partial(
layerspp.AttnBlockpp, init_scale=init_scale, skip_rescale=skip_rescale
)
Upsample = functools.partial(
layerspp.Upsample,
with_conv=resamp_with_conv,
fir=fir,
fir_kernel=fir_kernel,
)
if progressive == "output_skip":
pyramid_upsample = functools.partial(
layerspp.Upsample, fir=fir, fir_kernel=fir_kernel, with_conv=False
)
elif progressive == "residual":
pyramid_upsample = functools.partial(
layerspp.Upsample, fir=fir, fir_kernel=fir_kernel, with_conv=True
)
Downsample = functools.partial(
layerspp.Downsample,
with_conv=resamp_with_conv,
fir=fir,
fir_kernel=fir_kernel,
)
if progressive_input == "input_skip":
pyramid_downsample = functools.partial(
layerspp.Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=False
)
elif progressive_input == "residual":
pyramid_downsample = functools.partial(
layerspp.Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=True
)
if resblock_type == "ddpm":
ResnetBlock = functools.partial(
ResnetBlockDDPM,
act=act,
dropout=dropout,
init_scale=init_scale,
skip_rescale=skip_rescale,
)
elif resblock_type == "biggan":
ResnetBlock = functools.partial(
ResnetBlockBigGAN,
act=act,
dropout=dropout,
fir=fir,
fir_kernel=fir_kernel,
init_scale=init_scale,
skip_rescale=skip_rescale,
)
else:
raise ValueError(f"resblock type {resblock_type} unrecognized.")
# Downsampling block
input_pyramid = None
if progressive_input != "none":
input_pyramid = x
hs = [conv3x3(x, nf)]
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
h = ResnetBlock(out_ch=nf * ch_mult[i_level])(hs[-1], temb, train)
if h.shape[1] in attn_resolutions:
h = AttnBlock()(h)
hs.append(h)
if i_level != num_resolutions - 1:
if resblock_type == "ddpm":
h = Downsample()(hs[-1])
else:
h = ResnetBlock(down=True)(hs[-1], temb, train)
if progressive_input == "input_skip":
input_pyramid = pyramid_downsample()(input_pyramid)
h = combiner()(input_pyramid, h)
elif progressive_input == "residual":
input_pyramid = pyramid_downsample(out_ch=h.shape[-1])(
input_pyramid
)
if skip_rescale:
input_pyramid = (input_pyramid + h) / np.sqrt(
2.0, dtype=np.float32
)
else:
input_pyramid = input_pyramid + h
h = input_pyramid
hs.append(h)
h = hs[-1]
h = ResnetBlock()(h, temb, train)
h = AttnBlock()(h)
h = ResnetBlock()(h, temb, train)
pyramid = None
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
h = ResnetBlock(out_ch=nf * ch_mult[i_level])(
jnp.concatenate([h, hs.pop()], axis=-1), temb, train
)
if h.shape[1] in attn_resolutions:
h = AttnBlock()(h)
if progressive != "none":
if i_level == num_resolutions - 1:
if progressive == "output_skip":
pyramid = conv3x3(
act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h)),
x.shape[-1],
bias=True,
init_scale=init_scale,
)
elif progressive == "residual":
pyramid = conv3x3(
act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h)),
h.shape[-1],
bias=True,
)
else:
raise ValueError(f"{progressive} is not a valid name.")
else:
if progressive == "output_skip":
pyramid = pyramid_upsample()(pyramid)
pyramid = pyramid + conv3x3(
act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h)),
x.shape[-1],
bias=True,
init_scale=init_scale,
)
elif progressive == "residual":
pyramid = pyramid_upsample(out_ch=h.shape[-1])(pyramid)
if skip_rescale:
pyramid = (pyramid + h) / np.sqrt(2.0, dtype=np.float32)
else:
pyramid = pyramid + h
h = pyramid
else:
raise ValueError(f"{progressive} is not a valid name")
if i_level != 0:
if resblock_type == "ddpm":
h = Upsample()(h)
else:
h = ResnetBlock(up=True)(h, temb, train)
assert not hs
if progressive == "output_skip" and not config.model.double_heads:
h = pyramid
else:
h = act(nn.GroupNorm(num_groups=min(h.shape[-1] // 4, 32))(h))
if config.model.double_heads:
h = conv3x3(h, x.shape[-1] * 2, init_scale=init_scale)
else:
h = conv3x3(h, x.shape[-1], init_scale=init_scale)
return h
@utils.register_model(name="joint_ncsnpp")
class JointNCSNpp(nn.Module):
"""NCSN++ model"""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, time_cond, train=True):
        # build two NCSN++ heads from the same config: a denoiser and a distiller
denoiser = NCSNpp(config=self.config)
distiller = NCSNpp(config=self.config)
return denoiser(x, time_cond, train), distiller(x, time_cond, train)
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""DDPM model.
This code is the FLAX equivalent of:
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
"""
import flax.linen as nn
import jax.numpy as jnp
import ml_collections
import functools
from . import utils, layers, normalization
RefineBlock = layers.RefineBlock
ResidualBlock = layers.ResidualBlock
ResnetBlockDDPM = layers.ResnetBlockDDPM
Upsample = layers.Upsample
Downsample = layers.Downsample
conv3x3 = layers.ddpm_conv3x3
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name="ddpm")
class DDPM(nn.Module):
"""DDPM model architecture."""
config: ml_collections.ConfigDict
@nn.compact
def __call__(self, x, labels, train=True):
# config parsing
config = self.config
act = get_act(config)
normalize = get_normalization(config)
nf = config.model.nf
ch_mult = config.model.ch_mult
num_res_blocks = config.model.num_res_blocks
attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
num_resolutions = len(ch_mult)
AttnBlock = functools.partial(layers.AttnBlock, normalize=normalize)
ResnetBlock = functools.partial(
ResnetBlockDDPM, act=act, normalize=normalize, dropout=dropout
)
if config.model.conditional:
# timestep/scale embedding
timesteps = labels
temb = layers.get_timestep_embedding(timesteps, nf)
temb = nn.Dense(nf * 4, kernel_init=default_initializer())(temb)
temb = nn.Dense(nf * 4, kernel_init=default_initializer())(act(temb))
else:
temb = None
if config.data.centered:
# Input is in [-1, 1]
h = x
else:
# Input is in [0, 1]
h = 2 * x - 1.0
# Downsampling block
hs = [conv3x3(h, nf)]
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
h = ResnetBlock(out_ch=nf * ch_mult[i_level])(hs[-1], temb, train)
if h.shape[1] in attn_resolutions:
h = AttnBlock()(h)
hs.append(h)
if i_level != num_resolutions - 1:
hs.append(Downsample(with_conv=resamp_with_conv)(hs[-1]))
h = hs[-1]
h = ResnetBlock()(h, temb, train)
h = AttnBlock()(h)
h = ResnetBlock()(h, temb, train)
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
h = ResnetBlock(out_ch=nf * ch_mult[i_level])(
jnp.concatenate([h, hs.pop()], axis=-1), temb, train
)
if h.shape[1] in attn_resolutions:
h = AttnBlock()(h)
if i_level != 0:
h = Upsample(with_conv=resamp_with_conv)(h)
assert not hs
h = act(normalize()(h))
h = conv3x3(h, x.shape[-1], init_scale=0.0)
return h
|
# Code from https://github.com/pcuenca/lpips-j/blob/main/src/lpips_j/lpips.py
#
# Original copyright statement:
# Copyright 2021 The DALL·E mini Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import flax.linen as nn
import jax.numpy as jnp
# from flaxmodels import VGG16
import flaxmodels.vgg as vgg
from huggingface_hub import hf_hub_download
class VGGExtractor(vgg.VGG):
"""
    VGG16 configured as a feature extractor for LPIPS, with weights
    downloaded from the Hugging Face Hub.
    Note: this subclasses `VGG` from `flaxmodels`, even though it was
    probably not meant to be subclassed (it is not included in `__all__`).
"""
def __init__(self):
super().__init__(
output="activations",
pretrained="imagenet",
architecture="vgg16",
include_head=False,
)
def setup(self):
weights_file = hf_hub_download(
repo_id="pcuenq/lpips-jax", filename="vgg16_weights.h5"
)
self.param_dict = h5py.File(weights_file, "r")
class NetLinLayer(nn.Module):
weights: jnp.array
kernel_size = (1, 1)
def setup(self):
w = lambda *_: self.weights
self.layer = nn.Conv(
1, self.kernel_size, kernel_init=w, strides=None, padding=0, use_bias=False
)
def __call__(self, x):
x = self.layer(x)
return x
class LPIPS(nn.Module):
def setup(self):
# We don't add a scaling layer because `VGG16` already includes it
self.feature_names = ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"]
self.vgg = VGGExtractor()
weights_file = hf_hub_download(
repo_id="pcuenq/lpips-jax", filename="lpips_lin.h5"
)
lin_weights = h5py.File(weights_file)
self.lins = [
NetLinLayer(jnp.array(lin_weights[f"lin{i}"]))
for i in range(len(self.feature_names))
]
def __call__(self, x, t):
x = self.vgg((x + 1) / 2)
t = self.vgg((t + 1) / 2)
feats_x, feats_t, diffs = {}, {}, {}
for i, f in enumerate(self.feature_names):
feats_x[i], feats_t[i] = normalize_tensor(x[f]), normalize_tensor(t[f])
diffs[i] = (feats_x[i] - feats_t[i]) ** 2
# We should maybe vectorize this better
res = [
spatial_average(self.lins[i](diffs[i]), keepdims=True)
for i in range(len(self.feature_names))
]
val = res[0]
for i in range(1, len(res)):
val += res[i]
return val
def normalize_tensor(x, eps=1e-10):
# Use `-1` because we are channel-last
norm_factor = jnp.sqrt(jnp.sum(x**2, axis=-1, keepdims=True))
return x / (norm_factor + eps)
def spatial_average(x, keepdims=True):
# Mean over W, H
return jnp.mean(x, axis=[1, 2], keepdims=keepdims)
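# Hedged usage sketch added for illustration (not in the original file). Constructing LPIPS
# downloads the VGG16 and linear-head weights from the Hugging Face Hub, so this needs
# network access; inputs are assumed to be NHWC images scaled to [-1, 1]. The helper name
# is hypothetical.
def _demo_lpips_distance():
    import jax
    x = jnp.zeros((1, 64, 64, 3))
    t = jnp.ones((1, 64, 64, 3))
    lpips = LPIPS()
    params = lpips.init(jax.random.PRNGKey(0), x, t)
    dist = lpips.apply(params, x, t)   # per-image distance, spatially averaged
    return dist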
|
from configs.default_cifar10_configs import get_default_configs
from configs.cifar10_k_ve import get_config as get_ref_config
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "consistency_adaptive"
training.ref_model_path = "/path/to/edm_cifar10_ema"
training.ref_config = get_ref_config()
training.n_iters = 800000
training.n_jitted_steps = 1
training.snapshot_freq_for_preemption = 5000
training.snapshot_freq = 10000
training.batch_size = 512
training.loss_norm = "lpips"
training.finetune = False
training.stopgrad = True
training.dsm_target = True
training.solver = "euler"
training.start_ema = 0.9
training.start_scales = 2
training.end_scales = 150
training.target_ema_mode = "adaptive"
training.scale_mode = "progressive"
training.weighting = "uniform"
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 80
evaluate.enable_loss = True
# sampling
sampling = config.sampling
sampling.method = "onestep"
sampling.std = config.model.t_max
# data
data = config.data
data.dataset = "CIFAR10"
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = 0.9999
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
# model.dropout = 0.13
model.dropout = 0.0
# optimization
optim = config.optim
optim.weight_decay = 0.0
optim.optimizer = "radam"
optim.lr = 2e-4
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
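# Hedged usage sketch added for illustration (not part of the original config file): the
# returned ml_collections.ConfigDict can be loaded and overridden programmatically. This is
# a guess at typical usage, not the repository's actual training entry point.
def _demo_override_config():
    cfg = get_config()
    cfg.training.batch_size = 256   # override a field for a smaller run
    return cfg.training.loss, cfg.model.num_scales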
|
from configs.default_cifar10_configs import get_default_configs
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "dsm"
training.batch_size = 512
training.n_iters = 400001
training.n_jitted_steps = 2
training.snapshot_freq = 10000
training.snapshot_freq_for_preemption = 5000
training.log_freq = 50
training.eval_freq = 100
# sampling
sampling = config.sampling
sampling.method = "heun"
sampling.denoise = True
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 40
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = math.exp(
math.log(0.5) / (0.5e6 / training.batch_size)
) # half life of 0.5M images
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.embedding_type = "fourier"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.13
# optimization
optim = config.optim
optim.weight_decay = 0
optim.optimizer = "Adam"
optim.lr = 1e-3
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
|
from configs.default_cifar10_configs import get_default_configs
from configs.cifar10_k_ve import get_config as get_ref_config
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "consistency_ema"
training.ref_model_path = "/path/to/edm_cifar10_ema"
training.ref_config = get_ref_config()
training.n_iters = 800001
training.n_jitted_steps = 1
training.snapshot_freq_for_preemption = 5000
training.snapshot_freq = 10000
training.batch_size = 512
training.loss_norm = "lpips"
training.finetune = False
training.stopgrad = True
training.dsm_target = True
training.solver = "euler"
training.target_ema = 0.99
training.weighting = "uniform"
training.target_ema_mode = "fixed"
training.scale_mode = "fixed"
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 80
evaluate.enable_loss = True
# sampling
sampling = config.sampling
sampling.method = "onestep"
sampling.std = config.model.t_max
# data
data = config.data
data.dataset = "CIFAR10"
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = 0.9999
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.0
# optimization
optim = config.optim
optim.weight_decay = 0.0
optim.optimizer = "radam"
optim.lr = 2e-4
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
|
from configs.default_cifar10_configs import get_default_configs
from configs.cifar10_k_ve import get_config as get_ref_config
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "continuous"
training.ref_model_path = "/path/to/edm_cifar10_ema"
training.ref_config = get_ref_config()
training.n_iters = 800001
training.n_jitted_steps = 1
training.snapshot_freq_for_preemption = 5000
training.snapshot_freq = 10000
training.batch_size = 512
training.loss_norm = "l2"
training.finetune = True
training.stopgrad = False
training.dsm_target = False
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 40
evaluate.enable_loss = True
# sampling
sampling = config.sampling
sampling.method = "onestep"
sampling.std = config.model.t_max
# data
data = config.data
data.dataset = "CIFAR10"
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = 0.9999
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.3
# optimization
optim = config.optim
optim.weight_decay = 0.0
optim.optimizer = "radam"
optim.lr = 1e-3
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
|
from configs.default_cifar10_configs import get_default_configs
from configs.cifar10_k_ve import get_config as get_ref_config
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "progressive_distillation"
training.ref_model_path = "/path/to/edm_cifar10_ema"
training.ref_config = get_ref_config()
training.n_iters = 800001
training.n_jitted_steps = 10
training.snapshot_freq_for_preemption = 5000
training.snapshot_freq = 10000
training.batch_size = 512
training.loss_norm = "l2"
training.finetune = True
training.target_ema_mode = "fixed"
training.scale_mode = "progdist"
training.start_scales = 4096
training.distill_steps_per_iter = 50000
training.weighting = "truncated_snr"
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 80
evaluate.enable_loss = False
# sampling
sampling = config.sampling
sampling.method = "progressive_distillation"
sampling.denoise = False
# data
data = config.data
data.dataset = "CIFAR10"
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = 0.0
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.0
# optimization
optim = config.optim
optim.weight_decay = 0.0
optim.optimizer = "adam"
optim.lr = 5e-5
optim.schedule = "linear"
optim.linear_decay_steps = training.distill_steps_per_iter
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = 0
optim.grad_clip = 1.0
return config
|
from configs.default_cifar10_configs import get_default_configs
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "dsm"
training.batch_size = 512
training.n_iters = 400001
training.n_jitted_steps = 2
training.snapshot_freq = 10000
training.snapshot_freq_for_preemption = 5000
training.log_freq = 50
training.eval_freq = 100
# sampling
sampling = config.sampling
sampling.method = "heun"
sampling.denoise = True
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 40
# model
model = config.model
model.name = "ncsnpp"
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = False
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "none"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.embedding_type = "positional"
model.fourier_scale = 16
model.conv_size = 3
model.ema_rate = math.exp(
math.log(0.5) / (0.5e6 / training.batch_size)
) # half life of 0.5M images
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.13
# optimization
optim = config.optim
optim.weight_decay = 0
optim.optimizer = "Adam"
optim.lr = 1e-3
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
|
from configs.default_cifar10_configs import get_default_configs
from configs.cifar10_k_ve import get_config as get_ref_config
import math
def get_config():
config = get_default_configs()
# training
training = config.training
training.sde = "kvesde"
training.loss = "consistency"
training.ref_model_path = "/path/to/edm_cifar10_ema"
training.ref_config = get_ref_config()
training.n_iters = 800001
training.n_jitted_steps = 1
training.snapshot_freq_for_preemption = 5000
training.snapshot_freq = 10000
training.batch_size = 512
training.loss_norm = "lpips"
training.finetune = True
training.stopgrad = True
training.dsm_target = False
training.solver = "heun"
training.weighting = "uniform"
# evaluation
evaluate = config.eval
evaluate.begin_ckpt = 1
evaluate.end_ckpt = 80
evaluate.enable_loss = True
# sampling
sampling = config.sampling
sampling.method = "onestep"
sampling.std = config.model.t_max
sampling.n_steps = 18
# data
data = config.data
data.dataset = "CIFAR10"
# model
model = config.model
model.name = "ncsnpp"
model.ema_rate = 0.9999
model.normalization = "GroupNorm"
model.nonlinearity = "swish"
model.nf = 128
model.ch_mult = (2, 2, 2)
model.num_res_blocks = 4
model.attn_resolutions = (16,)
model.resamp_with_conv = True
model.conditional = True
model.fir = True
model.fir_kernel = [1, 3, 3, 1]
model.skip_rescale = True
model.resblock_type = "biggan"
model.progressive = "none"
model.progressive_input = "residual"
model.progressive_combine = "sum"
model.attention_type = "ddpm"
model.init_scale = 0.0
model.fourier_scale = 16
model.conv_size = 3
model.rho = 7.0
model.data_std = 0.5
model.num_scales = 18
model.dropout = 0.0
# optimization
optim = config.optim
optim.weight_decay = 0.0
optim.optimizer = "radam"
optim.lr = 4e-4
optim.beta1 = 0.9
optim.eps = 1e-8
optim.warmup = int(1e7 / training.batch_size) # warmup for 10M images
optim.grad_clip = float("inf") # no gradient clipping
return config
|
import ml_collections
def get_default_configs():
config = ml_collections.ConfigDict()
# training
config.training = training = ml_collections.ConfigDict()
training.batch_size = 128
training.n_iters = 1300001
training.snapshot_freq = 50000
training.log_freq = 50
training.eval_freq = 100
## store additional checkpoints for preemption in cloud computing environments
training.snapshot_freq_for_preemption = 10000
## produce samples at each snapshot.
training.snapshot_sampling = True
training.likelihood_weighting = False
training.n_jitted_steps = 5 # TODO: important flag!
# sampling
config.sampling = sampling = ml_collections.ConfigDict()
sampling.n_steps_each = 1
sampling.noise_removal = True
sampling.probability_flow = False
sampling.snr = 0.16
# evaluation
config.eval = evaluate = ml_collections.ConfigDict()
evaluate.begin_ckpt = 9
evaluate.end_ckpt = 26
evaluate.batch_size = 512
evaluate.enable_sampling = True
evaluate.num_samples = 50000
evaluate.enable_loss = True
evaluate.enable_bpd = False
evaluate.bpd_dataset = "test"
# data
config.data = data = ml_collections.ConfigDict()
data.dataset = "CIFAR10"
data.image_size = 32
data.random_flip = True
data.uniform_dequantization = False
data.num_channels = 3
# model
config.model = model = ml_collections.ConfigDict()
model.sigma_min = 0.02
model.sigma_max = 100
model.num_scales = 1000
model.beta_min = 0.1
model.beta_max = 20.0
model.t_min = 0.002
model.t_max = 80.0
model.dropout = 0.1
model.embedding_type = "fourier"
model.double_heads = False
# optimization
config.optim = optim = ml_collections.ConfigDict()
optim.weight_decay = 0.0
optim.optimizer = "Adam"
optim.lr = 2e-4
optim.beta1 = 0.9
optim.beta2 = 0.999
optim.eps = 1e-8
optim.warmup = 5000
optim.grad_clip = 1.0
optim.clip_sigmas = 5.0
config.seed = 42
return config
|
from tqdm import tqdm
from model import CLIPImage, CLIPText
import tensorflow as tf
import os
import numpy as np
from lucid.optvis import objectives, param
import lucid.optvis.render as render
from lucid.optvis.objectives import wrap_objective, diversity
import lucid.optvis.transform as transform
from lucid.misc.io import load, save
@wrap_objective()
def l2(batch=None):
def inner(T):
return -tf.reduce_mean((T("input") - 0.5)**2)
return inner
@wrap_objective()
def vector(layer, d, batch=None):
def inner(T):
        channel_obj = tf.reduce_mean(
            tf.einsum("ijkl,j->ikl", tf.nn.relu(T(layer)), tf.constant(d)), [1, 2]
        )
channel_obj_weighted = tf.reduce_mean(channel_obj)**(1/1)
return channel_obj_weighted
return inner
@wrap_objective()
def attr(obj, style_attrs, layers, strength):
def inner(T):
style = tf.constant(style_attrs)
obj_t = obj(T)
layer_t = T(layers[0])
w = tf.linspace(strength[0], strength[1], tf.shape(layer_t)[0])
batch_n, _, _, _ = layer_t.get_shape().as_list()
style = tf.transpose(style, (0,2,3,1))
style = tf.image.resize(style, (tf.shape(layer_t)[2],tf.shape(layer_t)[3]))
style = tf.transpose(style, (0,3,1,2))
flat_attrs = []
grads = tf.gradients(obj_t, [T(layer) for layer in layers])
for layer, grad_t in zip(layers, grads):
layer_t = T(layer)
attr_t = layer_t * tf.nn.relu(tf.stop_gradient(grad_t))
if len(style_attrs.shape) == 2:
flat_attr_t = tf.reduce_sum(attr_t, axis=(2,3))
elif len(style_attrs.shape) == 4:
flat_attr_t = attr_t
flat_attrs.append(flat_attr_t)
flat_attr_t = tf.concat(flat_attrs, -1)
return tf.reduce_sum(w[:,None,None,None]*flat_attr_t*style)
return inner
def render_facet(model, neuron_obj, layers, style_attrs, strength = (0.1, 0.3), l2_weight = 10.0, resolution = 128, alpha = False):
def mean_alpha():
def inner(T):
input_t = T("input")
return tf.sqrt(tf.reduce_mean(input_t[..., 3:] ** 2))
return objectives.Objective(inner)
standard_transforms = [
transform.pad(2, mode='constant', constant_value=.5),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.jitter(4),
transform.random_scale([0.995**n for n in range(-5,80)] + [0.998**n for n in 2*list(range(20,40))]),
transform.random_rotate(list(range(-20,20))+list(range(-10,10))+list(range(-5,5))+5*[0]),
transform.jitter(2),
transform.crop_or_pad_to(resolution, resolution)
]
if alpha:
standard_transforms.append(transform.collapse_alpha_random())
param_f = lambda: param.image(resolution, batch=9, alpha=True)
else:
param_f = lambda: param.image(resolution, batch=9)
optimizer = tf.train.AdamOptimizer(0.02)
ultimate_layer = [n.name for n in model.graph_def.node if "image_block_4" in n.name][-1]
obj = vector(ultimate_layer, neuron_obj)
facetsp = [(5/len(layers))*attr(obj, style, [layer], strength) for style, layer in list(zip(style_attrs, layers))]
for facetp in facetsp:
obj = obj + facetp
obj = obj + l2_weight*l2()
if alpha:
obj -= mean_alpha()
obj -= 1e2 * objectives.blur_alpha_each_step()
data = render.render_vis(model, obj, param_f, transforms=standard_transforms, optimizer=optimizer, thresholds=(1024*4,))
return data
def one_hot(ind):
z = np.zeros(2560)
z[ind] = 1
return z.astype(np.float32)
facets = ["face", "text", "logo", "pose", "arch", "nature", "indoor"]
model = CLIPImage()
d = one_hot(100)
for facet in facets:
layernames = [n.name for n in model.graph_def.node if ("image_block_3" in n.name) and ("Relu_2" in n.name)][::2]
def loadnpy(url):
import blobfile
from io import BytesIO
fp = blobfile.BlobFile(url, "rb")
x = np.load(BytesIO(fp.read()))
fp.close()
return x
style_attrs = [loadnpy(f"https://openaipublic.blob.core.windows.net/clip/facets/{model.name}/{layername}/{facet}_spatial.npy") for layername in layernames]
for l2_weight in [10]:
img = render_facet(model,
d,
layernames,
style_attrs,
l2_weight = l2_weight,
strength = (0.1, 5.0),
alpha = False,
resolution = 256)
save(img[0][-1], f"/root/{facet}.png")
|
from tokenizer import SimpleTokenizer
from model import CLIPImage, CLIPText
import tensorflow as tf
from lucid.misc.io import load
import numpy as np
def imresize(img, size, scale=255):
from PIL import Image
im = Image.fromarray((img*scale).astype(np.uint8) )
return np.array(im.resize(size, Image.BICUBIC)).astype(np.float32)/scale
tokenizer = SimpleTokenizer()
tf.reset_default_graph()
inp_text, T_text = CLIPText().load()
inp_img, T_img = CLIPImage().load()
sess = tf.Session()
captions = ["This is a dog", "This is a cat", "This is a dog and a cat"]
tokens = []
for caption in captions:
tokens.append(tokenizer.tokenize(caption)[0])
img = imresize(load("https://openaipublic.blob.core.windows.net/clarity/dog_cat.jpeg"), [288,288])
text_embd = sess.run(T_text("text_post/l2_normalize"), {inp_text: tokens})
img_embd = sess.run(T_img("l2_normalize"), {inp_img: [img]})
scores = (text_embd @ img_embd.T)[:,0]
for score, caption in zip(scores, captions):
    print(caption, score)
|
from lucid.modelzoo.vision_base import Model
from lucid.optvis import render
import tensorflow as tf
from lucid.misc.io import load, save
class CLIPImage(Model):
image_value_range = (0, 255)
input_name = 'input_image'
def __init__(self):
self.model_name = "RN50_4x"
self.image_shape = [288, 288, 3]
self.model_path = "https://openaipublic.blob.core.windows.net/clip/tf/RN50_4x/084ee9c176da32014b0ebe42cd7ca66e/image32.pb"
def load(self, inp = None):
import tensorflow as tf
        if inp is None:
self.inp = tf.placeholder(shape = (None,self.image_shape[0], self.image_shape[1], 3), dtype = tf.float32)
else:
self.inp = inp
self.T = render.import_model(self, self.inp, self.inp)
return self.inp, self.T
class CLIPText(Model):
input_name = 'tokens'
def __init__(self):
self.model_name = f"RN50_4x_text"
self.model_path = "https://openaipublic.blob.core.windows.net/clip/tf/RN50_4x/da21bc82c7bba068aa8163333438354c/text32.pb"
def load(self, O = None):
import tensorflow as tf
        if O is None:
self.O = tf.placeholder(tf.int32, [None, None])
else:
self.O = O
tf.import_graph_def(self.graph_def, {self.input_name: self.O}, name = "text")
gph = tf.get_default_graph()
self.T = lambda x: gph.get_tensor_by_name("text/" + x + ":0")
return self.O, self.T
|
# By Alec Radford
import html
import ftfy
import json
import regex as re
from functools import lru_cache
import tensorflow as tf
import blobfile
import numpy as np
def pad(x, pad_length = 76):
z = np.zeros((pad_length))
z[0:len(x)] = x
return z
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings,
    which also avoid mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
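# Hedged sketch added for illustration (not in the original file): bytes_to_unicode is a
# bijection over all 256 byte values, so it can be inverted exactly when decoding. The
# helper name is hypothetical.
def _demo_bytes_to_unicode_roundtrip():
    enc = bytes_to_unicode()
    dec = {v: k for k, v in enc.items()}
    assert len(enc) == 256 and len(dec) == 256
    assert all(dec[enc[b]] == b for b in range(256))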
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
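# Hedged example added for illustration (not in the original file): adjacent symbol pairs
# for the word "low" with an end-of-word marker. The helper name is hypothetical.
def _demo_get_pairs():
    assert get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}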
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path = None):
        if bpe_path is None:
bpe_path = blobfile.BlobFile('https://openaipublic.blob.core.windows.net/clip/bpe_simple_vocab_16e6.txt', 'r')
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
merges = bpe_path.read().split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v:k for k,v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>':'<|startoftext|>', '<|endoftext|>':'<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` no longer occurs in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def tokenize(self, text, n_text = 76, pad = True):
sot = self.encoder['<|startoftext|>']
eot = self.encoder['<|endoftext|>']
tokens = self.encode(text)
tokens = [sot]+tokens[:n_text-1]+[eot]
if pad:
return [tokens + [0]*(n_text+1-len(tokens))]
else:
return tokens
def sot(self):
return self.encoder['<|startoftext|>']
def eot(self):
return self.encoder['<|endoftext|>']
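# Hedged usage sketch added for illustration (not in the original file). Building the
# tokenizer fetches the BPE vocab from the URL above, so this needs network access. The
# helper name is hypothetical.
def _demo_simple_tokenizer():
    tok = SimpleTokenizer()
    ids = tok.encode("hello world")
    assert tok.decode(ids).strip() == "hello world"
    padded = tok.tokenize("hello world")   # [[<sot>, ..., <eot>, 0, 0, ...]]
    assert len(padded[0]) == 77            # n_text=76 plus one extra slot
    return padded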
|