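# Streamlit demo: classify an uploaded digit image with a small Flax CNN whose
# trained weights are loaded from the Hugging Face Hub, then visualise the
# learned convolution kernels.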
import streamlit as st
from PIL import Image
import jax
import numpy as np
import jax.numpy as jnp  # JAX NumPy
from flax import linen as nn  # Linen API
from huggingface_hub import HfFileSystem
from flax.serialization import msgpack_restore, from_state_dict
# Hugging Face access token used to read the model checkpoint from the Hub.
hf_key = st.text_input("Access token", type="password")
class CNN(nn.Module):
    """A simple CNN model."""

    @nn.compact
    def __call__(self, x):
        x = nn.Conv(features=32, kernel_size=(3, 3))(x)
        x = nn.relu(x)
        x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
        x = nn.Conv(features=64, kernel_size=(3, 3))(x)
        x = nn.relu(x)
        x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
        x = x.reshape((x.shape[0], -1))  # flatten
        x = nn.Dense(features=256)(x)
        x = nn.relu(x)
        x = nn.Dense(features=10)(x)
        return x
cnn = CNN()
# Initialise parameters with a dummy 28x28 grayscale batch to get the right shapes.
params = cnn.init(jax.random.PRNGKey(0), jnp.ones([1, 28, 28, 1]))['params']

if not hf_key:
    st.stop()  # wait for an access token before reading the checkpoint from the Hub

# Replace the random initialisation with the trained checkpoint stored on the Hub.
fs = HfFileSystem(token=hf_key)
with fs.open("PrakhAI/HelloWorld/checkpoint.msgpack", "rb") as f:
    params = from_state_dict(params, msgpack_restore(f.read())["params"])
uploaded_file = st.file_uploader("Input Images", type=['jpg', 'png', 'tif'], accept_multiple_files=False)
if uploaded_file is None:
    st.write("Please upload an image!")
else:
    img = Image.open(uploaded_file)
    # Keep only the brightness (V) channel and downscale to the 28x28 input the CNN expects.
    rescaled = img.convert("HSV").split()[2].resize((28, 28))
    st.image(rescaled)
    brightness = jnp.array(rescaled)
    model_input = brightness.reshape(1, 28, 28, 1) / 255.
    # Show the most likely digit class.
    st.write(cnn.apply({"params": params}, model_input).argmax(axis=1)[0])
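# Visualise learned kernels: upscale each convolution filter, frame it with a dark
# border, and tile all of them into a single grid[0] x grid[1] mosaic image.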
def gridify(kernel, grid, kernel_size, scaling=5, padding=1):
    # Upsample each kernel by `scaling` and pad it with a -1 (dark) border.
    tile_h, tile_w = kernel_size[0] * scaling + 2 * padding, kernel_size[1] * scaling + 2 * padding
    tiles = np.pad(np.repeat(np.repeat(kernel, repeats=scaling, axis=0), repeats=scaling, axis=1),
                   ((padding,), (padding,), (0,), (0,)), 'constant', constant_values=(-1,))
    # Arrange the in_channels * out_channels kernels into a grid[0] x grid[1] mosaic,
    # mapping values from roughly [-1, 1] to [0, 254].
    mosaic = (tiles.reshape((tile_h, tile_w, grid[0], grid[1])).transpose(2, 0, 3, 1)
              .reshape(grid[0] * tile_h, grid[1] * tile_w) + 1) * 127.
    mosaic = np.pad(mosaic, (padding,), 'constant', constant_values=(0,))
    # Replicate the single channel to RGB for display.
    st.image(Image.fromarray(np.repeat(np.expand_dims(mosaic, axis=0), repeats=3, axis=0)
                             .astype(np.uint8).transpose(1, 2, 0), mode="RGB"))
with st.expander("See first convolutional layer"):
    gridify(params["Conv_0"]["kernel"], grid=(4, 8), kernel_size=(3, 3))
with st.expander("See second convolutional layer"):
    gridify(params["Conv_1"]["kernel"], grid=(32, 64), kernel_size=(3, 3))