Luis Oala
committed on
Commit
·
c5473b3
1
Parent(s):
10fad57
demo files for static processing pipeline
Browse files- __pycache__/dataset.cpython-37.pyc +0 -0
- demo-files/car.png +0 -0
- demo-files/demo-requirements.txt +18 -0
- demo-files/micro.png +0 -0
- demo.py +54 -0
- processing/__pycache__/pipeline_numpy.cpython-37.pyc +0 -0
- utils/__pycache__/base.cpython-37.pyc +0 -0
- utils/__pycache__/dataset_utils.cpython-37.pyc +0 -0
- utils/base.py +2 -2
__pycache__/dataset.cpython-37.pyc
ADDED
Binary file (19.9 kB). View file
|
|
demo-files/car.png
ADDED
![]() |
demo-files/demo-requirements.txt
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
scikit_image
|
2 |
+
pandas
|
3 |
+
scipy
|
4 |
+
numpy
|
5 |
+
matplotlib
|
6 |
+
b2sdk
|
7 |
+
colour_demosaicing
|
8 |
+
gradio
|
9 |
+
ipython
|
10 |
+
mlflow
|
11 |
+
Pillow
|
12 |
+
pytorch_toolbelt
|
13 |
+
rawpy
|
14 |
+
scikit_learn
|
15 |
+
segmentation_models_pytorch
|
16 |
+
tifffile
|
17 |
+
torch==1.9.0
|
18 |
+
torchvision==0.10.0
|
demo-files/micro.png
ADDED
![]() |
demo.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
#import tensorflow as tf
|
3 |
+
import numpy as np
|
4 |
+
import json
|
5 |
+
from os.path import dirname, realpath, join
|
6 |
+
import processing.pipeline_numpy as ppn
|
7 |
+
|
8 |
+
|
9 |
+
# Load human-readable labels for ImageNet.
|
10 |
+
current_dir = dirname(realpath(__file__))
|
11 |
+
|
12 |
+
|
13 |
+
def process(RawImage, CameraParameters, Debayer, Sharpening, Denoising):
    """Run the static raw-image processing pipeline on one image.

    Args:
        RawImage: HxWxC uint8 array from the Gradio image input; only
            channel 0 is used as the raw sensor plane.
        CameraParameters: either "Microscope" or "Drone"; selects the
            calibration constants (black level, white balance, colour matrix).
        Debayer: demosaicing method name passed through to the pipeline.
        Sharpening: sharpening method name passed through to the pipeline.
        Denoising: denoising method name passed through to the pipeline.

    Returns:
        The processed image as returned by ``ppn.processing``.

    Raises:
        ValueError: if ``CameraParameters`` is not a known camera preset.
    """
    raw_img = RawImage
    if CameraParameters == "Microscope":
        # Microscope calibration constants.
        black_level = [9.834368023181512e-06, 9.834368023181512e-06, 9.834368023181512e-06, 9.834368023181512e-06]
        white_balance = [-0.6567, 1.9673, 3.5304]
        colour_matrix = [-2.0338, 0.0933, 0.4157, -0.0286, 2.6464, -0.0574, -0.5516, -0.0947, 2.9308]
    elif CameraParameters == "Drone":
        # Drone calibration constants.
        black_level = [0.0625, 0.0626, 0.0625, 0.0626]
        white_balance = [2.86653646, 1., 1.73079425]
        colour_matrix = [1.50768983, -0.33571374, -0.17197604, -0.23048614,
                        1.70698738, -0.47650126, -0.03119153, -0.32803956, 1.35923111]
    else:
        # BUG FIX: the original only printed a message here and then crashed
        # later with a NameError (black_level etc. undefined). Fail fast with
        # a clear, catchable error instead.
        raise ValueError(
            "No valid camera parameter: expected 'Microscope' or 'Drone', "
            f"got {CameraParameters!r}"
        )
    debayer = Debayer
    sharpening = Sharpening
    denoising = Denoising
    print(np.max(raw_img))  # debug: raw input range
    # Use only the first channel and normalize uint8 -> [0, 1] float64.
    raw_img = (raw_img[:, :, 0].astype(np.float64) / 255.)
    img = ppn.processing(raw_img, black_level, white_balance, colour_matrix,
                         debayer=debayer, sharpening=sharpening, denoising=denoising)
    print(np.max(img))  # debug: processed output range
    return img
36 |
+
|
37 |
+
|
38 |
+
# Assemble the Gradio demo: a raw image plus pipeline choices go in,
# the processed image comes out.
_input_components = [
    gr.inputs.Image(),
    gr.inputs.Radio(["Microscope", "Drone"]),
    gr.inputs.Dropdown(["bilinear", "malvar2004", "menon2007"]),
    gr.inputs.Dropdown(["sharpening_filter", "unsharp_masking"]),
    gr.inputs.Dropdown(["gaussian_denoising", "median_denoising"]),
]

_example_inputs = [
    ["demo-files/car.png"],
    ["demo-files/micro.png"],
]

iface = gr.Interface(
    process,
    _input_components,
    "image",
    capture_session=True,
    examples=_example_inputs,
    title="Lens2Logit - Static processing demo",
    description="You can select a sample raw image, the camera parameters and the pipeline configuration to process the raw image.",
)

if __name__ == "__main__":
    # Launch with a public share link when run as a script.
    iface.launch(share=True)
processing/__pycache__/pipeline_numpy.cpython-37.pyc
ADDED
Binary file (9.75 kB). View file
|
|
utils/__pycache__/base.cpython-37.pyc
ADDED
Binary file (9.97 kB). View file
|
|
utils/__pycache__/dataset_utils.cpython-37.pyc
ADDED
Binary file (6.76 kB). View file
|
|
utils/base.py
CHANGED
@@ -200,8 +200,8 @@ def get_mlflow_model_by_name(experiment_name, run_name,
|
|
200 |
|
201 |
# 0. mlflow basics
|
202 |
mlflow.set_tracking_uri(tracking_uri)
|
203 |
-
os.environ["AWS_ACCESS_KEY_ID"] = #TODO: add your AWS access key if you want to write your results to our collaborative lab server
|
204 |
-
os.environ["AWS_SECRET_ACCESS_KEY"] = #TODO: add your AWS seceret access key if you want to write your results to our collaborative lab server
|
205 |
|
206 |
# # 1. use get_experiment_by_name to get experiment object
|
207 |
experiment = mlflow.get_experiment_by_name(experiment_name)
|
|
|
200 |
|
201 |
# 0. mlflow basics
|
202 |
mlflow.set_tracking_uri(tracking_uri)
|
203 |
+
os.environ["AWS_ACCESS_KEY_ID"] = "#TODO: add your AWS access key if you want to write your results to our collaborative lab server"
|
204 |
+
os.environ["AWS_SECRET_ACCESS_KEY"] = "#TODO: add your AWS secret access key if you want to write your results to our collaborative lab server"
|
205 |
|
206 |
# # 1. use get_experiment_by_name to get experiment object
|
207 |
experiment = mlflow.get_experiment_by_name(experiment_name)
|