andreped commited on
Commit
856ed7f
·
unverified ·
2 Parent(s): 808d93e 188c089

Merge pull request #14 from andreped/dev

Browse files

Added argparse support + linting CI + refactored

Files changed (9) hide show
  1. .github/workflows/linting.yml +28 -0
  2. README.md +25 -1
  3. app.py +29 -4
  4. neukit/gui.py +76 -38
  5. neukit/inference.py +51 -28
  6. neukit/utils.py +9 -5
  7. setup.cfg +14 -0
  8. shell/format.sh +4 -0
  9. shell/lint.sh +23 -0
.github/workflows/linting.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Linting
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - '*'
7
+ pull_request:
8
+ branches:
9
+ - '*'
10
+ workflow_dispatch:
11
+
12
+ jobs:
13
+ build:
14
+ runs-on: ubuntu-20.04
15
+ steps:
16
+ - uses: actions/checkout@v1
17
+ - name: Set up Python 3.7
18
+ uses: actions/setup-python@v2
19
+ with:
20
+ python-version: 3.7
21
+
22
+ - name: Install lint dependencies
23
+ run: |
24
+ pip install wheel setuptools
25
+ pip install black==22.3.0 isort==5.10.1 flake8==4.0.1
26
+
27
+ - name: Lint the code
28
+ run: sh shell/lint.sh
README.md CHANGED
@@ -10,7 +10,7 @@ license: mit
10
  app_file: app.py
11
  ---
12
 
13
- <div align="center">
14
  <h1 align="center">neukit</h1>
15
  <h3 align="center">Automatic brain extraction and preoperative tumor segmentation from MRI</h3>
16
 
@@ -36,6 +36,8 @@ To access the live demo, click on the `Hugging Face` badge above. Below is a sna
36
 
37
  ## Development
38
 
 
 
39
  Alternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:
40
 
41
  ```
@@ -45,6 +47,28 @@ docker run -it -p 7860:7860 neukit
45
 
46
  Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  ## Citation
49
 
50
  If you found the tool useful in your research, please, cite the corresponding software paper:
 
10
  app_file: app.py
11
  ---
12
 
13
+ <div align="center">
14
  <h1 align="center">neukit</h1>
15
  <h3 align="center">Automatic brain extraction and preoperative tumor segmentation from MRI</h3>
16
 
 
36
 
37
  ## Development
38
 
39
+ ### Docker
40
+
41
  Alternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:
42
 
43
  ```
 
47
 
48
  Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.
49
 
50
+ ### Python
51
+
52
+ It is also possible to run the app locally without Docker. Just set up a virtual environment and run the app.
53
+ Note that the current working directory would need to be adjusted based on where `neukit` is located on disk.
54
+
55
+ ```
56
+ git clone https://github.com/andreped/neukit.git
57
+ cd neukit/
58
+
59
+ virtualenv -ppython3 venv --clear
60
+ source venv/bin/activate
61
+ pip install -r requirements.txt
62
+
63
+ python app.py --cwd ./
64
+ ```
65
+
66
+ ## Troubleshooting
67
+
68
+ Note that due to `share=True` being enabled by default when launching the app,
69
+ internet access is required for the app to be launched. This can be disabled by setting
70
+ the argument to `--share 0`.
71
+
72
  ## Citation
73
 
74
  If you found the tool useful in your research, please, cite the corresponding software paper:
app.py CHANGED
@@ -1,14 +1,39 @@
 
 
 
1
  from neukit.gui import WebUI
2
 
3
 
4
  def main():
5
- print("Launching demo...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- # cwd = "/Users/andreped/workspace/neukit/" # local testing -> macOS
8
- cwd = "/home/user/app/" # production -> docker
 
 
 
 
 
 
 
9
 
10
  # initialize and run app
11
- app = WebUI(cwd=cwd)
 
12
  app.run()
13
 
14
 
 
1
+ import os
2
+ from argparse import ArgumentParser
3
+
4
  from neukit.gui import WebUI
5
 
6
 
7
  def main():
8
+ parser = ArgumentParser()
9
+ parser.add_argument(
10
+ "--cwd",
11
+ type=str,
12
+ default="/home/user/app/",
13
+ help="Set current working directory (path to app.py).",
14
+ )
15
+ parser.add_argument(
16
+ "--share",
17
+ type=int,
18
+ default=1,
19
+ help="Whether to enable the app to be accessible online"
20
+ " -> sets up a public link which requires internet access.",
21
+ )
22
+ args = parser.parse_args()
23
 
24
+ print("Current working directory:", args.cwd)
25
+
26
+ if not os.path.exists(args.cwd):
27
+ raise ValueError("Chosen 'cwd' is not a valid path!")
28
+ if args.share not in [0, 1]:
29
+ raise ValueError(
30
+ "The 'share' argument can only be set to 0 or 1, but was:",
31
+ args.share,
32
+ )
33
 
34
  # initialize and run app
35
+ print("Launching demo...")
36
+ app = WebUI(cwd=args.cwd, share=args.share)
37
  app.run()
38
 
39
 
neukit/gui.py CHANGED
@@ -1,10 +1,20 @@
 
 
1
  import gradio as gr
2
- from .utils import load_ct_to_numpy, load_pred_volume_to_numpy, nifti_to_glb
3
  from .inference import run_model
 
 
 
4
 
5
 
6
  class WebUI:
7
- def __init__(self, model_name:str = None, cwd:str = "/home/user/app/"):
 
 
 
 
 
8
  # global states
9
  self.images = []
10
  self.pred_images = []
@@ -14,8 +24,9 @@ class WebUI:
14
 
15
  self.model_name = model_name
16
  self.cwd = cwd
 
17
 
18
- self.class_name = "meningioma" # default - but can be updated based on which task is chosen from dropdown
19
  self.class_names = {
20
  "meningioma": "MRI_Meningioma",
21
  "low-grade": "MRI_LGGlioma",
@@ -33,41 +44,55 @@ class WebUI:
33
  }
34
 
35
  # define widgets not to be rendered immediately, but later on
36
- self.slider = gr.Slider(1, self.nb_slider_items, value=1, step=1, label="Which 2D slice to show")
 
 
 
 
 
 
37
  self.volume_renderer = gr.Model3D(
38
  clear_color=[0.0, 0.0, 0.0, 0.0],
39
  label="3D Model",
40
  visible=True,
41
  elem_id="model-3d",
42
  ).style(height=512)
43
-
44
  def set_class_name(self, value):
45
  print("Changed task to:", value)
46
  self.class_name = value
47
 
48
  def combine_ct_and_seg(self, img, pred):
49
  return (img, [(pred, self.class_name)])
50
-
51
  def upload_file(self, file):
52
  return file.name
53
-
54
- def load_mesh(self, mesh_file_name):
55
  path = mesh_file_name.name
56
- run_model(path, model_path=self.cwd + "resources/models/", task=self.class_names[self.class_name], name=self.result_names[self.class_name])
 
 
 
 
 
57
  nifti_to_glb("prediction.nii.gz")
58
 
59
  self.images = load_ct_to_numpy(path)
60
  self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
61
  return "./prediction.obj"
62
-
63
  def get_img_pred_pair(self, k):
64
  k = int(k) - 1
65
  out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
66
- out[k] = gr.AnnotatedImage.update(self.combine_ct_and_seg(self.images[k], self.pred_images[k]), visible=True)
 
 
 
67
  return out
68
 
69
  def run(self):
70
- css="""
71
  #model-3d {
72
  height: 512px;
73
  }
@@ -80,18 +105,15 @@ class WebUI:
80
  }
81
  """
82
  with gr.Blocks(css=css) as demo:
83
-
84
  with gr.Row():
85
-
86
- file_output = gr.File(file_count="single", elem_id="upload") # elem_id="upload"
87
  file_output.upload(self.upload_file, file_output, file_output)
88
 
89
- # with gr.Column():
90
-
91
  model_selector = gr.Dropdown(
92
  list(self.class_names.keys()),
93
  label="Task",
94
- info="Which task to perform - one model for each brain tumor type and brain extraction",
 
95
  multiselect=False,
96
  size="sm",
97
  )
@@ -101,39 +123,55 @@ class WebUI:
101
  outputs=None,
102
  )
103
 
104
- run_btn = gr.Button("Run analysis").style(full_width=False, size="lg")
 
 
105
  run_btn.click(
106
- fn=lambda x: self.load_mesh(x),
107
  inputs=file_output,
108
  outputs=self.volume_renderer,
109
  )
110
-
111
  with gr.Row():
112
  gr.Examples(
113
- examples=[self.cwd + "RegLib_C01_1.nii", self.cwd + "RegLib_C01_2.nii"],
 
 
 
114
  inputs=file_output,
115
  outputs=file_output,
116
  fn=self.upload_file,
117
  cache_examples=True,
118
  )
119
-
120
  with gr.Row():
121
  with gr.Box():
122
- image_boxes = []
123
- for i in range(self.nb_slider_items):
124
- visibility = True if i == 1 else False
125
- t = gr.AnnotatedImage(visible=visibility, elem_id="model-2d")\
126
- .style(color_map={self.class_name: "#ffae00"}, height=512, width=512)
127
- image_boxes.append(t)
128
-
129
- self.slider.change(self.get_img_pred_pair, self.slider, image_boxes)
130
-
 
 
 
 
 
 
 
 
 
 
131
  with gr.Box():
132
  self.volume_renderer.render()
133
-
134
- with gr.Row():
135
- self.slider.render()
136
 
137
- # sharing app publicly -> share=True: https://gradio.app/sharing-your-app/
138
- # inference times > 60 seconds -> need queue(): https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
139
- demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
 
 
 
 
 
1
+ import os
2
+
3
  import gradio as gr
4
+
5
  from .inference import run_model
6
+ from .utils import load_ct_to_numpy
7
+ from .utils import load_pred_volume_to_numpy
8
+ from .utils import nifti_to_glb
9
 
10
 
11
  class WebUI:
12
+ def __init__(
13
+ self,
14
+ model_name: str = None,
15
+ cwd: str = "/home/user/app/",
16
+ share: int = 1,
17
+ ):
18
  # global states
19
  self.images = []
20
  self.pred_images = []
 
24
 
25
  self.model_name = model_name
26
  self.cwd = cwd
27
+ self.share = share
28
 
29
+ self.class_name = "meningioma" # default
30
  self.class_names = {
31
  "meningioma": "MRI_Meningioma",
32
  "low-grade": "MRI_LGGlioma",
 
44
  }
45
 
46
  # define widgets not to be rendered immediately, but later on
47
+ self.slider = gr.Slider(
48
+ 1,
49
+ self.nb_slider_items,
50
+ value=1,
51
+ step=1,
52
+ label="Which 2D slice to show",
53
+ )
54
  self.volume_renderer = gr.Model3D(
55
  clear_color=[0.0, 0.0, 0.0, 0.0],
56
  label="3D Model",
57
  visible=True,
58
  elem_id="model-3d",
59
  ).style(height=512)
60
+
61
  def set_class_name(self, value):
62
  print("Changed task to:", value)
63
  self.class_name = value
64
 
65
  def combine_ct_and_seg(self, img, pred):
66
  return (img, [(pred, self.class_name)])
67
+
68
  def upload_file(self, file):
69
  return file.name
70
+
71
+ def process(self, mesh_file_name):
72
  path = mesh_file_name.name
73
+ run_model(
74
+ path,
75
+ model_path=os.path.join(self.cwd, "resources/models/"),
76
+ task=self.class_names[self.class_name],
77
+ name=self.result_names[self.class_name],
78
+ )
79
  nifti_to_glb("prediction.nii.gz")
80
 
81
  self.images = load_ct_to_numpy(path)
82
  self.pred_images = load_pred_volume_to_numpy("./prediction.nii.gz")
83
  return "./prediction.obj"
84
+
85
  def get_img_pred_pair(self, k):
86
  k = int(k) - 1
87
  out = [gr.AnnotatedImage.update(visible=False)] * self.nb_slider_items
88
+ out[k] = gr.AnnotatedImage.update(
89
+ self.combine_ct_and_seg(self.images[k], self.pred_images[k]),
90
+ visible=True,
91
+ )
92
  return out
93
 
94
  def run(self):
95
+ css = """
96
  #model-3d {
97
  height: 512px;
98
  }
 
105
  }
106
  """
107
  with gr.Blocks(css=css) as demo:
 
108
  with gr.Row():
109
+ file_output = gr.File(file_count="single", elem_id="upload")
 
110
  file_output.upload(self.upload_file, file_output, file_output)
111
 
 
 
112
  model_selector = gr.Dropdown(
113
  list(self.class_names.keys()),
114
  label="Task",
115
+ info="Which task to perform - one model for"
116
+ " each brain tumor type and brain extraction",
117
  multiselect=False,
118
  size="sm",
119
  )
 
123
  outputs=None,
124
  )
125
 
126
+ run_btn = gr.Button("Run analysis").style(
127
+ full_width=False, size="lg"
128
+ )
129
  run_btn.click(
130
+ fn=lambda x: self.process(x),
131
  inputs=file_output,
132
  outputs=self.volume_renderer,
133
  )
134
+
135
  with gr.Row():
136
  gr.Examples(
137
+ examples=[
138
+ os.path.join(self.cwd, "RegLib_C01_1.nii"),
139
+ os.path.join(self.cwd, "RegLib_C01_2.nii"),
140
+ ],
141
  inputs=file_output,
142
  outputs=file_output,
143
  fn=self.upload_file,
144
  cache_examples=True,
145
  )
146
+
147
  with gr.Row():
148
  with gr.Box():
149
+ with gr.Column():
150
+ image_boxes = []
151
+ for i in range(self.nb_slider_items):
152
+ visibility = True if i == 1 else False
153
+ t = gr.AnnotatedImage(
154
+ visible=visibility, elem_id="model-2d"
155
+ ).style(
156
+ color_map={self.class_name: "#ffae00"},
157
+ height=512,
158
+ width=512,
159
+ )
160
+ image_boxes.append(t)
161
+
162
+ self.slider.input(
163
+ self.get_img_pred_pair, self.slider, image_boxes
164
+ )
165
+
166
+ self.slider.render()
167
+
168
  with gr.Box():
169
  self.volume_renderer.render()
 
 
 
170
 
171
+ # sharing app publicly -> share=True:
172
+ # https://gradio.app/sharing-your-app/
173
+ # inference times > 60 seconds -> need queue():
174
+ # https://github.com/tloen/alpaca-lora/issues/60#issuecomment-1510006062
175
+ demo.queue().launch(
176
+ server_name="0.0.0.0", server_port=7860, share=self.share
177
+ )
neukit/inference.py CHANGED
@@ -1,23 +1,28 @@
1
- import os
2
- import shutil
3
  import configparser
4
  import logging
5
- import traceback
 
6
 
7
 
8
- def run_model(input_path: str, model_path: str, verbose: str = "info", task: str = "MRI_Meningioma", name: str = "Tumor"):
 
 
 
 
 
 
9
  logging.basicConfig()
10
  logging.getLogger().setLevel(logging.WARNING)
11
 
12
- if verbose == 'debug':
13
  logging.getLogger().setLevel(logging.DEBUG)
14
- elif verbose == 'info':
15
  logging.getLogger().setLevel(logging.INFO)
16
- elif verbose == 'error':
17
  logging.getLogger().setLevel(logging.ERROR)
18
  else:
19
  raise ValueError("Unsupported verbose value provided:", verbose)
20
-
21
  # delete patient/result folder if they exist
22
  if os.path.exists("./patient/"):
23
  shutil.rmtree("./patient/")
@@ -25,33 +30,42 @@ def run_model(input_path: str, model_path: str, verbose: str = "info", task: str
25
  shutil.rmtree("./result/")
26
 
27
  try:
28
- # create sequence folder, rename patient, and add to temporary patient directory
29
  filename = input_path.split("/")[-1]
30
  splits = filename.split(".")
31
  extension = ".".join(splits[1:])
32
  patient_directory = "./patient/"
33
  os.makedirs(patient_directory + "T0/", exist_ok=True)
34
- shutil.copy(input_path, patient_directory + "T0/" + splits[0] + "-t1gd." + extension)
35
-
 
 
 
36
  # define output directory to save results
37
  output_path = "./result/prediction-" + splits[0] + "/"
38
  os.makedirs(output_path, exist_ok=True)
39
 
40
  # Setting up the configuration file
41
  rads_config = configparser.ConfigParser()
42
- rads_config.add_section('Default')
43
- rads_config.set('Default', 'task', 'neuro_diagnosis')
44
- rads_config.set('Default', 'caller', '')
45
- rads_config.add_section('System')
46
- rads_config.set('System', 'gpu_id', "-1")
47
- rads_config.set('System', 'input_folder', patient_directory)
48
- rads_config.set('System', 'output_folder', output_path)
49
- rads_config.set('System', 'model_folder', model_path)
50
- rads_config.set('System', 'pipeline_filename', os.path.join(model_path, task, 'pipeline.json'))
51
- rads_config.add_section('Runtime')
52
- rads_config.set('Runtime', 'reconstruction_method', 'thresholding') # thresholding, probabilities
53
- rads_config.set('Runtime', 'reconstruction_order', 'resample_first')
54
- rads_config.set('Runtime', 'use_preprocessed_data', 'False')
 
 
 
 
 
 
55
 
56
  with open("rads_config.ini", "w") as f:
57
  rads_config.write(f)
@@ -59,11 +73,20 @@ def run_model(input_path: str, model_path: str, verbose: str = "info", task: str
59
  # finally, run inference
60
  from raidionicsrads.compute import run_rads
61
 
62
- run_rads(config_filename='rads_config.ini')
63
-
64
  # rename and move final result
65
- os.rename("./result/prediction-" + splits[0] + "/T0/" + splits[0] + "-t1gd_annotation-" + name + ".nii.gz", "./prediction.nii.gz")
66
-
 
 
 
 
 
 
 
 
 
67
  except Exception as e:
68
  print(e)
69
 
 
 
 
1
  import configparser
2
  import logging
3
+ import os
4
+ import shutil
5
 
6
 
7
+ def run_model(
8
+ input_path: str,
9
+ model_path: str,
10
+ verbose: str = "info",
11
+ task: str = "MRI_Meningioma",
12
+ name: str = "Tumor",
13
+ ):
14
  logging.basicConfig()
15
  logging.getLogger().setLevel(logging.WARNING)
16
 
17
+ if verbose == "debug":
18
  logging.getLogger().setLevel(logging.DEBUG)
19
+ elif verbose == "info":
20
  logging.getLogger().setLevel(logging.INFO)
21
+ elif verbose == "error":
22
  logging.getLogger().setLevel(logging.ERROR)
23
  else:
24
  raise ValueError("Unsupported verbose value provided:", verbose)
25
+
26
  # delete patient/result folder if they exist
27
  if os.path.exists("./patient/"):
28
  shutil.rmtree("./patient/")
 
30
  shutil.rmtree("./result/")
31
 
32
  try:
33
+ # setup temporary patient directory
34
  filename = input_path.split("/")[-1]
35
  splits = filename.split(".")
36
  extension = ".".join(splits[1:])
37
  patient_directory = "./patient/"
38
  os.makedirs(patient_directory + "T0/", exist_ok=True)
39
+ shutil.copy(
40
+ input_path,
41
+ patient_directory + "T0/" + splits[0] + "-t1gd." + extension,
42
+ )
43
+
44
  # define output directory to save results
45
  output_path = "./result/prediction-" + splits[0] + "/"
46
  os.makedirs(output_path, exist_ok=True)
47
 
48
  # Setting up the configuration file
49
  rads_config = configparser.ConfigParser()
50
+ rads_config.add_section("Default")
51
+ rads_config.set("Default", "task", "neuro_diagnosis")
52
+ rads_config.set("Default", "caller", "")
53
+ rads_config.add_section("System")
54
+ rads_config.set("System", "gpu_id", "-1")
55
+ rads_config.set("System", "input_folder", patient_directory)
56
+ rads_config.set("System", "output_folder", output_path)
57
+ rads_config.set("System", "model_folder", model_path)
58
+ rads_config.set(
59
+ "System",
60
+ "pipeline_filename",
61
+ os.path.join(model_path, task, "pipeline.json"),
62
+ )
63
+ rads_config.add_section("Runtime")
64
+ rads_config.set(
65
+ "Runtime", "reconstruction_method", "thresholding"
66
+ ) # thresholding, probabilities
67
+ rads_config.set("Runtime", "reconstruction_order", "resample_first")
68
+ rads_config.set("Runtime", "use_preprocessed_data", "False")
69
 
70
  with open("rads_config.ini", "w") as f:
71
  rads_config.write(f)
 
73
  # finally, run inference
74
  from raidionicsrads.compute import run_rads
75
 
76
+ run_rads(config_filename="rads_config.ini")
77
+
78
  # rename and move final result
79
+ os.rename(
80
+ "./result/prediction-"
81
+ + splits[0]
82
+ + "/T0/"
83
+ + splits[0]
84
+ + "-t1gd_annotation-"
85
+ + name
86
+ + ".nii.gz",
87
+ "./prediction.nii.gz",
88
+ )
89
+
90
  except Exception as e:
91
  print(e)
92
 
neukit/utils.py CHANGED
@@ -1,5 +1,5 @@
1
- import numpy as np
2
  import nibabel as nib
 
3
  from nibabel.processing import resample_to_output
4
  from skimage.measure import marching_cubes
5
 
@@ -52,12 +52,16 @@ def nifti_to_glb(path, output="prediction.obj"):
52
  verts, faces, normals, values = marching_cubes(data, 0)
53
  faces += 1
54
 
55
- with open(output, 'w') as thefile:
56
  for item in verts:
57
- thefile.write("v {0} {1} {2}\n".format(item[0],item[1],item[2]))
58
 
59
  for item in normals:
60
- thefile.write("vn {0} {1} {2}\n".format(item[0],item[1],item[2]))
61
 
62
  for item in faces:
63
- thefile.write("f {0}//{0} {1}//{1} {2}//{2}\n".format(item[0],item[1],item[2]))
 
 
 
 
 
 
1
  import nibabel as nib
2
+ import numpy as np
3
  from nibabel.processing import resample_to_output
4
  from skimage.measure import marching_cubes
5
 
 
52
  verts, faces, normals, values = marching_cubes(data, 0)
53
  faces += 1
54
 
55
+ with open(output, "w") as thefile:
56
  for item in verts:
57
+ thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
58
 
59
  for item in normals:
60
+ thefile.write("vn {0} {1} {2}\n".format(item[0], item[1], item[2]))
61
 
62
  for item in faces:
63
+ thefile.write(
64
+ "f {0}//{0} {1}//{1} {2}//{2}\n".format(
65
+ item[0], item[1], item[2]
66
+ )
67
+ )
setup.cfg ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [metadata]
2
+ description-file = README.md
3
+
4
+ [isort]
5
+ force_single_line=True
6
+ known_first_party=neukit
7
+ line_length=80
8
+ profile=black
9
+
10
+ [flake8]
11
+ # imported but unused in __init__.py, that's ok.
12
+ per-file-ignores=*__init__.py:F401
13
+ ignore=E203,W503,W605,F632,E266,E731,E712,E741
14
+ max-line-length=80
shell/format.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #!/bin/bash
2
+ isort --sl neukit app.py
3
+ black --line-length 80 neukit app.py
4
+ flake8 neukit app.py
shell/lint.sh ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ isort --check --sl -c neukit app.py
3
+ if ! [ $? -eq 0 ]
4
+ then
5
+ echo "Please run \"sh shell/format.sh\" to format the code."
6
+ exit 1
7
+ fi
8
+ echo "no issues with isort"
9
+ flake8 neukit app.py
10
+ if ! [ $? -eq 0 ]
11
+ then
12
+ echo "Please fix the code style issue."
13
+ exit 1
14
+ fi
15
+ echo "no issues with flake8"
16
+ black --check --line-length 80 neukit app.py
17
+ if ! [ $? -eq 0 ]
18
+ then
19
+ echo "Please run \"sh shell/format.sh\" to format the code."
20
+ exit 1
21
+ fi
22
+ echo "no issues with black"
23
+ echo "linting success!"