yxmauw committed (verified)
Commit 2cb6504 · 1 Parent(s): 0a7e90f

Update app.py

Files changed (1)
  1. app.py  +49 -47
app.py CHANGED
@@ -3,72 +3,74 @@ from gpt4all import GPT4All
 from urllib.request import urlopen
 import json
 import time
+from load_llms import (format_html_string, model_choices,
+                       llm_intro, model_cache, load_model)
 
 
 # populate all models available from GPT4All
-url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
-response = urlopen(url)
-data_json = json.loads(response.read())
+# url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
+# response = urlopen(url)
+# data_json = json.loads(response.read())
 
 
-def model_choices():
-    model_list = [data_json[i]['filename'] for i in range(len(data_json))]
-    return model_list
+# def model_choices():
+#     model_list = [data_json[i]['filename'] for i in range(len(data_json))]
+#     return model_list
 
 
 # get each models' description
-model_description = {model['filename']: model['description'] for model in data_json}
+# model_description = {model['filename']: model['description'] for model in data_json}
 
 
-def remove_endtags(html_string, tags):
-    """Remove rear HTML tags from the input string."""
-    for tag in tags:
-        html_string = re.sub(fr"</{tag}>", "", html_string)
-    return html_string
+# def remove_endtags(html_string, tags):
+#     """Remove rear HTML tags from the input string."""
+#     for tag in tags:
+#         html_string = re.sub(fr"</{tag}>", "", html_string)
+#     return html_string
 
 
-def replace_starttags(html_string, replacements):
-    """Replace starting HTML tags with the corresponding values."""
-    for tag, replacement in replacements.items():
-        html_string = html_string.replace(tag, replacement)
-    return html_string
+# def replace_starttags(html_string, replacements):
+#     """Replace starting HTML tags with the corresponding values."""
+#     for tag, replacement in replacements.items():
+#         html_string = html_string.replace(tag, replacement)
+#     return html_string
 
 
-def format_html_string(html_string):
-    """Format the HTML string to a readable text format."""
-    tags_to_remove = ["ul", "li", "br"]
-    html_string = remove_endtags(html_string, tags_to_remove)
+# def format_html_string(html_string):
+#     """Format the HTML string to a readable text format."""
+#     tags_to_remove = ["ul", "li", "br"]
+#     html_string = remove_endtags(html_string, tags_to_remove)
 
-    tag_replacements = {
-        "<li>": "\n➤ ",
-        "<br>": "\n",
-        "<strong>": "**",
-        "</strong>": "**"
-    }
-    formatted_string = replace_starttags(html_string, tag_replacements)
+#     tag_replacements = {
+#         "<li>": "\n➤ ",
+#         "<br>": "\n",
+#         "<strong>": "**",
+#         "</strong>": "**"
+#     }
+#     formatted_string = replace_starttags(html_string, tag_replacements)
 
-    return formatted_string
+#     return formatted_string
 
 
-def llm_intro(selected_model):
-    html_string = model_description.get(selected_model, "No description available for this model selection.")
-    formatted_description = format_html_string(html_string)
-    return formatted_description
+# def llm_intro(selected_model):
+#     html_string = model_description.get(selected_model, "No description available for this model selection.")
+#     formatted_description = format_html_string(html_string)
+#     return formatted_description
 
-# cache models for faster reloads
-model_cache = {}
-
-
-def load_model(model_name):
-    """
-    This function checks the cache before loading a model.
-    If the model is cached, it returns the cached version.
-    Otherwise, it loads the model, caches it, and then returns it.
-    """
-    if model_name not in model_cache:
-        model = GPT4All(model_name)
-        model_cache[model_name] = model
-    return model_cache[model_name]
+# # cache models for faster reloads
+# model_cache = {}
+
+
+# def load_model(model_name):
+#     """
+#     This function checks the cache before loading a model.
+#     If the model is cached, it returns the cached version.
+#     Otherwise, it loads the model, caches it, and then returns it.
+#     """
+#     if model_name not in model_cache:
+#         model = GPT4All(model_name)
+#         model_cache[model_name] = model
+#     return model_cache[model_name]
 
 # clear = gr.ClearButton([input_text, chatbot])
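
The load_llms module that app.py now imports from is not shown in this diff. For context, here is a minimal sketch of what it presumably exports, assembled from the code this commit removes from app.py; the file name load_llms.py, the docstrings, and the exact layout are assumptions, and the real module may differ.

# load_llms.py: hypothetical reconstruction (not part of this commit's diff),
# assembled from the code removed from app.py above
import json
import re
from urllib.request import urlopen

from gpt4all import GPT4All

# populate all models available from GPT4All
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())


def model_choices():
    """Return the filenames of all models listed in models3.json."""
    model_list = [data_json[i]['filename'] for i in range(len(data_json))]
    return model_list


# map each model's filename to its HTML description
model_description = {model['filename']: model['description'] for model in data_json}


def remove_endtags(html_string, tags):
    """Remove rear HTML tags from the input string."""
    for tag in tags:
        html_string = re.sub(fr"</{tag}>", "", html_string)
    return html_string


def replace_starttags(html_string, replacements):
    """Replace starting HTML tags with the corresponding values."""
    for tag, replacement in replacements.items():
        html_string = html_string.replace(tag, replacement)
    return html_string


def format_html_string(html_string):
    """Format the HTML description into readable Markdown-style text."""
    html_string = remove_endtags(html_string, ["ul", "li", "br"])
    tag_replacements = {
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "**",
        "</strong>": "**",
    }
    return replace_starttags(html_string, tag_replacements)


def llm_intro(selected_model):
    """Return a human-readable description for the selected model."""
    html_string = model_description.get(selected_model, "No description available for this model selection.")
    return format_html_string(html_string)


# cache models for faster reloads
model_cache = {}


def load_model(model_name):
    """Return a cached GPT4All instance, loading and caching it on first use."""
    if model_name not in model_cache:
        model_cache[model_name] = GPT4All(model_name)
    return model_cache[model_name]

Centralising model_cache and load_model in one module preserves the behaviour described in the original docstring: selecting the same model again reuses the already-loaded GPT4All instance instead of reloading its weights. A caller would then use the helpers roughly as app.py does, for example model = load_model(model_choices()[0]).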