georad committed on
Commit
14e76ad
·
verified ·
1 Parent(s): 15f7a2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -9
app.py CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
2
  import pandas as pd
3
  from io import StringIO
4
  import json
5
- import torch
6
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM #AutoModelForTokenClassification
7
  from sentence_transformers import SentenceTransformer, util
8
  #import lmdeploy
@@ -66,14 +66,8 @@ HF_model_results = util.semantic_search(INTdesc_embedding, SBScorpus_embeddings)
66
  HF_model_results_sorted = sorted(HF_model_results, key=lambda x: x[1], reverse=True)
67
  HF_model_results_displayed = HF_model_results_sorted[0:numMAPPINGS_input]
68
 
69
- model_id = "meta-llama/Llama-3.2-1B-Instruct"
70
- pipe = pipeline(
71
- "text-generation",
72
- model=model_id,
73
- torch_dtype=torch.bfloat16,
74
- device_map="auto",
75
- )
76
-
77
 
78
 
79
  col1, col2, col3 = st.columns([1,1,2.5])
 
2
  import pandas as pd
3
  from io import StringIO
4
  import json
5
+ #import torch
6
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM #AutoModelForTokenClassification
7
  from sentence_transformers import SentenceTransformer, util
8
  #import lmdeploy
 
66
  HF_model_results_sorted = sorted(HF_model_results, key=lambda x: x[1], reverse=True)
67
  HF_model_results_displayed = HF_model_results_sorted[0:numMAPPINGS_input]
68
 
69
+ #model_id = "meta-llama/Llama-3.2-1B-Instruct"
70
+ #pipe = pipeline("text-generation", model=model_id, torch_dtype=torch.bfloat16, device_map="auto",)
 
 
 
 
 
 
71
 
72
 
73
  col1, col2, col3 = st.columns([1,1,2.5])