wabang committed on
Commit
2f24e2f
·
verified ·
1 Parent(s): dcd6208

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -18
app.py CHANGED
@@ -2,32 +2,27 @@ import gradio as gr
2
  import pandas as pd
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
5
-
6
- # model, tokenizer 셋팅
7
- model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
10
- # 환경 변수에서 토큰을 가져오기
11
- hf_token = os.environ.get("HF_TOKEN",None)
12
-
13
- #hugging face 로그인 (토큰 가져오기 위해서)
14
  from huggingface_hub import login
15
  import os
 
16
 
 
 
17
 
18
-
19
- # 토큰을 사용하여 로그인
20
  if hf_token:
21
  login(token=hf_token)
22
  else:
23
  print("HF_TOKEN 환경 변수가 설정되지 않았습니다.")
24
-
25
- # KMMLU 데이터셋 로드
26
- # 직접 불러오기 df = pd.read_csv("kmmlu_sample.csv")
27
 
28
- from datasets import load_dataset
 
 
 
29
 
30
- df = load_dataset("HAERAE-HUB/KMMLU", "Accounting")
 
 
31
 
32
  def evaluate_model(question, choices):
33
  prompt = f"질문: {question}\n\n선택지:\n"
@@ -68,11 +63,10 @@ def run_kmmlu_test(subject):
68
  subjects = df['subject'].unique().tolist()
69
 
70
  iface = gr.Interface(
71
-
72
  fn=run_kmmlu_test,
73
  inputs=gr.Dropdown(choices=subjects, label="주제 선택"),
74
  outputs="text",
75
- title="Llama 3을 이용한 KMMLU 테스트",
76
  description="선택한 주제에 대해 KMMLU 테스트를 실행합니다."
77
  )
78
 
 
2
  import pandas as pd
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
 
 
 
 
 
 
 
 
 
5
  from huggingface_hub import login
6
  import os
7
+ from datasets import load_dataset
8
 
9
+ # 환경 변수에서 토큰을 가져오기
10
+ hf_token = os.environ.get("HF_TOKEN", None)
11
 
12
+ # Hugging Face 로그인
 
13
  if hf_token:
14
  login(token=hf_token)
15
  else:
16
  print("HF_TOKEN 환경 변수가 설정되지 않았습니다.")
 
 
 
17
 
18
+ # model, tokenizer 셋팅
19
+ model_name = "meta-llama/Llama-2-7b-chat-hf"
20
+ tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
21
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", token=hf_token)
22
 
23
+ # KMMLU 데이터셋 로드
24
+ dataset = load_dataset("HAERAE-HUB/KMMLU", "Accounting")
25
+ df = dataset['test'].to_pandas()
26
 
27
  def evaluate_model(question, choices):
28
  prompt = f"질문: {question}\n\n선택지:\n"
 
63
  subjects = df['subject'].unique().tolist()
64
 
65
  iface = gr.Interface(
 
66
  fn=run_kmmlu_test,
67
  inputs=gr.Dropdown(choices=subjects, label="주제 선택"),
68
  outputs="text",
69
+ title="Llama 2를 이용한 KMMLU 테스트",
70
  description="선택한 주제에 대해 KMMLU 테스트를 실행합니다."
71
  )
72