Update app.py
app.py CHANGED

@@ -3,14 +3,14 @@ import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-model_path = 'LLM4Binary/llm4decompile-
+model_path = 'LLM4Binary/llm4decompile-6.7b-v2' # V2 Model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
 
 description = """
-# LLM4Decompile
+# LLM4Decompile 6.7B V2
 
-This is a space for testing the [LLM4Decompile
+This is a space for testing the [LLM4Decompile 6.7B V2 model](https://huggingface.co/LLM4Binary/llm4decompile-6.7b-v2). It expects to be given a decompiled function output by Ghidra. I simply copy and paste from the Ghidra GUI, but this is not the method recommended by the official model page, so YMMV.
 """
 
 @spaces.GPU
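For reference, a minimal sketch of how the loaded model might be prompted with Ghidra output. The generation step is not part of this diff, so the sample function, the plain-text prompt format, and the max_new_tokens value are illustrative assumptions rather than the Space's actual code:

# Sketch only: the Space's real generation code is not shown in this diff.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = 'LLM4Binary/llm4decompile-6.7b-v2'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()

# Hypothetical input: pseudocode copied from the Ghidra decompiler view.
ghidra_func = """undefined8 main(void)
{
  puts("hello");
  return 0;
}
"""

inputs = tokenizer(ghidra_func, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=512)
# Decode only the newly generated tokens (the model's reconstructed source).
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))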