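# Smoke test: load the checkpoint in this directory, greedily generate a
# single token, and print both the tokenized inputs and the decoded output.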
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer from the current directory.
model = AutoModelForCausalLM.from_pretrained(".", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

with torch.no_grad():
    input_text = "Hi_"
    inputs = tokenizer(text=input_text, return_tensors="pt")
    # generate() does not accept token_type_ids, so drop them if the tokenizer
    # returned any (pop with a default avoids a KeyError when it did not).
    inputs.pop("token_type_ids", None)
    print(inputs)
    # Greedy decoding of one new token keeps the output deterministic.
    gen = model.generate(**inputs, max_new_tokens=1, do_sample=False)

    decoded = tokenizer.batch_decode(gen, skip_special_tokens=True)
    print(decoded)


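# Kept for reference (commented out): the script below builds a tiny 4-layer,
# 4-expert HunYuan MoE config, randomly initializes the weights, and saves the
# resulting checkpoint to ./hunyuan-tiny.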
"""
from hunyuan.configuration_hunyuan import HunYuanConfig
from hunyuan.modeling_hunyuan import HunYuanMoEV1ForCausalLM
import torch

config = HunYuanConfig.from_pretrained("./Hunyuan-A13B-Instruct", trust_remote_code=True)
config.moe_intermediate_size = [3072, 3072]
config.num_experts = 4
config.num_shared_expert = [1, 1]
config.moe_topk = [2, 2]
config.num_hidden_layers = 4

model = HunYuanMoEV1ForCausalLM(config)
print(model)

torch.manual_seed(0)
state_dict = model.state_dict()
for key in state_dict:
  state_dict[key].uniform_(-0.2, 0.2)
model.load_state_dict(state_dict)

model.save_pretrained("./hunyuan-tiny")
"""