OpenSound committed
Commit 21ae0d8 · verified · 1 Parent(s): f53bac4

Update app.py

Files changed (1): app.py +1 -1
app.py CHANGED
@@ -30,13 +30,13 @@ with open(diffusion_config, 'r') as fp:
 
 v_prediction = diff_config["ddim"]["v_prediction"]
 
-# clapmodel = ClapModel.from_pretrained("laion/larger_clap_general").to(device)
 processor = AutoProcessor.from_pretrained('laion/larger_clap_general')
 clap_config = ClapConfig.from_pretrained("laion/larger_clap_general")
 clapmodel = ClapModel(config)
 clap_ckpt = torch.load(clap_bin_path, map_location='cpu')
 clapmodel.load_state_dict(clap_ckpt)
 clapmodel.to(device)
+# clapmodel = ClapModel.from_pretrained("laion/larger_clap_general").to(device)
 
 autoencoder = Autoencoder(autoencoder_path, 'stable_vae', quantization_first=True)
 autoencoder.eval()
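
For context, the hunk above initialises CLAP from a local checkpoint and only moves the commented-out from_pretrained alternative below that block. A minimal sketch of the local-loading flow is shown here; it is not the app's code verbatim: clap_bin_path and device are placeholders, and ClapModel(config) in app.py is assumed to refer to the clap_config created on the previous line.

# Hedged sketch of the CLAP loading path used in app.py (assumptions noted inline).
import torch
from transformers import AutoProcessor, ClapConfig, ClapModel

clap_bin_path = "checkpoints/larger_clap_general.bin"  # hypothetical path; app.py defines its own
device = "cuda" if torch.cuda.is_available() else "cpu"  # app.py sets device elsewhere

# Processor and config still come from the Hub; only the weights are loaded locally.
processor = AutoProcessor.from_pretrained("laion/larger_clap_general")
clap_config = ClapConfig.from_pretrained("laion/larger_clap_general")

# Build the model from the config, then load the local state dict.
# (app.py passes `config`; `clap_config` is assumed to be the intended variable.)
clapmodel = ClapModel(clap_config)
clap_ckpt = torch.load(clap_bin_path, map_location="cpu")
clapmodel.load_state_dict(clap_ckpt)
clapmodel.to(device)

# Alternative kept as a comment in app.py: download the weights directly.
# clapmodel = ClapModel.from_pretrained("laion/larger_clap_general").to(device)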