1inkusFace committed on
Commit
ddd7baa
·
verified ·
1 Parent(s): 56812d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -16,12 +16,12 @@ import numpy as np
16
  import random
17
 
18
  import torch
19
- torch.backends.cuda.matmul.allow_tf32 = True
20
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
21
  torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
22
- torch.backends.cudnn.allow_tf32 = True
23
  torch.backends.cudnn.deterministic = False
24
- torch.backends.cudnn.benchmark = True
25
  torch.backends.cuda.preferred_blas_library="cublas"
26
  torch.backends.cuda.preferred_linalg_library="cusolver"
27
  torch.set_float32_matmul_precision("highest")
 
16
  import random
17
 
18
  import torch
19
+ torch.backends.cuda.matmul.allow_tf32 = False
20
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
21
  torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
22
+ torch.backends.cudnn.allow_tf32 = False
23
  torch.backends.cudnn.deterministic = False
24
+ torch.backends.cudnn.benchmark = False
25
  torch.backends.cuda.preferred_blas_library="cublas"
26
  torch.backends.cuda.preferred_linalg_library="cusolver"
27
  torch.set_float32_matmul_precision("highest")