pszemraj committed
Commit eb17ecb · 1 Parent(s): f49971f

Update README.md

Files changed (1)
  1. README.md +94 -55
README.md CHANGED
@@ -13,59 +13,6 @@ metrics:
 language:
 - en
 widget:
-- text: |
-    import torch
-    from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
-    checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
-    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
-    sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
-
-    tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt")
-    output = model(**tokens)
-  example_title: Example One
-
-- text: |
-    import torch
-    from tqdm.auto import tqdm
-
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model.to(device)
-
-    progress_bar = tqdm(range(num_training_steps))
-
-    model.train()
-    for epoch in range(num_epochs):
-        for batch in train_dataloader:
-            batch = {k: v.to(device) for k, v in batch.items()}
-            outputs = model(**batch)
-            loss = outputs.loss
-            loss.backward()
-
-            optimizer.step()
-            lr_scheduler.step()
-            optimizer.zero_grad()
-            progress_bar.update(1)
-  example_title: Example Two
-
-- text: |
-    import evaluate
-
-    metric = evaluate.load("glue", "mrpc")
-    model.eval()
-    for batch in eval_dataloader:
-        batch = {k: v.to(device) for k, v in batch.items()}
-        with torch.no_grad():
-            outputs = model(**batch)
-
-        logits = outputs.logits
-        predictions = torch.argmax(logits, dim=-1)
-        metric.add_batch(predictions=predictions, references=batch["labels"])
-
-    metric.compute()
-  example_title: Example Three
-
 - text: |
     git lfs install
     huggingface-cli lfs-enable-largefiles .
@@ -73,7 +20,7 @@ widget:
     git add .
     git commit -a -m "add fp32 chkpt"
     git push
-  example_title: Example Four
+  example_title: bash
 
 - text: |
     export interface DocumentParams {
@@ -97,7 +44,99 @@ widget:
         this.metadata = fields?.metadata ?? {};
       }
     }
-  example_title: Example Five
+  example_title: js
+- text: |
+    def merge(left, right):
+        if len(left) == 0:
+            return right
+
+        if len(right) == 0:
+            return left
+
+        result = []
+        index_left = index_right = 0
+
+        while len(result) < len(left) + len(right):
+            if left[index_left] <= right[index_right]:
+                result.append(left[index_left])
+                index_left += 1
+            else:
+                result.append(right[index_right])
+                index_right += 1
+
+            if index_right == len(right):
+                result += left[index_left:]
+                break
+
+            if index_left == len(left):
+                result += right[index_right:]
+                break
+
+        return result
+  example_title: merge
+
+- text: |
+    import pandas as pd
+    import plotly.graph_objects as go
+
+    df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_apple_stock.csv')
+
+    fig = go.Figure(go.Scatter(x = df['AAPL_x'], y = df['AAPL_y'],
+                               name='Share Prices (in USD)'))
+
+    fig.update_layout(title='Apple Share Prices over time (2014)',
+                      plot_bgcolor='rgb(230, 230,230)',
+                      showlegend=True)
+
+    fig.show()
+  example_title: plot
+- text: |
+    from spellchecker import SpellChecker
+
+    spell = SpellChecker()
+
+    def check_word_spelling(word: str):
+        misspelled = spell.unknown([word])
+        return len(misspelled) == 0
+
+    def eval_and_replace(text: str, match_token: str = "- "):
+        if match_token not in text:
+            return text
+        else:
+            while True:
+                full_before_text = text.split(match_token, maxsplit=1)[0]
+                before_text = [
+                    char for char in full_before_text.split()[-1] if char.isalpha()
+                ]
+                before_text = "".join(before_text)
+                full_after_text = text.split(match_token, maxsplit=1)[-1]
+                after_text = [char for char in full_after_text.split()[0] if char.isalpha()]
+                after_text = "".join(after_text)
+                full_text = before_text + after_text
+                if check_word_spelling(full_text):
+                    text = full_before_text + full_after_text
+                else:
+                    text = full_before_text + " " + full_after_text
+                if match_token not in text:
+                    break
+        return text
+
+    text = "I- am- a go- od- boy"
+    eval_and_replace(text)
+  example_title: speel check
+- text: |
+    import torch
+    from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+    checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
+    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
+    sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
+
+    tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt")
+    output = model(**tokens)
+  example_title: model inference
+
 inference:
   parameters:
     max_length: 96
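
The new `widget` entries are the prompts the hosted inference widget feeds to the model, and `inference.parameters.max_length: 96` caps the generated continuation. As a rough local sketch of what that amounts to, under assumptions not stated in this diff: the checkpoint id below is a placeholder (the model id is not part of the hunk), and AutoModelForCausalLM assumes a decoder-only text/code-generation checkpoint.

# Hedged sketch; the checkpoint name is a placeholder, not taken from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "your-username/your-code-model"  # placeholder model id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# One of the newly added widget prompts (the start of the "merge" example).
prompt = "def merge(left, right):\n    if len(left) == 0:\n        return right\n"

inputs = tokenizer(prompt, return_tensors="pt")
# max_length=96 mirrors inference.parameters.max_length at the bottom of the card.
outputs = model.generate(**inputs, max_length=96)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))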