Upload main.py
main.py
ADDED
@@ -0,0 +1,27 @@
import random
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split

# Load the parallel translation pairs (Korean <-> target language).
df = pd.read_parquet('gemago2_dataset.parquet')

# Build one training line per direction: Korean -> target and target -> Korean.
# The rf-strings keep "\n\n" as literal characters, so each example stays on a
# single physical line when the files are written below.
texts = []
for _, row in tqdm(df.iterrows(), desc="row", leave=False, total=len(df)):
    texts.append(rf"<kor>{row['korean']}</kor>\n\n<{row['language']}>{row['target']}</{row['language']}>")
    texts.append(rf"<{row['language']}>{row['target']}</{row['language']}>\n\n<kor>{row['korean']}</kor>")
del df  # free the DataFrame before splitting

# 80/20 train/test split with a fixed seed for reproducibility.
train_texts, test_texts = train_test_split(texts, test_size=0.2, random_state=42)
del texts

# Shuffle each split and write one example per line.
random.shuffle(test_texts)
with open("test.txt", "w", encoding="UTF-8") as f:
    f.write("\n".join(test_texts))
del test_texts

random.shuffle(train_texts)
with open("train.txt", "w", encoding="UTF-8") as f:
    f.write("\n".join(train_texts))
del train_texts

# gemago2_dataset_final
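
For reference, a minimal sketch of the line format the script emits. The row values below are hypothetical, only for illustration; the real column values come from gemago2_dataset.parquet.

# Hypothetical row used to show the emitted line format.
row = {"korean": "안녕하세요", "language": "eng", "target": "Hello"}
line = rf"<kor>{row['korean']}</kor>\n\n<{row['language']}>{row['target']}</{row['language']}>"
print(line)  # <kor>안녕하세요</kor>\n\n<eng>Hello</eng>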