Create backupapp.py
backupapp.py ADDED (+128 -0)
import streamlit as st
import re

def remove_timestamps(text):
    # Strip transcript timestamps such as "1:42" or "12:03" that sit on
    # their own line (the pattern consumes the trailing newline too)
    return re.sub(r'\d{1,2}:\d{2}\n', '', text)
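
# A quick illustration on a hypothetical snippet:
#   remove_timestamps("1:42\nsome words\n1:48\nmore words\n")
#   returns "some words\nmore words\n"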

def process_text(text):
    # Drop empty lines, then alternate the survivors into a two-level
    # outline: even-indexed lines become bold headings, odd-indexed
    # lines become bullets (with a trailing marker character)
    lines = text.split("\n")
    processed_lines = []

    for line in lines:
        if line:
            processed_lines.append(line)

    outline = ""
    for i, line in enumerate(processed_lines):
        if i % 2 == 0:
            outline += f"**{line}**\n"
        else:
            outline += f"- {line} π\n"

    return outline
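
# For example, process_text("Title\ndetail\nNext") returns
# "**Title**\n- detail π\n**Next**\n"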

def create_jsonl_list(text):
    # Build one {"text": ...} record per non-empty line, the row shape
    # used in JSON Lines (JSONL) datasets; records stay as Python dicts
    lines = text.split("\n")
    jsonl_list = []

    for line in lines:
        if line:
            jsonl_list.append({"text": line})

    return jsonl_list
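
# For instance, create_jsonl_list("a\nb") returns
# [{"text": "a"}, {"text": "b"}]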

def unit_test(input_text):
    # Smoke test rendered inside the app: show the cleaned text,
    # then the JSONL-style records built from it
    st.write("Test Text without Timestamps:")
    test_text_without_timestamps = remove_timestamps(input_text)
    st.write(test_text_without_timestamps)

    st.write("Test JSONL List:")
    test_jsonl_list = create_jsonl_list(test_text_without_timestamps)
    st.write(test_jsonl_list)

# Interactive flow: paste a transcript, see it cleaned and outlined
text_input = st.text_area("Enter text:", value="", height=300)
text_without_timestamps = remove_timestamps(text_input)

st.markdown("**Text without Timestamps:**")
st.write(text_without_timestamps)

processed_text = process_text(text_without_timestamps)
st.markdown("**Markdown Outline with Emojis:**")
st.markdown(processed_text)

unit_test_text = '''
1:42
program the does very very well on your data then you will achieve the best
1:48
generalization possible with a little bit of modification you can turn it into a precise theorem
1:54
and on a very intuitive level it's easy to see what it should be the case if you
2:01
have some data and you're able to find a shorter program which generates this
2:06
data then you've essentially extracted all the all conceivable regularity from
2:11
this data into your program and then you can use these objects to make the best predictions possible like if if you have
2:19
data which is so complex but there is no way to express it as a shorter program
2:25
then it means that your data is totally random there is no way to extract any regularity from it whatsoever now there
2:32
is little known mathematical theory behind this and the proofs of these statements actually not even that hard
2:38
but the one minor slight disappointment is that it's actually not possible at
2:44
least given today's tools and understanding to find the best short program that explains or generates or
2:52
solves your problem given your data this problem is computationally intractable
'''

unit_test(unit_test_text)

unit_test_text_2 = '''
5
to talk a little bit about reinforcement learning so reinforcement learning is a framework it's a framework of evaluating
6:53
agents in their ability to achieve goals and complicated stochastic environments
6:58
you've got an agent which is plugged into an environment as shown in the figure right here and for any given
7:06
agent you can simply run it many times and compute its average reward now the
7:13
thing that's interesting about the reinforcement learning framework is that there exist interesting useful
7:20
reinforcement learning algorithms the framework existed for a long time it
7:25
became interesting once we realized that good algorithms exist now these are there are perfect algorithms but they
7:31
are good enough todo interesting things and all you want the mathematical
7:37
problem is one where you need to maximize the expected reward now one
7:44
important way in which the reinforcement learning framework is not quite complete is that it assumes that the reward is
7:50
given by the environment you see this picture the agent sends an action while
7:56
the reward sends it an observation in a both the observation and the reward backwards that's what the environment
8:01
communicates back the way in which this is not the case in the real world is that we figure out
8:11
what the reward is from the observation we reward ourselves we are not told
8:16
environment doesn't say hey here's some negative reward it's our interpretation over census that lets us determine what
8:23
the reward is and there is only one real true reward in life and this is
8:28
existence or nonexistence and everything else is a corollary of that so well what
8:35
should our agent be you already know the answer should be a neural network because whenever you want to do
8:41
something dense it's going to be a neural network and you want the agent to map observations to actions so you let
8:47
it be parametrized with a neural net and you apply learning algorithm so I want to explain to you how reinforcement
8:53
learning works this is model free reinforcement learning the reinforcement learning has actually been used in practice everywhere but it's
'''

unit_test(unit_test_text_2)
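
The create_jsonl_list helper returns plain Python dicts rather than serialized JSON, which is why st.write can display them directly. A minimal standalone sketch of writing such records out as a real .jsonl file; the sample records, the output filename, and the use of the standard json module are assumptions for illustration, not part of the app:

import json

# Stand-in for create_jsonl_list(remove_timestamps(...)) output
records = [{"text": "first transcript line"}, {"text": "second transcript line"}]

# JSONL means exactly one JSON object per line
with open("transcript.jsonl", "w", encoding="utf-8") as f:
    for record in records:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")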